From 17db4cb9703bcaac3a18ff1c2bbc43c57af9d6b6 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 1 Feb 2024 17:22:52 +0100 Subject: [PATCH 001/318] refact "cscli machines" (#2777) --- cmd/crowdsec-cli/bouncers.go | 5 +- cmd/crowdsec-cli/flag.go | 28 +++ cmd/crowdsec-cli/items.go | 2 - cmd/crowdsec-cli/machines.go | 420 ++++++++++++++++++----------------- cmd/crowdsec-cli/main.go | 22 +- cmd/crowdsec-cli/support.go | 5 +- test/bats/30_machines.bats | 11 +- 7 files changed, 268 insertions(+), 225 deletions(-) create mode 100644 cmd/crowdsec-cli/flag.go diff --git a/cmd/crowdsec-cli/bouncers.go b/cmd/crowdsec-cli/bouncers.go index 410827b3159..d2685901ebb 100644 --- a/cmd/crowdsec-cli/bouncers.go +++ b/cmd/crowdsec-cli/bouncers.go @@ -16,7 +16,6 @@ import ( "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" middlewares "github.com/crowdsecurity/crowdsec/pkg/apiserver/middlewares/v1" - "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/database" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -38,10 +37,10 @@ func askYesNo(message string, defaultAnswer bool) (bool, error) { type cliBouncers struct { db *database.Client - cfg func() *csconfig.Config + cfg configGetter } -func NewCLIBouncers(getconfig func() *csconfig.Config) *cliBouncers { +func NewCLIBouncers(getconfig configGetter) *cliBouncers { return &cliBouncers{ cfg: getconfig, } diff --git a/cmd/crowdsec-cli/flag.go b/cmd/crowdsec-cli/flag.go new file mode 100644 index 00000000000..402302a1f64 --- /dev/null +++ b/cmd/crowdsec-cli/flag.go @@ -0,0 +1,28 @@ +package main + +// Custom types for flag validation and conversion. 
+ +import ( + "errors" +) + +type MachinePassword string + +func (p *MachinePassword) String() string { + return string(*p) +} + +func (p *MachinePassword) Set(v string) error { + // a password can't be more than 72 characters + // due to bcrypt limitations + if len(v) > 72 { + return errors.New("password too long (max 72 characters)") + } + *p = MachinePassword(v) + + return nil +} + +func (p *MachinePassword) Type() string { + return "string" +} diff --git a/cmd/crowdsec-cli/items.go b/cmd/crowdsec-cli/items.go index a1d079747fa..851be553f15 100644 --- a/cmd/crowdsec-cli/items.go +++ b/cmd/crowdsec-cli/items.go @@ -138,8 +138,6 @@ func listItems(out io.Writer, itemTypes []string, items map[string][]*cwhub.Item } csvwriter.Flush() - default: - return fmt.Errorf("unknown output format '%s'", csConfig.Cscli.Output) } return nil diff --git a/cmd/crowdsec-cli/machines.go b/cmd/crowdsec-cli/machines.go index 581683baa8f..0cabccf76f5 100644 --- a/cmd/crowdsec-cli/machines.go +++ b/cmd/crowdsec-cli/machines.go @@ -5,7 +5,6 @@ import ( "encoding/csv" "encoding/json" "fmt" - "io" "math/big" "os" "strings" @@ -101,85 +100,97 @@ func getLastHeartbeat(m *ent.Machine) (string, bool) { return hb, true } -func getAgents(out io.Writer, dbClient *database.Client) error { - machines, err := dbClient.ListMachines() +type cliMachines struct{ + db *database.Client + cfg configGetter +} + +func NewCLIMachines(getconfig configGetter) *cliMachines { + return &cliMachines{ + cfg: getconfig, + } +} + +func (cli *cliMachines) NewCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "machines [action]", + Short: "Manage local API machines [requires local API]", + Long: `To list/add/delete/validate/prune machines. +Note: This command requires database direct access, so is intended to be run on the local API machine. 
+`, + Example: `cscli machines [action]`, + DisableAutoGenTag: true, + Aliases: []string{"machine"}, + PersistentPreRunE: func(_ *cobra.Command, _ []string) error { + var err error + if err = require.LAPI(cli.cfg()); err != nil { + return err + } + cli.db, err = database.NewClient(cli.cfg().DbConfig) + if err != nil { + return fmt.Errorf("unable to create new database client: %s", err) + } + return nil + }, + } + + cmd.AddCommand(cli.newListCmd()) + cmd.AddCommand(cli.newAddCmd()) + cmd.AddCommand(cli.newDeleteCmd()) + cmd.AddCommand(cli.newValidateCmd()) + cmd.AddCommand(cli.newPruneCmd()) + + return cmd +} + +func (cli *cliMachines) list() error { + out := color.Output + + machines, err := cli.db.ListMachines() if err != nil { return fmt.Errorf("unable to list machines: %s", err) } - switch csConfig.Cscli.Output { + switch cli.cfg().Cscli.Output { case "human": getAgentsTable(out, machines) case "json": enc := json.NewEncoder(out) enc.SetIndent("", " ") + if err := enc.Encode(machines); err != nil { return fmt.Errorf("failed to marshal") } + return nil case "raw": csvwriter := csv.NewWriter(out) + err := csvwriter.Write([]string{"machine_id", "ip_address", "updated_at", "validated", "version", "auth_type", "last_heartbeat"}) if err != nil { return fmt.Errorf("failed to write header: %s", err) } + for _, m := range machines { validated := "false" if m.IsValidated { validated = "true" } + hb, _ := getLastHeartbeat(m) - err := csvwriter.Write([]string{m.MachineId, m.IpAddress, m.UpdatedAt.Format(time.RFC3339), validated, m.Version, m.AuthType, hb}) - if err != nil { + + if err := csvwriter.Write([]string{m.MachineId, m.IpAddress, m.UpdatedAt.Format(time.RFC3339), validated, m.Version, m.AuthType, hb}); err != nil { return fmt.Errorf("failed to write raw output: %w", err) } } - csvwriter.Flush() - default: - return fmt.Errorf("unknown output '%s'", csConfig.Cscli.Output) - } - return nil -} - -type cliMachines struct{} - -func NewCLIMachines() *cliMachines { - return 
&cliMachines{} -} -func (cli cliMachines) NewCommand() *cobra.Command { - cmd := &cobra.Command{ - Use: "machines [action]", - Short: "Manage local API machines [requires local API]", - Long: `To list/add/delete/validate/prune machines. -Note: This command requires database direct access, so is intended to be run on the local API machine. -`, - Example: `cscli machines [action]`, - DisableAutoGenTag: true, - Aliases: []string{"machine"}, - PersistentPreRunE: func(_ *cobra.Command, _ []string) error { - var err error - if err = require.LAPI(csConfig); err != nil { - return err - } - dbClient, err = database.NewClient(csConfig.DbConfig) - if err != nil { - return fmt.Errorf("unable to create new database client: %s", err) - } - return nil - }, + csvwriter.Flush() } - cmd.AddCommand(cli.NewListCmd()) - cmd.AddCommand(cli.NewAddCmd()) - cmd.AddCommand(cli.NewDeleteCmd()) - cmd.AddCommand(cli.NewValidateCmd()) - cmd.AddCommand(cli.NewPruneCmd()) - - return cmd + return nil } -func (cli cliMachines) NewListCmd() *cobra.Command { +func (cli *cliMachines) newListCmd() *cobra.Command { cmd := &cobra.Command{ Use: "list", Short: "list all machines in the database", @@ -188,84 +199,60 @@ func (cli cliMachines) NewListCmd() *cobra.Command { Args: cobra.NoArgs, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { - err := getAgents(color.Output, dbClient) - if err != nil { - return fmt.Errorf("unable to list machines: %s", err) - } - - return nil + return cli.list() }, } return cmd } -func (cli cliMachines) NewAddCmd() *cobra.Command { +func (cli *cliMachines) newAddCmd() *cobra.Command { + var ( + password MachinePassword + dumpFile string + apiURL string + interactive bool + autoAdd bool + force bool + ) + cmd := &cobra.Command{ Use: "add", Short: "add a single machine to the database", DisableAutoGenTag: true, Long: `Register a new machine in the database. 
cscli should be on the same machine as LAPI.`, - Example: ` -cscli machines add --auto + Example: `cscli machines add --auto cscli machines add MyTestMachine --auto cscli machines add MyTestMachine --password MyPassword -`, - RunE: cli.add, +cscli machines add -f- --auto > /tmp/mycreds.yaml`, + RunE: func(_ *cobra.Command, args []string) error { + return cli.add(args, string(password), dumpFile, apiURL, interactive, autoAdd, force) + }, } flags := cmd.Flags() - flags.StringP("password", "p", "", "machine password to login to the API") - flags.StringP("file", "f", "", "output file destination (defaults to "+csconfig.DefaultConfigPath("local_api_credentials.yaml")+")") - flags.StringP("url", "u", "", "URL of the local API") - flags.BoolP("interactive", "i", false, "interfactive mode to enter the password") - flags.BoolP("auto", "a", false, "automatically generate password (and username if not provided)") - flags.Bool("force", false, "will force add the machine if it already exist") + flags.VarP(&password, "password", "p", "machine password to login to the API") + flags.StringVarP(&dumpFile, "file", "f", "", "output file destination (defaults to "+csconfig.DefaultConfigPath("local_api_credentials.yaml")+")") + flags.StringVarP(&apiURL, "url", "u", "", "URL of the local API") + flags.BoolVarP(&interactive, "interactive", "i", false, "interfactive mode to enter the password") + flags.BoolVarP(&autoAdd, "auto", "a", false, "automatically generate password (and username if not provided)") + flags.BoolVar(&force, "force", false, "will force add the machine if it already exist") return cmd } -func (cli cliMachines) add(cmd *cobra.Command, args []string) error { - flags := cmd.Flags() - - machinePassword, err := flags.GetString("password") - if err != nil { - return err - } - - dumpFile, err := flags.GetString("file") - if err != nil { - return err - } - - apiURL, err := flags.GetString("url") - if err != nil { - return err - } - - interactive, err := 
flags.GetBool("interactive") - if err != nil { - return err - } - - autoAdd, err := flags.GetBool("auto") - if err != nil { - return err - } - - force, err := flags.GetBool("force") - if err != nil { - return err - } - - var machineID string +func (cli *cliMachines) add(args []string, machinePassword string, dumpFile string, apiURL string, interactive bool, autoAdd bool, force bool) error { + var ( + err error + machineID string + ) // create machineID if not specified by user if len(args) == 0 { if !autoAdd { - printHelp(cmd) - return nil + return fmt.Errorf("please specify a machine name to add, or use --auto") } + machineID, err = generateID("") if err != nil { return fmt.Errorf("unable to generate machine id: %s", err) @@ -274,15 +261,18 @@ func (cli cliMachines) add(cmd *cobra.Command, args []string) error { machineID = args[0] } + clientCfg := cli.cfg().API.Client + serverCfg := cli.cfg().API.Server + /*check if file already exists*/ - if dumpFile == "" && csConfig.API.Client != nil && csConfig.API.Client.CredentialsFilePath != "" { - credFile := csConfig.API.Client.CredentialsFilePath + if dumpFile == "" && clientCfg != nil && clientCfg.CredentialsFilePath != "" { + credFile := clientCfg.CredentialsFilePath // use the default only if the file does not exist _, err = os.Stat(credFile) switch { case os.IsNotExist(err) || force: - dumpFile = csConfig.API.Client.CredentialsFilePath + dumpFile = credFile case err != nil: return fmt.Errorf("unable to stat '%s': %s", credFile, err) default: @@ -302,49 +292,85 @@ func (cli cliMachines) add(cmd *cobra.Command, args []string) error { machinePassword = generatePassword(passwordLength) } else if machinePassword == "" && interactive { qs := &survey.Password{ - Message: "Please provide a password for the machine", + Message: "Please provide a password for the machine:", } survey.AskOne(qs, &machinePassword) } + password := strfmt.Password(machinePassword) - _, err = dbClient.CreateMachine(&machineID, &password, "", true, 
force, types.PasswordAuthType) + + _, err = cli.db.CreateMachine(&machineID, &password, "", true, force, types.PasswordAuthType) if err != nil { return fmt.Errorf("unable to create machine: %s", err) } - fmt.Printf("Machine '%s' successfully added to the local API.\n", machineID) + + fmt.Fprintf(os.Stderr, "Machine '%s' successfully added to the local API.\n", machineID) if apiURL == "" { - if csConfig.API.Client != nil && csConfig.API.Client.Credentials != nil && csConfig.API.Client.Credentials.URL != "" { - apiURL = csConfig.API.Client.Credentials.URL - } else if csConfig.API.Server != nil && csConfig.API.Server.ListenURI != "" { - apiURL = "http://" + csConfig.API.Server.ListenURI + if clientCfg != nil && clientCfg.Credentials != nil && clientCfg.Credentials.URL != "" { + apiURL = clientCfg.Credentials.URL + } else if serverCfg != nil && serverCfg.ListenURI != "" { + apiURL = "http://" + serverCfg.ListenURI } else { return fmt.Errorf("unable to dump an api URL. Please provide it in your configuration or with the -u parameter") } } + apiCfg := csconfig.ApiCredentialsCfg{ Login: machineID, Password: password.String(), URL: apiURL, } + apiConfigDump, err := yaml.Marshal(apiCfg) if err != nil { return fmt.Errorf("unable to marshal api credentials: %s", err) } + if dumpFile != "" && dumpFile != "-" { err = os.WriteFile(dumpFile, apiConfigDump, 0o600) if err != nil { return fmt.Errorf("write api credentials in '%s' failed: %s", dumpFile, err) } - fmt.Printf("API credentials written to '%s'.\n", dumpFile) + fmt.Fprintf(os.Stderr, "API credentials written to '%s'.\n", dumpFile) } else { - fmt.Printf("%s\n", string(apiConfigDump)) + fmt.Print(string(apiConfigDump)) } return nil } -func (cli cliMachines) NewDeleteCmd() *cobra.Command { +func (cli *cliMachines) deleteValid(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + machines, err := cli.db.ListMachines() + if err != nil { + cobra.CompError("unable to list machines " + 
err.Error()) + } + + ret := []string{} + + for _, machine := range machines { + if strings.Contains(machine.MachineId, toComplete) && !slices.Contains(args, machine.MachineId) { + ret = append(ret, machine.MachineId) + } + } + + return ret, cobra.ShellCompDirectiveNoFileComp +} + +func (cli *cliMachines) delete(machines []string) error { + for _, machineID := range machines { + err := cli.db.DeleteWatcher(machineID) + if err != nil { + log.Errorf("unable to delete machine '%s': %s", machineID, err) + return nil + } + log.Infof("machine '%s' deleted successfully", machineID) + } + + return nil +} + +func (cli *cliMachines) newDeleteCmd() *cobra.Command { cmd := &cobra.Command{ Use: "delete [machine_name]...", Short: "delete machine(s) by name", @@ -352,40 +378,75 @@ func (cli cliMachines) NewDeleteCmd() *cobra.Command { Args: cobra.MinimumNArgs(1), Aliases: []string{"remove"}, DisableAutoGenTag: true, - ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - machines, err := dbClient.ListMachines() - if err != nil { - cobra.CompError("unable to list machines " + err.Error()) - } - ret := make([]string, 0) - for _, machine := range machines { - if strings.Contains(machine.MachineId, toComplete) && !slices.Contains(args, machine.MachineId) { - ret = append(ret, machine.MachineId) - } - } - return ret, cobra.ShellCompDirectiveNoFileComp + ValidArgsFunction: cli.deleteValid, + RunE: func(_ *cobra.Command, args []string) error { + return cli.delete(args) }, - RunE: cli.delete, } return cmd } -func (cli cliMachines) delete(_ *cobra.Command, args []string) error { - for _, machineID := range args { - err := dbClient.DeleteWatcher(machineID) - if err != nil { - log.Errorf("unable to delete machine '%s': %s", machineID, err) +func (cli *cliMachines) prune(duration time.Duration, notValidOnly bool, force bool) error { + if duration < 2*time.Minute && !notValidOnly { + if yes, err := askYesNo( + "The duration you 
provided is less than 2 minutes. " + + "This can break installations if the machines are only temporarily disconnected. Continue?", false); err != nil { + return err + } else if !yes { + fmt.Println("User aborted prune. No changes were made.") return nil } - log.Infof("machine '%s' deleted successfully", machineID) } + machines := []*ent.Machine{} + if pending, err := cli.db.QueryPendingMachine(); err == nil { + machines = append(machines, pending...) + } + + if !notValidOnly { + if pending, err := cli.db.QueryLastValidatedHeartbeatLT(time.Now().UTC().Add(duration)); err == nil { + machines = append(machines, pending...) + } + } + + if len(machines) == 0 { + fmt.Println("no machines to prune") + return nil + } + + getAgentsTable(color.Output, machines) + + if !force { + if yes, err := askYesNo( + "You are about to PERMANENTLY remove the above machines from the database. " + + "These will NOT be recoverable. Continue?", false); err != nil { + return err + } else if !yes { + fmt.Println("User aborted prune. 
No changes were made.") + return nil + } + } + + deleted, err := cli.db.BulkDeleteWatchers(machines) + if err != nil { + return fmt.Errorf("unable to prune machines: %s", err) + } + + fmt.Fprintf(os.Stderr, "successfully delete %d machines\n", deleted) + return nil } -func (cli cliMachines) NewPruneCmd() *cobra.Command { - var parsedDuration time.Duration +func (cli *cliMachines) newPruneCmd() *cobra.Command { + var ( + duration time.Duration + notValidOnly bool + force bool + ) + + const defaultDuration = 10 * time.Minute + cmd := &cobra.Command{ Use: "prune", Short: "prune multiple machines from the database", @@ -395,76 +456,29 @@ cscli machines prune --duration 1h cscli machines prune --not-validated-only --force`, Args: cobra.NoArgs, DisableAutoGenTag: true, - PreRunE: func(cmd *cobra.Command, _ []string) error { - dur, _ := cmd.Flags().GetString("duration") - var err error - parsedDuration, err = time.ParseDuration(fmt.Sprintf("-%s", dur)) - if err != nil { - return fmt.Errorf("unable to parse duration '%s': %s", dur, err) - } - return nil - }, - RunE: func(cmd *cobra.Command, _ []string) error { - notValidOnly, _ := cmd.Flags().GetBool("not-validated-only") - force, _ := cmd.Flags().GetBool("force") - if parsedDuration >= 0-60*time.Second && !notValidOnly { - var answer bool - prompt := &survey.Confirm{ - Message: "The duration you provided is less than or equal 60 seconds this can break installations do you want to continue ?", - Default: false, - } - if err := survey.AskOne(prompt, &answer); err != nil { - return fmt.Errorf("unable to ask about prune check: %s", err) - } - if !answer { - fmt.Println("user aborted prune no changes were made") - return nil - } - } - machines := make([]*ent.Machine, 0) - if pending, err := dbClient.QueryPendingMachine(); err == nil { - machines = append(machines, pending...) 
- } - if !notValidOnly { - if pending, err := dbClient.QueryLastValidatedHeartbeatLT(time.Now().UTC().Add(parsedDuration)); err == nil { - machines = append(machines, pending...) - } - } - if len(machines) == 0 { - fmt.Println("no machines to prune") - return nil - } - getAgentsTable(color.Output, machines) - if !force { - var answer bool - prompt := &survey.Confirm{ - Message: "You are about to PERMANENTLY remove the above machines from the database these will NOT be recoverable, continue ?", - Default: false, - } - if err := survey.AskOne(prompt, &answer); err != nil { - return fmt.Errorf("unable to ask about prune check: %s", err) - } - if !answer { - fmt.Println("user aborted prune no changes were made") - return nil - } - } - nbDeleted, err := dbClient.BulkDeleteWatchers(machines) - if err != nil { - return fmt.Errorf("unable to prune machines: %s", err) - } - fmt.Printf("successfully delete %d machines\n", nbDeleted) - return nil + RunE: func(_ *cobra.Command, _ []string) error { + return cli.prune(duration, notValidOnly, force) }, } - cmd.Flags().StringP("duration", "d", "10m", "duration of time since validated machine last heartbeat") - cmd.Flags().Bool("not-validated-only", false, "only prune machines that are not validated") - cmd.Flags().Bool("force", false, "force prune without asking for confirmation") + + flags := cmd.Flags() + flags.DurationVarP(&duration, "duration", "d", defaultDuration, "duration of time since validated machine last heartbeat") + flags.BoolVar(¬ValidOnly, "not-validated-only", false, "only prune machines that are not validated") + flags.BoolVar(&force, "force", false, "force prune without asking for confirmation") return cmd } -func (cli cliMachines) NewValidateCmd() *cobra.Command { +func (cli *cliMachines) validate(machineID string) error { + if err := cli.db.ValidateMachine(machineID); err != nil { + return fmt.Errorf("unable to validate machine '%s': %s", machineID, err) + } + log.Infof("machine '%s' validated successfully", 
machineID) + + return nil +} + +func (cli *cliMachines) newValidateCmd() *cobra.Command { cmd := &cobra.Command{ Use: "validate", Short: "validate a machine to access the local API", @@ -472,14 +486,8 @@ func (cli cliMachines) NewValidateCmd() *cobra.Command { Example: `cscli machines validate "machine_name"`, Args: cobra.ExactArgs(1), DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, args []string) error { - machineID := args[0] - if err := dbClient.ValidateMachine(machineID); err != nil { - return fmt.Errorf("unable to validate machine '%s': %s", machineID, err) - } - log.Infof("machine '%s' validated successfully", machineID) - - return nil + RunE: func(cmd *cobra.Command, args []string) error { + return cli.validate(args[0]) }, } diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index fda4cddc2bc..91e31a9778c 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -21,7 +21,7 @@ var ConfigFilePath string var csConfig *csconfig.Config var dbClient *database.Client -var OutputFormat string +var outputFormat string var OutputColor string var mergedConfig string @@ -29,6 +29,8 @@ var mergedConfig string // flagBranch overrides the value in csConfig.Cscli.HubBranch var flagBranch = "" +type configGetter func() *csconfig.Config + func initConfig() { var err error @@ -64,16 +66,18 @@ func initConfig() { csConfig.Cscli.HubBranch = flagBranch } - if OutputFormat != "" { - csConfig.Cscli.Output = OutputFormat - - if OutputFormat != "json" && OutputFormat != "raw" && OutputFormat != "human" { - log.Fatalf("output format %s unknown", OutputFormat) - } + if outputFormat != "" { + csConfig.Cscli.Output = outputFormat } + if csConfig.Cscli.Output == "" { csConfig.Cscli.Output = "human" } + + if csConfig.Cscli.Output != "human" && csConfig.Cscli.Output != "json" && csConfig.Cscli.Output != "raw" { + log.Fatalf("output format '%s' not supported: must be one of human, json, raw", csConfig.Cscli.Output) + } + if csConfig.Cscli.Output == "json" 
{ log.SetFormatter(&log.JSONFormatter{}) log.SetLevel(log.ErrorLevel) @@ -146,7 +150,7 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.SetOut(color.Output) cmd.PersistentFlags().StringVarP(&ConfigFilePath, "config", "c", csconfig.DefaultConfigPath("config.yaml"), "path to crowdsec config file") - cmd.PersistentFlags().StringVarP(&OutputFormat, "output", "o", "", "Output format: human, json, raw") + cmd.PersistentFlags().StringVarP(&outputFormat, "output", "o", "", "Output format: human, json, raw") cmd.PersistentFlags().StringVarP(&OutputColor, "color", "", "auto", "Output color: yes, no, auto") cmd.PersistentFlags().BoolVar(&dbg_lvl, "debug", false, "Set logging to debug") cmd.PersistentFlags().BoolVar(&nfo_lvl, "info", false, "Set logging to info") @@ -197,7 +201,7 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewCLIAlerts().NewCommand()) cmd.AddCommand(NewCLISimulation().NewCommand()) cmd.AddCommand(NewCLIBouncers(getconfig).NewCommand()) - cmd.AddCommand(NewCLIMachines().NewCommand()) + cmd.AddCommand(NewCLIMachines(getconfig).NewCommand()) cmd.AddCommand(NewCLICapi().NewCommand()) cmd.AddCommand(NewLapiCmd()) cmd.AddCommand(NewCompletionCmd()) diff --git a/cmd/crowdsec-cli/support.go b/cmd/crowdsec-cli/support.go index 47768e7c2ee..ed7f7cf2ffd 100644 --- a/cmd/crowdsec-cli/support.go +++ b/cmd/crowdsec-cli/support.go @@ -159,10 +159,11 @@ func collectBouncers(dbClient *database.Client) ([]byte, error) { func collectAgents(dbClient *database.Client) ([]byte, error) { out := bytes.NewBuffer(nil) - err := getAgents(out, dbClient) + machines, err := dbClient.ListMachines() if err != nil { - return nil, err + return nil, fmt.Errorf("unable to list machines: %s", err) } + getAgentsTable(out, machines) return out.Bytes(), nil } diff --git a/test/bats/30_machines.bats b/test/bats/30_machines.bats index c7a72c334b1..f32c376e5b0 100644 --- a/test/bats/30_machines.bats +++ 
b/test/bats/30_machines.bats @@ -34,13 +34,18 @@ teardown() { rune -0 jq -r '.msg' <(stderr) assert_output --partial 'already exists: please remove it, use "--force" or specify a different file with "-f"' rune -0 cscli machines add local -a --force - assert_output --partial "Machine 'local' successfully added to the local API." + assert_stderr --partial "Machine 'local' successfully added to the local API." +} + +@test "passwords have a size limit" { + rune -1 cscli machines add local --password "$(printf '%73s' '' | tr ' ' x)" + assert_stderr --partial "password too long (max 72 characters)" } @test "add a new machine and delete it" { rune -0 cscli machines add -a -f /dev/null CiTestMachine -o human - assert_output --partial "Machine 'CiTestMachine' successfully added to the local API" - assert_output --partial "API credentials written to '/dev/null'" + assert_stderr --partial "Machine 'CiTestMachine' successfully added to the local API" + assert_stderr --partial "API credentials written to '/dev/null'" # we now have two machines rune -0 cscli machines list -o json From 785fce4dc752581613375b9edc23ee87d51cd98e Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 1 Feb 2024 17:24:00 +0100 Subject: [PATCH 002/318] refact "cscli alerts" (#2778) --- cmd/crowdsec-cli/alerts.go | 161 ++++++++++++++----------------------- 1 file changed, 59 insertions(+), 102 deletions(-) diff --git a/cmd/crowdsec-cli/alerts.go b/cmd/crowdsec-cli/alerts.go index 15824d2d067..4ab71be5bbf 100644 --- a/cmd/crowdsec-cli/alerts.go +++ b/cmd/crowdsec-cli/alerts.go @@ -11,7 +11,6 @@ import ( "strconv" "strings" "text/template" - "time" "github.com/fatih/color" "github.com/go-openapi/strfmt" @@ -48,52 +47,9 @@ func DecisionsFromAlert(alert *models.Alert) string { return ret } -func DateFromAlert(alert *models.Alert) string { - ts, err := time.Parse(time.RFC3339, alert.CreatedAt) - if err != nil { - log.Infof("while parsing %s with %s : %s", alert.CreatedAt, 
time.RFC3339, err) - return alert.CreatedAt - } - return ts.Format(time.RFC822) -} - -func SourceFromAlert(alert *models.Alert) string { - - //more than one item, just number and scope - if len(alert.Decisions) > 1 { - return fmt.Sprintf("%d %ss (%s)", len(alert.Decisions), *alert.Decisions[0].Scope, *alert.Decisions[0].Origin) - } - - //fallback on single decision information - if len(alert.Decisions) == 1 { - return fmt.Sprintf("%s:%s", *alert.Decisions[0].Scope, *alert.Decisions[0].Value) - } - - //try to compose a human friendly version - if *alert.Source.Value != "" && *alert.Source.Scope != "" { - scope := fmt.Sprintf("%s:%s", *alert.Source.Scope, *alert.Source.Value) - extra := "" - if alert.Source.Cn != "" { - extra = alert.Source.Cn - } - if alert.Source.AsNumber != "" { - extra += fmt.Sprintf("/%s", alert.Source.AsNumber) - } - if alert.Source.AsName != "" { - extra += fmt.Sprintf("/%s", alert.Source.AsName) - } - - if extra != "" { - scope += " (" + extra + ")" - } - return scope - } - return "" -} - -func AlertsToTable(alerts *models.GetAlertsResponse, printMachine bool) error { - - if csConfig.Cscli.Output == "raw" { +func alertsToTable(alerts *models.GetAlertsResponse, printMachine bool) error { + switch csConfig.Cscli.Output { + case "raw": csvwriter := csv.NewWriter(os.Stdout) header := []string{"id", "scope", "value", "reason", "country", "as", "decisions", "created_at"} if printMachine { @@ -123,7 +79,7 @@ func AlertsToTable(alerts *models.GetAlertsResponse, printMachine bool) error { } } csvwriter.Flush() - } else if csConfig.Cscli.Output == "json" { + case "json": if *alerts == nil { // avoid returning "null" in json // could be cleaner if we used slice of alerts directly @@ -131,8 +87,8 @@ func AlertsToTable(alerts *models.GetAlertsResponse, printMachine bool) error { return nil } x, _ := json.MarshalIndent(alerts, "", " ") - fmt.Printf("%s", string(x)) - } else if csConfig.Cscli.Output == "human" { + fmt.Print(string(x)) + case "human": if 
len(*alerts) == 0 { fmt.Println("No active alerts") return nil @@ -160,59 +116,60 @@ var alertTemplate = ` ` -func DisplayOneAlert(alert *models.Alert, withDetail bool) error { - if csConfig.Cscli.Output == "human" { - tmpl, err := template.New("alert").Parse(alertTemplate) - if err != nil { - return err - } - err = tmpl.Execute(os.Stdout, alert) - if err != nil { - return err - } - - alertDecisionsTable(color.Output, alert) +func displayOneAlert(alert *models.Alert, withDetail bool) error { + tmpl, err := template.New("alert").Parse(alertTemplate) + if err != nil { + return err + } + err = tmpl.Execute(os.Stdout, alert) + if err != nil { + return err + } - if len(alert.Meta) > 0 { - fmt.Printf("\n - Context :\n") - sort.Slice(alert.Meta, func(i, j int) bool { - return alert.Meta[i].Key < alert.Meta[j].Key - }) - table := newTable(color.Output) - table.SetRowLines(false) - table.SetHeaders("Key", "Value") - for _, meta := range alert.Meta { - var valSlice []string - if err := json.Unmarshal([]byte(meta.Value), &valSlice); err != nil { - return fmt.Errorf("unknown context value type '%s' : %s", meta.Value, err) - } - for _, value := range valSlice { - table.AddRow( - meta.Key, - value, - ) - } + alertDecisionsTable(color.Output, alert) + + if len(alert.Meta) > 0 { + fmt.Printf("\n - Context :\n") + sort.Slice(alert.Meta, func(i, j int) bool { + return alert.Meta[i].Key < alert.Meta[j].Key + }) + table := newTable(color.Output) + table.SetRowLines(false) + table.SetHeaders("Key", "Value") + for _, meta := range alert.Meta { + var valSlice []string + if err := json.Unmarshal([]byte(meta.Value), &valSlice); err != nil { + return fmt.Errorf("unknown context value type '%s' : %s", meta.Value, err) + } + for _, value := range valSlice { + table.AddRow( + meta.Key, + value, + ) } - table.Render() } + table.Render() + } - if withDetail { - fmt.Printf("\n - Events :\n") - for _, event := range alert.Events { - alertEventTable(color.Output, event) - } + if withDetail { + 
fmt.Printf("\n - Events :\n") + for _, event := range alert.Events { + alertEventTable(color.Output, event) } } + return nil } -type cliAlerts struct{} +type cliAlerts struct{ + client *apiclient.ApiClient +} func NewCLIAlerts() *cliAlerts { return &cliAlerts{} } -func (cli cliAlerts) NewCommand() *cobra.Command { +func (cli *cliAlerts) NewCommand() *cobra.Command { cmd := &cobra.Command{ Use: "alerts [action]", Short: "Manage alerts", @@ -228,7 +185,7 @@ func (cli cliAlerts) NewCommand() *cobra.Command { if err != nil { return fmt.Errorf("parsing api url %s: %w", apiURL, err) } - Client, err = apiclient.NewClient(&apiclient.Config{ + cli.client, err = apiclient.NewClient(&apiclient.Config{ MachineID: csConfig.API.Client.Credentials.Login, Password: strfmt.Password(csConfig.API.Client.Credentials.Password), UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), @@ -251,7 +208,7 @@ func (cli cliAlerts) NewCommand() *cobra.Command { return cmd } -func (cli cliAlerts) NewListCmd() *cobra.Command { +func (cli *cliAlerts) NewListCmd() *cobra.Command { var alertListFilter = apiclient.AlertsListOpts{ ScopeEquals: new(string), ValueEquals: new(string), @@ -345,12 +302,12 @@ cscli alerts list --type ban`, alertListFilter.Contains = new(bool) } - alerts, _, err := Client.Alerts.List(context.Background(), alertListFilter) + alerts, _, err := cli.client.Alerts.List(context.Background(), alertListFilter) if err != nil { return fmt.Errorf("unable to list alerts: %v", err) } - err = AlertsToTable(alerts, printMachine) + err = alertsToTable(alerts, printMachine) if err != nil { return fmt.Errorf("unable to list alerts: %v", err) } @@ -376,7 +333,7 @@ cscli alerts list --type ban`, return cmd } -func (cli cliAlerts) NewDeleteCmd() *cobra.Command { +func (cli *cliAlerts) NewDeleteCmd() *cobra.Command { var ActiveDecision *bool var AlertDeleteAll bool var delAlertByID string @@ -451,12 +408,12 @@ cscli alerts delete -s crowdsecurity/ssh-bf"`, var alerts *models.DeleteAlertsResponse 
if delAlertByID == "" { - alerts, _, err = Client.Alerts.Delete(context.Background(), alertDeleteFilter) + alerts, _, err = cli.client.Alerts.Delete(context.Background(), alertDeleteFilter) if err != nil { return fmt.Errorf("unable to delete alerts : %v", err) } } else { - alerts, _, err = Client.Alerts.DeleteOne(context.Background(), delAlertByID) + alerts, _, err = cli.client.Alerts.DeleteOne(context.Background(), delAlertByID) if err != nil { return fmt.Errorf("unable to delete alert: %v", err) } @@ -478,7 +435,7 @@ cscli alerts delete -s crowdsecurity/ssh-bf"`, return cmd } -func (cli cliAlerts) NewInspectCmd() *cobra.Command { +func (cli *cliAlerts) NewInspectCmd() *cobra.Command { var details bool cmd := &cobra.Command{ Use: `inspect "alert_id"`, @@ -495,13 +452,13 @@ func (cli cliAlerts) NewInspectCmd() *cobra.Command { if err != nil { return fmt.Errorf("bad alert id %s", alertID) } - alert, _, err := Client.Alerts.GetByID(context.Background(), id) + alert, _, err := cli.client.Alerts.GetByID(context.Background(), id) if err != nil { return fmt.Errorf("can't find alert with id %s: %s", alertID, err) } switch csConfig.Cscli.Output { case "human": - if err := DisplayOneAlert(alert, details); err != nil { + if err := displayOneAlert(alert, details); err != nil { continue } case "json": @@ -528,7 +485,7 @@ func (cli cliAlerts) NewInspectCmd() *cobra.Command { return cmd } -func (cli cliAlerts) NewFlushCmd() *cobra.Command { +func (cli *cliAlerts) NewFlushCmd() *cobra.Command { var maxItems int var maxAge string cmd := &cobra.Command{ @@ -542,12 +499,12 @@ func (cli cliAlerts) NewFlushCmd() *cobra.Command { if err := require.LAPI(csConfig); err != nil { return err } - dbClient, err = database.NewClient(csConfig.DbConfig) + db, err := database.NewClient(csConfig.DbConfig) if err != nil { return fmt.Errorf("unable to create new database client: %s", err) } log.Info("Flushing alerts. !! 
This may take a long time !!") - err = dbClient.FlushAlerts(maxAge, maxItems) + err = db.FlushAlerts(maxAge, maxItems) if err != nil { return fmt.Errorf("unable to flush alerts: %s", err) } From e6f5d157b8a84ff68393a1446258eea093fc99ad Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 1 Feb 2024 17:25:29 +0100 Subject: [PATCH 003/318] refact "cscli hub" (#2800) --- cmd/crowdsec-cli/hub.go | 84 ++++++++++++++++++++-------------------- cmd/crowdsec-cli/main.go | 2 +- 2 files changed, 44 insertions(+), 42 deletions(-) diff --git a/cmd/crowdsec-cli/hub.go b/cmd/crowdsec-cli/hub.go index 3a2913f0513..d3ce380bb6f 100644 --- a/cmd/crowdsec-cli/hub.go +++ b/cmd/crowdsec-cli/hub.go @@ -13,13 +13,17 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -type cliHub struct{} +type cliHub struct{ + cfg configGetter +} -func NewCLIHub() *cliHub { - return &cliHub{} +func NewCLIHub(getconfig configGetter) *cliHub { + return &cliHub{ + cfg: getconfig, + } } -func (cli cliHub) NewCommand() *cobra.Command { +func (cli *cliHub) NewCommand() *cobra.Command { cmd := &cobra.Command{ Use: "hub [action]", Short: "Manage hub index", @@ -34,23 +38,16 @@ cscli hub upgrade`, DisableAutoGenTag: true, } - cmd.AddCommand(cli.NewListCmd()) - cmd.AddCommand(cli.NewUpdateCmd()) - cmd.AddCommand(cli.NewUpgradeCmd()) - cmd.AddCommand(cli.NewTypesCmd()) + cmd.AddCommand(cli.newListCmd()) + cmd.AddCommand(cli.newUpdateCmd()) + cmd.AddCommand(cli.newUpgradeCmd()) + cmd.AddCommand(cli.newTypesCmd()) return cmd } -func (cli cliHub) list(cmd *cobra.Command, args []string) error { - flags := cmd.Flags() - - all, err := flags.GetBool("all") - if err != nil { - return err - } - - hub, err := require.Hub(csConfig, nil, log.StandardLogger()) +func (cli *cliHub) list(all bool) error { + hub, err := require.Hub(cli.cfg(), nil, log.StandardLogger()) if err != nil { return err } @@ -80,24 +77,28 @@ func (cli cliHub) list(cmd *cobra.Command, args []string) error { 
return nil } -func (cli cliHub) NewListCmd() *cobra.Command { +func (cli *cliHub) newListCmd() *cobra.Command { + var all bool + cmd := &cobra.Command{ Use: "list [-a]", Short: "List all installed configurations", Args: cobra.ExactArgs(0), DisableAutoGenTag: true, - RunE: cli.list, + RunE: func(_ *cobra.Command, _ []string) error { + return cli.list(all) + }, } flags := cmd.Flags() - flags.BoolP("all", "a", false, "List disabled items as well") + flags.BoolVarP(&all, "all", "a", false, "List disabled items as well") return cmd } -func (cli cliHub) update(cmd *cobra.Command, args []string) error { - local := csConfig.Hub - remote := require.RemoteHub(csConfig) +func (cli *cliHub) update() error { + local := cli.cfg().Hub + remote := require.RemoteHub(cli.cfg()) // don't use require.Hub because if there is no index file, it would fail hub, err := cwhub.NewHub(local, remote, true, log.StandardLogger()) @@ -112,7 +113,7 @@ func (cli cliHub) update(cmd *cobra.Command, args []string) error { return nil } -func (cli cliHub) NewUpdateCmd() *cobra.Command { +func (cli *cliHub) newUpdateCmd() *cobra.Command { cmd := &cobra.Command{ Use: "update", Short: "Download the latest index (catalog of available configurations)", @@ -121,21 +122,16 @@ Fetches the .index.json file from the hub, containing the list of available conf `, Args: cobra.ExactArgs(0), DisableAutoGenTag: true, - RunE: cli.update, + RunE: func(_ *cobra.Command, _ []string) error { + return cli.update() + }, } return cmd } -func (cli cliHub) upgrade(cmd *cobra.Command, args []string) error { - flags := cmd.Flags() - - force, err := flags.GetBool("force") - if err != nil { - return err - } - - hub, err := require.Hub(csConfig, require.RemoteHub(csConfig), log.StandardLogger()) +func (cli *cliHub) upgrade(force bool) error { + hub, err := require.Hub(cli.cfg(), require.RemoteHub(cli.cfg()), log.StandardLogger()) if err != nil { return err } @@ -167,7 +163,9 @@ func (cli cliHub) upgrade(cmd *cobra.Command, args 
[]string) error { return nil } -func (cli cliHub) NewUpgradeCmd() *cobra.Command { +func (cli *cliHub) newUpgradeCmd() *cobra.Command { + var force bool + cmd := &cobra.Command{ Use: "upgrade", Short: "Upgrade all configurations to their latest version", @@ -176,17 +174,19 @@ Upgrade all configs installed from Crowdsec Hub. Run 'sudo cscli hub update' if `, Args: cobra.ExactArgs(0), DisableAutoGenTag: true, - RunE: cli.upgrade, + RunE: func(_ *cobra.Command, _ []string) error { + return cli.upgrade(force) + }, } flags := cmd.Flags() - flags.Bool("force", false, "Force upgrade: overwrite tainted and outdated files") + flags.BoolVar(&force, "force", false, "Force upgrade: overwrite tainted and outdated files") return cmd } -func (cli cliHub) types(cmd *cobra.Command, args []string) error { - switch csConfig.Cscli.Output { +func (cli *cliHub) types() error { + switch cli.cfg().Cscli.Output { case "human": s, err := yaml.Marshal(cwhub.ItemTypes) if err != nil { @@ -210,7 +210,7 @@ func (cli cliHub) types(cmd *cobra.Command, args []string) error { return nil } -func (cli cliHub) NewTypesCmd() *cobra.Command { +func (cli *cliHub) newTypesCmd() *cobra.Command { cmd := &cobra.Command{ Use: "types", Short: "List supported item types", @@ -219,7 +219,9 @@ List the types of supported hub items. 
`, Args: cobra.ExactArgs(0), DisableAutoGenTag: true, - RunE: cli.types, + RunE: func(_ *cobra.Command, _ []string) error { + return cli.types() + }, } return cmd diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index 91e31a9778c..acbced2e5a3 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -194,7 +194,7 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewCLIDoc().NewCommand(cmd)) cmd.AddCommand(NewCLIVersion().NewCommand()) cmd.AddCommand(NewConfigCmd()) - cmd.AddCommand(NewCLIHub().NewCommand()) + cmd.AddCommand(NewCLIHub(getconfig).NewCommand()) cmd.AddCommand(NewMetricsCmd()) cmd.AddCommand(NewCLIDashboard().NewCommand()) cmd.AddCommand(NewCLIDecisions().NewCommand()) From af14f1085f1d31c75364e040a9d317cff22a093d Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 1 Feb 2024 17:26:06 +0100 Subject: [PATCH 004/318] refact "cscli " (#2782) --- cmd/crowdsec-cli/itemcli.go | 196 +++++++++++++----------------------- cmd/crowdsec-cli/items.go | 2 +- 2 files changed, 72 insertions(+), 126 deletions(-) diff --git a/cmd/crowdsec-cli/itemcli.go b/cmd/crowdsec-cli/itemcli.go index 5b0ad13ffe6..4f3dc40ae04 100644 --- a/cmd/crowdsec-cli/itemcli.go +++ b/cmd/crowdsec-cli/itemcli.go @@ -51,33 +51,16 @@ func (cli cliItem) NewCommand() *cobra.Command { DisableAutoGenTag: true, } - cmd.AddCommand(cli.NewInstallCmd()) - cmd.AddCommand(cli.NewRemoveCmd()) - cmd.AddCommand(cli.NewUpgradeCmd()) - cmd.AddCommand(cli.NewInspectCmd()) - cmd.AddCommand(cli.NewListCmd()) + cmd.AddCommand(cli.newInstallCmd()) + cmd.AddCommand(cli.newRemoveCmd()) + cmd.AddCommand(cli.newUpgradeCmd()) + cmd.AddCommand(cli.newInspectCmd()) + cmd.AddCommand(cli.newListCmd()) return cmd } -func (cli cliItem) Install(cmd *cobra.Command, args []string) error { - flags := cmd.Flags() - - downloadOnly, err := flags.GetBool("download-only") - if err != nil { - return err - } - - force, err 
:= flags.GetBool("force") - if err != nil { - return err - } - - ignoreError, err := flags.GetBool("ignore") - if err != nil { - return err - } - +func (cli cliItem) install(args []string, downloadOnly bool, force bool, ignoreError bool) error { hub, err := require.Hub(csConfig, require.RemoteHub(csConfig), log.StandardLogger()) if err != nil { return err @@ -110,7 +93,13 @@ func (cli cliItem) Install(cmd *cobra.Command, args []string) error { return nil } -func (cli cliItem) NewInstallCmd() *cobra.Command { +func (cli cliItem) newInstallCmd() *cobra.Command { + var ( + downloadOnly bool + force bool + ignoreError bool + ) + cmd := &cobra.Command{ Use: coalesce.String(cli.installHelp.use, "install [item]..."), Short: coalesce.String(cli.installHelp.short, fmt.Sprintf("Install given %s", cli.oneOrMore)), @@ -121,13 +110,15 @@ func (cli cliItem) NewInstallCmd() *cobra.Command { ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { return compAllItems(cli.name, args, toComplete) }, - RunE: cli.Install, + RunE: func(cmd *cobra.Command, args []string) error { + return cli.install(args, downloadOnly, force, ignoreError) + }, } flags := cmd.Flags() - flags.BoolP("download-only", "d", false, "Only download packages, don't enable") - flags.Bool("force", false, "Force install: overwrite tainted and outdated files") - flags.Bool("ignore", false, fmt.Sprintf("Ignore errors when installing multiple %s", cli.name)) + flags.BoolVarP(&downloadOnly, "download-only", "d", false, "Only download packages, don't enable") + flags.BoolVar(&force, "force", false, "Force install: overwrite tainted and outdated files") + flags.BoolVar(&ignoreError, "ignore", false, fmt.Sprintf("Ignore errors when installing multiple %s", cli.name)) return cmd } @@ -145,24 +136,7 @@ func istalledParentNames(item *cwhub.Item) []string { return ret } -func (cli cliItem) Remove(cmd *cobra.Command, args []string) error { - flags := cmd.Flags() - - 
purge, err := flags.GetBool("purge") - if err != nil { - return err - } - - force, err := flags.GetBool("force") - if err != nil { - return err - } - - all, err := flags.GetBool("all") - if err != nil { - return err - } - +func (cli cliItem) remove(args []string, purge bool, force bool, all bool) error { hub, err := require.Hub(csConfig, nil, log.StandardLogger()) if err != nil { return err @@ -243,7 +217,13 @@ func (cli cliItem) Remove(cmd *cobra.Command, args []string) error { return nil } -func (cli cliItem) NewRemoveCmd() *cobra.Command { +func (cli cliItem) newRemoveCmd() *cobra.Command { + var ( + purge bool + force bool + all bool + ) + cmd := &cobra.Command{ Use: coalesce.String(cli.removeHelp.use, "remove [item]..."), Short: coalesce.String(cli.removeHelp.short, fmt.Sprintf("Remove given %s", cli.oneOrMore)), @@ -254,30 +234,20 @@ func (cli cliItem) NewRemoveCmd() *cobra.Command { ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { return compInstalledItems(cli.name, args, toComplete) }, - RunE: cli.Remove, + RunE: func(cmd *cobra.Command, args []string) error { + return cli.remove(args, purge, force, all) + }, } flags := cmd.Flags() - flags.Bool("purge", false, "Delete source file too") - flags.Bool("force", false, "Force remove: remove tainted and outdated files") - flags.Bool("all", false, fmt.Sprintf("Remove all the %s", cli.name)) + flags.BoolVar(&purge, "purge", false, "Delete source file too") + flags.BoolVar(&force, "force", false, "Force remove: remove tainted and outdated files") + flags.BoolVar(&all, "all", false, fmt.Sprintf("Remove all the %s", cli.name)) return cmd } -func (cli cliItem) Upgrade(cmd *cobra.Command, args []string) error { - flags := cmd.Flags() - - force, err := flags.GetBool("force") - if err != nil { - return err - } - - all, err := flags.GetBool("all") - if err != nil { - return err - } - +func (cli cliItem) upgrade(args []string, force bool, all bool) error { 
hub, err := require.Hub(csConfig, require.RemoteHub(csConfig), log.StandardLogger()) if err != nil { return err @@ -341,7 +311,12 @@ func (cli cliItem) Upgrade(cmd *cobra.Command, args []string) error { return nil } -func (cli cliItem) NewUpgradeCmd() *cobra.Command { +func (cli cliItem) newUpgradeCmd() *cobra.Command { + var ( + all bool + force bool + ) + cmd := &cobra.Command{ Use: coalesce.String(cli.upgradeHelp.use, "upgrade [item]..."), Short: coalesce.String(cli.upgradeHelp.short, fmt.Sprintf("Upgrade given %s", cli.oneOrMore)), @@ -351,43 +326,27 @@ func (cli cliItem) NewUpgradeCmd() *cobra.Command { ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { return compInstalledItems(cli.name, args, toComplete) }, - RunE: cli.Upgrade, + RunE: func(cmd *cobra.Command, args []string) error { + return cli.upgrade(args, force, all) + }, } flags := cmd.Flags() - flags.BoolP("all", "a", false, fmt.Sprintf("Upgrade all the %s", cli.name)) - flags.Bool("force", false, "Force upgrade: overwrite tainted and outdated files") + flags.BoolVarP(&all, "all", "a", false, fmt.Sprintf("Upgrade all the %s", cli.name)) + flags.BoolVar(&force, "force", false, "Force upgrade: overwrite tainted and outdated files") return cmd } -func (cli cliItem) Inspect(cmd *cobra.Command, args []string) error { - flags := cmd.Flags() - - url, err := flags.GetString("url") - if err != nil { - return err +func (cli cliItem) inspect(args []string, url string, diff bool, rev bool, noMetrics bool) error { + if rev && !diff { + return fmt.Errorf("--rev can only be used with --diff") } if url != "" { csConfig.Cscli.PrometheusUrl = url } - diff, err := flags.GetBool("diff") - if err != nil { - return err - } - - rev, err := flags.GetBool("rev") - if err != nil { - return err - } - - noMetrics, err := flags.GetBool("no-metrics") - if err != nil { - return err - } - remote := (*cwhub.RemoteHubCfg)(nil) if diff { @@ -411,7 +370,7 @@ func (cli 
cliItem) Inspect(cmd *cobra.Command, args []string) error { continue } - if err = InspectItem(item, !noMetrics); err != nil { + if err = inspectItem(item, !noMetrics); err != nil { return err } @@ -425,7 +384,14 @@ func (cli cliItem) Inspect(cmd *cobra.Command, args []string) error { return nil } -func (cli cliItem) NewInspectCmd() *cobra.Command { +func (cli cliItem) newInspectCmd() *cobra.Command { + var ( + url string + diff bool + rev bool + noMetrics bool + ) + cmd := &cobra.Command{ Use: coalesce.String(cli.inspectHelp.use, "inspect [item]..."), Short: coalesce.String(cli.inspectHelp.short, fmt.Sprintf("Inspect given %s", cli.oneOrMore)), @@ -436,45 +402,21 @@ func (cli cliItem) NewInspectCmd() *cobra.Command { ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { return compInstalledItems(cli.name, args, toComplete) }, - PreRunE: func(cmd *cobra.Command, _ []string) error { - flags := cmd.Flags() - - diff, err := flags.GetBool("diff") - if err != nil { - return err - } - - rev, err := flags.GetBool("rev") - if err != nil { - return err - } - - if rev && !diff { - return fmt.Errorf("--rev can only be used with --diff") - } - - return nil + RunE: func(cmd *cobra.Command, args []string) error { + return cli.inspect(args, url, diff, rev, noMetrics) }, - RunE: cli.Inspect, } flags := cmd.Flags() - flags.StringP("url", "u", "", "Prometheus url") - flags.Bool("diff", false, "Show diff with latest version (for tainted items)") - flags.Bool("rev", false, "Reverse diff output") - flags.Bool("no-metrics", false, "Don't show metrics (when cscli.output=human)") + flags.StringVarP(&url, "url", "u", "", "Prometheus url") + flags.BoolVar(&diff, "diff", false, "Show diff with latest version (for tainted items)") + flags.BoolVar(&rev, "rev", false, "Reverse diff output") + flags.BoolVar(&noMetrics, "no-metrics", false, "Don't show metrics (when cscli.output=human)") return cmd } -func (cli cliItem) List(cmd 
*cobra.Command, args []string) error { - flags := cmd.Flags() - - all, err := flags.GetBool("all") - if err != nil { - return err - } - +func (cli cliItem) list(args []string, all bool) error { hub, err := require.Hub(csConfig, nil, log.StandardLogger()) if err != nil { return err @@ -494,18 +436,22 @@ func (cli cliItem) List(cmd *cobra.Command, args []string) error { return nil } -func (cli cliItem) NewListCmd() *cobra.Command { +func (cli cliItem) newListCmd() *cobra.Command { + var all bool + cmd := &cobra.Command{ Use: coalesce.String(cli.listHelp.use, "list [item... | -a]"), Short: coalesce.String(cli.listHelp.short, fmt.Sprintf("List %s", cli.oneOrMore)), Long: coalesce.String(cli.listHelp.long, fmt.Sprintf("List of installed/available/specified %s", cli.name)), Example: cli.listHelp.example, DisableAutoGenTag: true, - RunE: cli.List, + RunE: func(_ *cobra.Command, args []string) error { + return cli.list(args, all) + }, } flags := cmd.Flags() - flags.BoolP("all", "a", false, "List disabled items as well") + flags.BoolVarP(&all, "all", "a", false, "List disabled items as well") return cmd } diff --git a/cmd/crowdsec-cli/items.go b/cmd/crowdsec-cli/items.go index 851be553f15..ea6d8a25631 100644 --- a/cmd/crowdsec-cli/items.go +++ b/cmd/crowdsec-cli/items.go @@ -143,7 +143,7 @@ func listItems(out io.Writer, itemTypes []string, items map[string][]*cwhub.Item return nil } -func InspectItem(item *cwhub.Item, showMetrics bool) error { +func inspectItem(item *cwhub.Item, showMetrics bool) error { switch csConfig.Cscli.Output { case "human", "raw": enc := yaml.NewEncoder(os.Stdout) From 825c08aa9df2a5889872312eb17163e0effbeb90 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 1 Feb 2024 17:26:46 +0100 Subject: [PATCH 005/318] refact "cscli simulation" (#2801) --- cmd/crowdsec-cli/main.go | 2 +- cmd/crowdsec-cli/simulation.go | 168 ++++++++++++++++----------------- 2 files changed, 85 insertions(+), 85 deletions(-) diff --git 
a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index acbced2e5a3..98d10a6bfb6 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -199,7 +199,7 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewCLIDashboard().NewCommand()) cmd.AddCommand(NewCLIDecisions().NewCommand()) cmd.AddCommand(NewCLIAlerts().NewCommand()) - cmd.AddCommand(NewCLISimulation().NewCommand()) + cmd.AddCommand(NewCLISimulation(getconfig).NewCommand()) cmd.AddCommand(NewCLIBouncers(getconfig).NewCommand()) cmd.AddCommand(NewCLIMachines(getconfig).NewCommand()) cmd.AddCommand(NewCLICapi().NewCommand()) diff --git a/cmd/crowdsec-cli/simulation.go b/cmd/crowdsec-cli/simulation.go index 99dac7c17f2..19eb1b99d01 100644 --- a/cmd/crowdsec-cli/simulation.go +++ b/cmd/crowdsec-cli/simulation.go @@ -13,13 +13,17 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -type cliSimulation struct{} +type cliSimulation struct{ + cfg configGetter +} -func NewCLISimulation() *cliSimulation { - return &cliSimulation{} +func NewCLISimulation(getconfig configGetter) *cliSimulation { + return &cliSimulation{ + cfg: getconfig, + } } -func (cli cliSimulation) NewCommand() *cobra.Command { +func (cli *cliSimulation) NewCommand() *cobra.Command { cmd := &cobra.Command{ Use: "simulation [command]", Short: "Manage simulation status of scenarios", @@ -27,16 +31,16 @@ func (cli cliSimulation) NewCommand() *cobra.Command { cscli simulation enable crowdsecurity/ssh-bf cscli simulation disable crowdsecurity/ssh-bf`, DisableAutoGenTag: true, - PersistentPreRunE: func(cmd *cobra.Command, args []string) error { - if err := csConfig.LoadSimulation(); err != nil { - log.Fatal(err) + PersistentPreRunE: func(_ *cobra.Command, _ []string) error { + if err := cli.cfg().LoadSimulation(); err != nil { + return err } - if csConfig.Cscli.SimulationConfig == nil { + if cli.cfg().Cscli.SimulationConfig == nil { return fmt.Errorf("no simulation configured") } 
return nil }, - PersistentPostRun: func(cmd *cobra.Command, args []string) { + PersistentPostRun: func(cmd *cobra.Command, _ []string) { if cmd.Name() != "status" { log.Infof(ReloadMessage()) } @@ -52,7 +56,7 @@ cscli simulation disable crowdsecurity/ssh-bf`, return cmd } -func (cli cliSimulation) NewEnableCmd() *cobra.Command { +func (cli *cliSimulation) NewEnableCmd() *cobra.Command { var forceGlobalSimulation bool cmd := &cobra.Command{ @@ -60,10 +64,10 @@ func (cli cliSimulation) NewEnableCmd() *cobra.Command { Short: "Enable the simulation, globally or on specified scenarios", Example: `cscli simulation enable`, DisableAutoGenTag: true, - Run: func(cmd *cobra.Command, args []string) { - hub, err := require.Hub(csConfig, nil, nil) + RunE: func(cmd *cobra.Command, args []string) error { + hub, err := require.Hub(cli.cfg(), nil, nil) if err != nil { - log.Fatal(err) + return err } if len(args) > 0 { @@ -76,37 +80,35 @@ func (cli cliSimulation) NewEnableCmd() *cobra.Command { if !item.State.Installed { log.Warningf("'%s' isn't enabled", scenario) } - isExcluded := slices.Contains(csConfig.Cscli.SimulationConfig.Exclusions, scenario) - if *csConfig.Cscli.SimulationConfig.Simulation && !isExcluded { + isExcluded := slices.Contains(cli.cfg().Cscli.SimulationConfig.Exclusions, scenario) + if *cli.cfg().Cscli.SimulationConfig.Simulation && !isExcluded { log.Warning("global simulation is already enabled") continue } - if !*csConfig.Cscli.SimulationConfig.Simulation && isExcluded { + if !*cli.cfg().Cscli.SimulationConfig.Simulation && isExcluded { log.Warningf("simulation for '%s' already enabled", scenario) continue } - if *csConfig.Cscli.SimulationConfig.Simulation && isExcluded { - if err := removeFromExclusion(scenario); err != nil { - log.Fatal(err) - } + if *cli.cfg().Cscli.SimulationConfig.Simulation && isExcluded { + cli.removeFromExclusion(scenario) log.Printf("simulation enabled for '%s'", scenario) continue } - if err := addToExclusion(scenario); err != nil { 
- log.Fatal(err) - } + cli.addToExclusion(scenario) log.Printf("simulation mode for '%s' enabled", scenario) } - if err := dumpSimulationFile(); err != nil { - log.Fatalf("simulation enable: %s", err) + if err := cli.dumpSimulationFile(); err != nil { + return fmt.Errorf("simulation enable: %s", err) } } else if forceGlobalSimulation { - if err := enableGlobalSimulation(); err != nil { - log.Fatalf("unable to enable global simulation mode : %s", err) + if err := cli.enableGlobalSimulation(); err != nil { + return fmt.Errorf("unable to enable global simulation mode: %s", err) } } else { printHelp(cmd) } + + return nil }, } cmd.Flags().BoolVarP(&forceGlobalSimulation, "global", "g", false, "Enable global simulation (reverse mode)") @@ -114,7 +116,7 @@ func (cli cliSimulation) NewEnableCmd() *cobra.Command { return cmd } -func (cli cliSimulation) NewDisableCmd() *cobra.Command { +func (cli *cliSimulation) NewDisableCmd() *cobra.Command { var forceGlobalSimulation bool cmd := &cobra.Command{ @@ -122,18 +124,16 @@ func (cli cliSimulation) NewDisableCmd() *cobra.Command { Short: "Disable the simulation mode. 
Disable only specified scenarios", Example: `cscli simulation disable`, DisableAutoGenTag: true, - Run: func(cmd *cobra.Command, args []string) { + RunE: func(cmd *cobra.Command, args []string) error { if len(args) > 0 { for _, scenario := range args { - isExcluded := slices.Contains(csConfig.Cscli.SimulationConfig.Exclusions, scenario) - if !*csConfig.Cscli.SimulationConfig.Simulation && !isExcluded { + isExcluded := slices.Contains(cli.cfg().Cscli.SimulationConfig.Exclusions, scenario) + if !*cli.cfg().Cscli.SimulationConfig.Simulation && !isExcluded { log.Warningf("%s isn't in simulation mode", scenario) continue } - if !*csConfig.Cscli.SimulationConfig.Simulation && isExcluded { - if err := removeFromExclusion(scenario); err != nil { - log.Fatal(err) - } + if !*cli.cfg().Cscli.SimulationConfig.Simulation && isExcluded { + cli.removeFromExclusion(scenario) log.Printf("simulation mode for '%s' disabled", scenario) continue } @@ -141,21 +141,21 @@ func (cli cliSimulation) NewDisableCmd() *cobra.Command { log.Warningf("simulation mode is enabled but is already disable for '%s'", scenario) continue } - if err := addToExclusion(scenario); err != nil { - log.Fatal(err) - } + cli.addToExclusion(scenario) log.Printf("simulation mode for '%s' disabled", scenario) } - if err := dumpSimulationFile(); err != nil { - log.Fatalf("simulation disable: %s", err) + if err := cli.dumpSimulationFile(); err != nil { + return fmt.Errorf("simulation disable: %s", err) } } else if forceGlobalSimulation { - if err := disableGlobalSimulation(); err != nil { - log.Fatalf("unable to disable global simulation mode : %s", err) + if err := cli.disableGlobalSimulation(); err != nil { + return fmt.Errorf("unable to disable global simulation mode: %s", err) } } else { printHelp(cmd) } + + return nil }, } cmd.Flags().BoolVarP(&forceGlobalSimulation, "global", "g", false, "Disable global simulation (reverse mode)") @@ -163,16 +163,14 @@ func (cli cliSimulation) NewDisableCmd() *cobra.Command { 
return cmd } -func (cli cliSimulation) NewStatusCmd() *cobra.Command { +func (cli *cliSimulation) NewStatusCmd() *cobra.Command { cmd := &cobra.Command{ Use: "status", Short: "Show simulation mode status", Example: `cscli simulation status`, DisableAutoGenTag: true, - Run: func(cmd *cobra.Command, args []string) { - if err := simulationStatus(); err != nil { - log.Fatal(err) - } + Run: func(_ *cobra.Command, _ []string) { + cli.status() }, PersistentPostRun: func(cmd *cobra.Command, args []string) { }, @@ -181,29 +179,29 @@ func (cli cliSimulation) NewStatusCmd() *cobra.Command { return cmd } -func addToExclusion(name string) error { - csConfig.Cscli.SimulationConfig.Exclusions = append(csConfig.Cscli.SimulationConfig.Exclusions, name) - return nil +func (cli *cliSimulation) addToExclusion(name string) { + cfg := cli.cfg() + cfg.Cscli.SimulationConfig.Exclusions = append(cfg.Cscli.SimulationConfig.Exclusions, name) } -func removeFromExclusion(name string) error { - index := slices.Index(csConfig.Cscli.SimulationConfig.Exclusions, name) +func (cli *cliSimulation) removeFromExclusion(name string) { + cfg := cli.cfg() + index := slices.Index(cfg.Cscli.SimulationConfig.Exclusions, name) // Remove element from the slice - csConfig.Cscli.SimulationConfig.Exclusions[index] = csConfig.Cscli.SimulationConfig.Exclusions[len(csConfig.Cscli.SimulationConfig.Exclusions)-1] - csConfig.Cscli.SimulationConfig.Exclusions[len(csConfig.Cscli.SimulationConfig.Exclusions)-1] = "" - csConfig.Cscli.SimulationConfig.Exclusions = csConfig.Cscli.SimulationConfig.Exclusions[:len(csConfig.Cscli.SimulationConfig.Exclusions)-1] - - return nil + cfg.Cscli.SimulationConfig.Exclusions[index] = cfg.Cscli.SimulationConfig.Exclusions[len(cfg.Cscli.SimulationConfig.Exclusions)-1] + cfg.Cscli.SimulationConfig.Exclusions[len(cfg.Cscli.SimulationConfig.Exclusions)-1] = "" + cfg.Cscli.SimulationConfig.Exclusions = cfg.Cscli.SimulationConfig.Exclusions[:len(cfg.Cscli.SimulationConfig.Exclusions)-1] } -func 
enableGlobalSimulation() error { - csConfig.Cscli.SimulationConfig.Simulation = new(bool) - *csConfig.Cscli.SimulationConfig.Simulation = true - csConfig.Cscli.SimulationConfig.Exclusions = []string{} +func (cli *cliSimulation) enableGlobalSimulation() error { + cfg := cli.cfg() + cfg.Cscli.SimulationConfig.Simulation = new(bool) + *cfg.Cscli.SimulationConfig.Simulation = true + cfg.Cscli.SimulationConfig.Exclusions = []string{} - if err := dumpSimulationFile(); err != nil { - log.Fatalf("unable to dump simulation file: %s", err) + if err := cli.dumpSimulationFile(); err != nil { + return fmt.Errorf("unable to dump simulation file: %s", err) } log.Printf("global simulation: enabled") @@ -211,59 +209,61 @@ func enableGlobalSimulation() error { return nil } -func dumpSimulationFile() error { - newConfigSim, err := yaml.Marshal(csConfig.Cscli.SimulationConfig) +func (cli *cliSimulation) dumpSimulationFile() error { + cfg := cli.cfg() + newConfigSim, err := yaml.Marshal(cfg.Cscli.SimulationConfig) if err != nil { return fmt.Errorf("unable to marshal simulation configuration: %s", err) } - err = os.WriteFile(csConfig.ConfigPaths.SimulationFilePath, newConfigSim, 0o644) + err = os.WriteFile(cfg.ConfigPaths.SimulationFilePath, newConfigSim, 0o644) if err != nil { - return fmt.Errorf("write simulation config in '%s' failed: %s", csConfig.ConfigPaths.SimulationFilePath, err) + return fmt.Errorf("write simulation config in '%s' failed: %s", cfg.ConfigPaths.SimulationFilePath, err) } - log.Debugf("updated simulation file %s", csConfig.ConfigPaths.SimulationFilePath) + log.Debugf("updated simulation file %s", cfg.ConfigPaths.SimulationFilePath) return nil } -func disableGlobalSimulation() error { - csConfig.Cscli.SimulationConfig.Simulation = new(bool) - *csConfig.Cscli.SimulationConfig.Simulation = false +func (cli *cliSimulation) disableGlobalSimulation() error { + cfg := cli.cfg() + cfg.Cscli.SimulationConfig.Simulation = new(bool) + *cfg.Cscli.SimulationConfig.Simulation = 
false - csConfig.Cscli.SimulationConfig.Exclusions = []string{} - newConfigSim, err := yaml.Marshal(csConfig.Cscli.SimulationConfig) + cfg.Cscli.SimulationConfig.Exclusions = []string{} + newConfigSim, err := yaml.Marshal(cfg.Cscli.SimulationConfig) if err != nil { return fmt.Errorf("unable to marshal new simulation configuration: %s", err) } - err = os.WriteFile(csConfig.ConfigPaths.SimulationFilePath, newConfigSim, 0o644) + err = os.WriteFile(cfg.ConfigPaths.SimulationFilePath, newConfigSim, 0o644) if err != nil { - return fmt.Errorf("unable to write new simulation config in '%s' : %s", csConfig.ConfigPaths.SimulationFilePath, err) + return fmt.Errorf("unable to write new simulation config in '%s' : %s", cfg.ConfigPaths.SimulationFilePath, err) } log.Printf("global simulation: disabled") return nil } -func simulationStatus() error { - if csConfig.Cscli.SimulationConfig == nil { +func (cli *cliSimulation) status() { + cfg := cli.cfg() + if cfg.Cscli.SimulationConfig == nil { log.Printf("global simulation: disabled (configuration file is missing)") - return nil + return } - if *csConfig.Cscli.SimulationConfig.Simulation { + if *cfg.Cscli.SimulationConfig.Simulation { log.Println("global simulation: enabled") - if len(csConfig.Cscli.SimulationConfig.Exclusions) > 0 { + if len(cfg.Cscli.SimulationConfig.Exclusions) > 0 { log.Println("Scenarios not in simulation mode :") - for _, scenario := range csConfig.Cscli.SimulationConfig.Exclusions { + for _, scenario := range cfg.Cscli.SimulationConfig.Exclusions { log.Printf(" - %s", scenario) } } } else { log.Println("global simulation: disabled") - if len(csConfig.Cscli.SimulationConfig.Exclusions) > 0 { + if len(cfg.Cscli.SimulationConfig.Exclusions) > 0 { log.Println("Scenarios in simulation mode :") - for _, scenario := range csConfig.Cscli.SimulationConfig.Exclusions { + for _, scenario := range cfg.Cscli.SimulationConfig.Exclusions { log.Printf(" - %s", scenario) } } } - return nil } From 
45c669fb654b289ccd9125c852451551ce07c547 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 1 Feb 2024 17:27:00 +0100 Subject: [PATCH 006/318] refact "cscli papi" (#2802) --- cmd/crowdsec-cli/main.go | 6 +--- cmd/crowdsec-cli/papi.go | 67 +++++++++++++++++++++++----------------- 2 files changed, 40 insertions(+), 33 deletions(-) diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index 98d10a6bfb6..a642098768b 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -210,7 +210,7 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewCLIHubTest().NewCommand()) cmd.AddCommand(NewCLINotifications().NewCommand()) cmd.AddCommand(NewCLISupport().NewCommand()) - cmd.AddCommand(NewCLIPapi().NewCommand()) + cmd.AddCommand(NewCLIPapi(getconfig).NewCommand()) cmd.AddCommand(NewCLICollection().NewCommand()) cmd.AddCommand(NewCLIParser().NewCommand()) cmd.AddCommand(NewCLIScenario().NewCommand()) @@ -223,10 +223,6 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewSetupCmd()) } - if fflag.PapiClient.IsEnabled() { - cmd.AddCommand(NewCLIPapi().NewCommand()) - } - if err := cmd.Execute(); err != nil { log.Fatal(err) } diff --git a/cmd/crowdsec-cli/papi.go b/cmd/crowdsec-cli/papi.go index 606d8b415a0..43dcc30e0db 100644 --- a/cmd/crowdsec-cli/papi.go +++ b/cmd/crowdsec-cli/papi.go @@ -1,6 +1,7 @@ package main import ( + "fmt" "time" log "github.com/sirupsen/logrus" @@ -15,26 +16,31 @@ import ( "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" ) -type cliPapi struct {} +type cliPapi struct { + cfg configGetter +} -func NewCLIPapi() *cliPapi { - return &cliPapi{} +func NewCLIPapi(getconfig configGetter) *cliPapi { + return &cliPapi{ + cfg: getconfig, + } } -func (cli cliPapi) NewCommand() *cobra.Command { - var cmd = &cobra.Command{ +func (cli *cliPapi) NewCommand() *cobra.Command { + cmd := &cobra.Command{ 
Use: "papi [action]", Short: "Manage interaction with Polling API (PAPI)", Args: cobra.MinimumNArgs(1), DisableAutoGenTag: true, PersistentPreRunE: func(cmd *cobra.Command, args []string) error { - if err := require.LAPI(csConfig); err != nil { + cfg := cli.cfg() + if err := require.LAPI(cfg); err != nil { return err } - if err := require.CAPI(csConfig); err != nil { + if err := require.CAPI(cfg); err != nil { return err } - if err := require.PAPI(csConfig); err != nil { + if err := require.PAPI(cfg); err != nil { return err } return nil @@ -47,35 +53,36 @@ func (cli cliPapi) NewCommand() *cobra.Command { return cmd } -func (cli cliPapi) NewStatusCmd() *cobra.Command { +func (cli *cliPapi) NewStatusCmd() *cobra.Command { cmd := &cobra.Command{ Use: "status", Short: "Get status of the Polling API", Args: cobra.MinimumNArgs(0), DisableAutoGenTag: true, - Run: func(cmd *cobra.Command, args []string) { + RunE: func(cmd *cobra.Command, args []string) error { var err error - dbClient, err = database.NewClient(csConfig.DbConfig) + cfg := cli.cfg() + dbClient, err = database.NewClient(cfg.DbConfig) if err != nil { - log.Fatalf("unable to initialize database client : %s", err) + return fmt.Errorf("unable to initialize database client: %s", err) } - apic, err := apiserver.NewAPIC(csConfig.API.Server.OnlineClient, dbClient, csConfig.API.Server.ConsoleConfig, csConfig.API.Server.CapiWhitelists) + apic, err := apiserver.NewAPIC(cfg.API.Server.OnlineClient, dbClient, cfg.API.Server.ConsoleConfig, cfg.API.Server.CapiWhitelists) if err != nil { - log.Fatalf("unable to initialize API client : %s", err) + return fmt.Errorf("unable to initialize API client: %s", err) } - papi, err := apiserver.NewPAPI(apic, dbClient, csConfig.API.Server.ConsoleConfig, log.GetLevel()) + papi, err := apiserver.NewPAPI(apic, dbClient, cfg.API.Server.ConsoleConfig, log.GetLevel()) if err != nil { - log.Fatalf("unable to initialize PAPI client : %s", err) + return fmt.Errorf("unable to initialize PAPI 
client: %s", err) } perms, err := papi.GetPermissions() if err != nil { - log.Fatalf("unable to get PAPI permissions: %s", err) + return fmt.Errorf("unable to get PAPI permissions: %s", err) } var lastTimestampStr *string lastTimestampStr, err = dbClient.GetConfigItem(apiserver.PapiPullKey) @@ -90,45 +97,48 @@ func (cli cliPapi) NewStatusCmd() *cobra.Command { for _, sub := range perms.Categories { log.Infof(" - %s", sub) } + + return nil }, } return cmd } -func (cli cliPapi) NewSyncCmd() *cobra.Command { +func (cli *cliPapi) NewSyncCmd() *cobra.Command { cmd := &cobra.Command{ Use: "sync", Short: "Sync with the Polling API, pulling all non-expired orders for the instance", Args: cobra.MinimumNArgs(0), DisableAutoGenTag: true, - Run: func(cmd *cobra.Command, args []string) { + RunE: func(cmd *cobra.Command, args []string) error { var err error + cfg := cli.cfg() t := tomb.Tomb{} - dbClient, err = database.NewClient(csConfig.DbConfig) + + dbClient, err = database.NewClient(cfg.DbConfig) if err != nil { - log.Fatalf("unable to initialize database client : %s", err) + return fmt.Errorf("unable to initialize database client: %s", err) } - apic, err := apiserver.NewAPIC(csConfig.API.Server.OnlineClient, dbClient, csConfig.API.Server.ConsoleConfig, csConfig.API.Server.CapiWhitelists) - + apic, err := apiserver.NewAPIC(cfg.API.Server.OnlineClient, dbClient, cfg.API.Server.ConsoleConfig, cfg.API.Server.CapiWhitelists) if err != nil { - log.Fatalf("unable to initialize API client : %s", err) + return fmt.Errorf("unable to initialize API client: %s", err) } t.Go(apic.Push) - papi, err := apiserver.NewPAPI(apic, dbClient, csConfig.API.Server.ConsoleConfig, log.GetLevel()) - + papi, err := apiserver.NewPAPI(apic, dbClient, cfg.API.Server.ConsoleConfig, log.GetLevel()) if err != nil { - log.Fatalf("unable to initialize PAPI client : %s", err) + return fmt.Errorf("unable to initialize PAPI client: %s", err) } + t.Go(papi.SyncDecisions) err = papi.PullOnce(time.Time{}, true) if 
err != nil { - log.Fatalf("unable to sync decisions: %s", err) + return fmt.Errorf("unable to sync decisions: %s", err) } log.Infof("Sending acknowledgements to CAPI") @@ -138,6 +148,7 @@ func (cli cliPapi) NewSyncCmd() *cobra.Command { t.Wait() time.Sleep(5 * time.Second) //FIXME: the push done by apic.Push is run inside a sub goroutine, sleep to make sure it's done + return nil }, } From f5fbe4a200105af8b84a47467addc4253a7c0c1b Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 1 Feb 2024 17:27:15 +0100 Subject: [PATCH 007/318] refact "cscli dashboard" (#2803) --- cmd/crowdsec-cli/dashboard.go | 100 ++++++++++++---------- cmd/crowdsec-cli/dashboard_unsupported.go | 12 ++- cmd/crowdsec-cli/main.go | 2 +- 3 files changed, 64 insertions(+), 50 deletions(-) diff --git a/cmd/crowdsec-cli/dashboard.go b/cmd/crowdsec-cli/dashboard.go index a3701c4dbbb..f32b9f061f9 100644 --- a/cmd/crowdsec-cli/dashboard.go +++ b/cmd/crowdsec-cli/dashboard.go @@ -43,14 +43,17 @@ var ( // information needed to set up a random password on user's behalf ) -type cliDashboard struct{} +type cliDashboard struct{ + cfg configGetter +} -func NewCLIDashboard() *cliDashboard { - return &cliDashboard{} +func NewCLIDashboard(getconfig configGetter) *cliDashboard { + return &cliDashboard{ + cfg: getconfig, + } } -func (cli cliDashboard) NewCommand() *cobra.Command { - /* ---- UPDATE COMMAND */ +func (cli *cliDashboard) NewCommand() *cobra.Command { cmd := &cobra.Command{ Use: "dashboard [command]", Short: "Manage your metabase dashboard container [requires local API]", @@ -65,8 +68,9 @@ cscli dashboard start cscli dashboard stop cscli dashboard remove `, - PersistentPreRunE: func(cmd *cobra.Command, args []string) error { - if err := require.LAPI(csConfig); err != nil { + PersistentPreRunE: func(_ *cobra.Command, _ []string) error { + cfg := cli.cfg() + if err := require.LAPI(cfg); err != nil { return err } @@ -74,13 +78,13 @@ cscli dashboard remove return err } 
- metabaseConfigFolderPath := filepath.Join(csConfig.ConfigPaths.ConfigDir, metabaseConfigFolder) + metabaseConfigFolderPath := filepath.Join(cfg.ConfigPaths.ConfigDir, metabaseConfigFolder) metabaseConfigPath = filepath.Join(metabaseConfigFolderPath, metabaseConfigFile) if err := os.MkdirAll(metabaseConfigFolderPath, os.ModePerm); err != nil { return err } - if err := require.DB(csConfig); err != nil { + if err := require.DB(cfg); err != nil { return err } @@ -99,16 +103,16 @@ cscli dashboard remove }, } - cmd.AddCommand(cli.NewSetupCmd()) - cmd.AddCommand(cli.NewStartCmd()) - cmd.AddCommand(cli.NewStopCmd()) - cmd.AddCommand(cli.NewShowPasswordCmd()) - cmd.AddCommand(cli.NewRemoveCmd()) + cmd.AddCommand(cli.newSetupCmd()) + cmd.AddCommand(cli.newStartCmd()) + cmd.AddCommand(cli.newStopCmd()) + cmd.AddCommand(cli.newShowPasswordCmd()) + cmd.AddCommand(cli.newRemoveCmd()) return cmd } -func (cli cliDashboard) NewSetupCmd() *cobra.Command { +func (cli *cliDashboard) newSetupCmd() *cobra.Command { var force bool cmd := &cobra.Command{ @@ -122,9 +126,9 @@ cscli dashboard setup cscli dashboard setup --listen 0.0.0.0 cscli dashboard setup -l 0.0.0.0 -p 443 --password `, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(_ *cobra.Command, _ []string) error { if metabaseDbPath == "" { - metabaseDbPath = csConfig.ConfigPaths.DataDir + metabaseDbPath = cli.cfg().ConfigPaths.DataDir } if metabasePassword == "" { @@ -145,10 +149,10 @@ cscli dashboard setup -l 0.0.0.0 -p 443 --password if err != nil { return err } - if err = chownDatabase(dockerGroup.Gid); err != nil { + if err = cli.chownDatabase(dockerGroup.Gid); err != nil { return err } - mb, err := metabase.SetupMetabase(csConfig.API.Server.DbConfig, metabaseListenAddress, metabaseListenPort, metabaseUser, metabasePassword, metabaseDbPath, dockerGroup.Gid, metabaseContainerID, metabaseImage) + mb, err := metabase.SetupMetabase(cli.cfg().API.Server.DbConfig, metabaseListenAddress, metabaseListenPort, 
metabaseUser, metabasePassword, metabaseDbPath, dockerGroup.Gid, metabaseContainerID, metabaseImage) if err != nil { return err } @@ -164,26 +168,28 @@ cscli dashboard setup -l 0.0.0.0 -p 443 --password return nil }, } - cmd.Flags().BoolVarP(&force, "force", "f", false, "Force setup : override existing files") - cmd.Flags().StringVarP(&metabaseDbPath, "dir", "d", "", "Shared directory with metabase container") - cmd.Flags().StringVarP(&metabaseListenAddress, "listen", "l", metabaseListenAddress, "Listen address of container") - cmd.Flags().StringVar(&metabaseImage, "metabase-image", metabaseImage, "Metabase image to use") - cmd.Flags().StringVarP(&metabaseListenPort, "port", "p", metabaseListenPort, "Listen port of container") - cmd.Flags().BoolVarP(&forceYes, "yes", "y", false, "force yes") - //cmd.Flags().StringVarP(&metabaseUser, "user", "u", "crowdsec@crowdsec.net", "metabase user") - cmd.Flags().StringVar(&metabasePassword, "password", "", "metabase password") + + flags := cmd.Flags() + flags.BoolVarP(&force, "force", "f", false, "Force setup : override existing files") + flags.StringVarP(&metabaseDbPath, "dir", "d", "", "Shared directory with metabase container") + flags.StringVarP(&metabaseListenAddress, "listen", "l", metabaseListenAddress, "Listen address of container") + flags.StringVar(&metabaseImage, "metabase-image", metabaseImage, "Metabase image to use") + flags.StringVarP(&metabaseListenPort, "port", "p", metabaseListenPort, "Listen port of container") + flags.BoolVarP(&forceYes, "yes", "y", false, "force yes") + //flags.StringVarP(&metabaseUser, "user", "u", "crowdsec@crowdsec.net", "metabase user") + flags.StringVar(&metabasePassword, "password", "", "metabase password") return cmd } -func (cli cliDashboard) NewStartCmd() *cobra.Command { +func (cli *cliDashboard) newStartCmd() *cobra.Command { cmd := &cobra.Command{ Use: "start", Short: "Start the metabase container.", Long: `Stats the metabase container using docker.`, Args: cobra.ExactArgs(0), 
DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(_ *cobra.Command, _ []string) error { mb, err := metabase.NewMetabase(metabaseConfigPath, metabaseContainerID) if err != nil { return err @@ -200,19 +206,20 @@ func (cli cliDashboard) NewStartCmd() *cobra.Command { return nil }, } + cmd.Flags().BoolVarP(&forceYes, "yes", "y", false, "force yes") return cmd } -func (cli cliDashboard) NewStopCmd() *cobra.Command { +func (cli *cliDashboard) newStopCmd() *cobra.Command { cmd := &cobra.Command{ Use: "stop", Short: "Stops the metabase container.", Long: `Stops the metabase container using docker.`, Args: cobra.ExactArgs(0), DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(_ *cobra.Command, _ []string) error { if err := metabase.StopContainer(metabaseContainerID); err != nil { return fmt.Errorf("unable to stop container '%s': %s", metabaseContainerID, err) } @@ -223,12 +230,12 @@ func (cli cliDashboard) NewStopCmd() *cobra.Command { return cmd } -func (cli cliDashboard) NewShowPasswordCmd() *cobra.Command { +func (cli *cliDashboard) newShowPasswordCmd() *cobra.Command { cmd := &cobra.Command{Use: "show-password", Short: "displays password of metabase.", Args: cobra.ExactArgs(0), DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(_ *cobra.Command, _ []string) error { m := metabase.Metabase{} if err := m.LoadConfig(metabaseConfigPath); err != nil { return err @@ -241,7 +248,7 @@ func (cli cliDashboard) NewShowPasswordCmd() *cobra.Command { return cmd } -func (cli cliDashboard) NewRemoveCmd() *cobra.Command { +func (cli *cliDashboard) newRemoveCmd() *cobra.Command { var force bool cmd := &cobra.Command{ @@ -254,7 +261,7 @@ func (cli cliDashboard) NewRemoveCmd() *cobra.Command { cscli dashboard remove cscli dashboard remove --force `, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(_ *cobra.Command, _ []string) error { if 
!forceYes { var answer bool prompt := &survey.Confirm{ @@ -291,8 +298,8 @@ cscli dashboard remove --force } log.Infof("container %s stopped & removed", metabaseContainerID) } - log.Debugf("Removing metabase db %s", csConfig.ConfigPaths.DataDir) - if err := metabase.RemoveDatabase(csConfig.ConfigPaths.DataDir); err != nil { + log.Debugf("Removing metabase db %s", cli.cfg().ConfigPaths.DataDir) + if err := metabase.RemoveDatabase(cli.cfg().ConfigPaths.DataDir); err != nil { log.Warnf("failed to remove metabase internal db : %s", err) } if force { @@ -309,8 +316,10 @@ cscli dashboard remove --force return nil }, } - cmd.Flags().BoolVarP(&force, "force", "f", false, "Remove also the metabase image") - cmd.Flags().BoolVarP(&forceYes, "yes", "y", false, "force yes") + + flags := cmd.Flags() + flags.BoolVarP(&force, "force", "f", false, "Remove also the metabase image") + flags.BoolVarP(&forceYes, "yes", "y", false, "force yes") return cmd } @@ -431,22 +440,23 @@ func checkGroups(forceYes *bool) (*user.Group, error) { return user.LookupGroup(crowdsecGroup) } -func chownDatabase(gid string) error { +func (cli *cliDashboard) chownDatabase(gid string) error { + cfg := cli.cfg() intID, err := strconv.Atoi(gid) if err != nil { return fmt.Errorf("unable to convert group ID to int: %s", err) } - if stat, err := os.Stat(csConfig.DbConfig.DbPath); !os.IsNotExist(err) { + if stat, err := os.Stat(cfg.DbConfig.DbPath); !os.IsNotExist(err) { info := stat.Sys() - if err := os.Chown(csConfig.DbConfig.DbPath, int(info.(*syscall.Stat_t).Uid), intID); err != nil { - return fmt.Errorf("unable to chown sqlite db file '%s': %s", csConfig.DbConfig.DbPath, err) + if err := os.Chown(cfg.DbConfig.DbPath, int(info.(*syscall.Stat_t).Uid), intID); err != nil { + return fmt.Errorf("unable to chown sqlite db file '%s': %s", cfg.DbConfig.DbPath, err) } } - if csConfig.DbConfig.Type == "sqlite" && csConfig.DbConfig.UseWal != nil && *csConfig.DbConfig.UseWal { + if cfg.DbConfig.Type == "sqlite" && 
cfg.DbConfig.UseWal != nil && *cfg.DbConfig.UseWal { for _, ext := range []string{"-wal", "-shm"} { - file := csConfig.DbConfig.DbPath + ext + file := cfg.DbConfig.DbPath + ext if stat, err := os.Stat(file); !os.IsNotExist(err) { info := stat.Sys() if err := os.Chown(file, int(info.(*syscall.Stat_t).Uid), intID); err != nil { diff --git a/cmd/crowdsec-cli/dashboard_unsupported.go b/cmd/crowdsec-cli/dashboard_unsupported.go index 072ff525b19..4cf8e18b503 100644 --- a/cmd/crowdsec-cli/dashboard_unsupported.go +++ b/cmd/crowdsec-cli/dashboard_unsupported.go @@ -9,17 +9,21 @@ import ( "github.com/spf13/cobra" ) -type cliDashboard struct{} +type cliDashboard struct{ + cfg configGetter +} -func NewCLIDashboard() *cliDashboard { - return &cliDashboard{} +func NewCLIDashboard(getconfig configGetter) *cliDashboard { + return &cliDashboard{ + cfg: getconfig, + } } func (cli cliDashboard) NewCommand() *cobra.Command { cmd := &cobra.Command{ Use: "dashboard", DisableAutoGenTag: true, - Run: func(cmd *cobra.Command, args []string) { + Run: func(_ *cobra.Command, _ []string) { log.Infof("Dashboard command is disabled on %s", runtime.GOOS) }, } diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index a642098768b..055c38e2788 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -196,7 +196,7 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewConfigCmd()) cmd.AddCommand(NewCLIHub(getconfig).NewCommand()) cmd.AddCommand(NewMetricsCmd()) - cmd.AddCommand(NewCLIDashboard().NewCommand()) + cmd.AddCommand(NewCLIDashboard(getconfig).NewCommand()) cmd.AddCommand(NewCLIDecisions().NewCommand()) cmd.AddCommand(NewCLIAlerts().NewCommand()) cmd.AddCommand(NewCLISimulation(getconfig).NewCommand()) From 4160bb8102a07f7ea4d96098821f42951280dd22 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 1 Feb 2024 22:36:21 +0100 Subject: [PATCH 008/318] refact "cscli decisions" 
(#2804) * refact "cscli decisions" * CI: relax mysql test timing * lint --- cmd/crowdsec-cli/dashboard.go | 3 +- cmd/crowdsec-cli/decisions.go | 57 ++++++++++++++++------------ cmd/crowdsec-cli/decisions_import.go | 4 +- cmd/crowdsec-cli/decisions_table.go | 6 ++- cmd/crowdsec-cli/flag.go | 1 + cmd/crowdsec-cli/machines.go | 16 ++++++-- cmd/crowdsec-cli/main.go | 4 +- cmd/crowdsec-cli/papi.go | 6 +-- cmd/crowdsec-cli/simulation.go | 11 +++++- cmd/crowdsec-cli/support.go | 30 ++++++++++++--- cmd/crowdsec-cli/utils.go | 4 +- pkg/csconfig/api.go | 3 +- pkg/csconfig/config.go | 4 +- test/bats/97_ipv6_single.bats | 2 +- 14 files changed, 103 insertions(+), 48 deletions(-) diff --git a/cmd/crowdsec-cli/dashboard.go b/cmd/crowdsec-cli/dashboard.go index f32b9f061f9..64cb7577e89 100644 --- a/cmd/crowdsec-cli/dashboard.go +++ b/cmd/crowdsec-cli/dashboard.go @@ -176,7 +176,7 @@ cscli dashboard setup -l 0.0.0.0 -p 443 --password flags.StringVar(&metabaseImage, "metabase-image", metabaseImage, "Metabase image to use") flags.StringVarP(&metabaseListenPort, "port", "p", metabaseListenPort, "Listen port of container") flags.BoolVarP(&forceYes, "yes", "y", false, "force yes") - //flags.StringVarP(&metabaseUser, "user", "u", "crowdsec@crowdsec.net", "metabase user") + // flags.StringVarP(&metabaseUser, "user", "u", "crowdsec@crowdsec.net", "metabase user") flags.StringVar(&metabasePassword, "password", "", "metabase password") return cmd @@ -443,6 +443,7 @@ func checkGroups(forceYes *bool) (*user.Group, error) { func (cli *cliDashboard) chownDatabase(gid string) error { cfg := cli.cfg() intID, err := strconv.Atoi(gid) + if err != nil { return fmt.Errorf("unable to convert group ID to int: %s", err) } diff --git a/cmd/crowdsec-cli/decisions.go b/cmd/crowdsec-cli/decisions.go index 683f100d4f7..c5839ae0079 100644 --- a/cmd/crowdsec-cli/decisions.go +++ b/cmd/crowdsec-cli/decisions.go @@ -25,7 +25,7 @@ import ( var Client *apiclient.ApiClient -func DecisionsToTable(alerts 
*models.GetAlertsResponse, printMachine bool) error { +func (cli *cliDecisions) decisionsToTable(alerts *models.GetAlertsResponse, printMachine bool) error { /*here we cheat a bit : to make it more readable for the user, we dedup some entries*/ spamLimit := make(map[string]bool) skipped := 0 @@ -49,7 +49,8 @@ func DecisionsToTable(alerts *models.GetAlertsResponse, printMachine bool) error alertItem.Decisions = newDecisions } - if csConfig.Cscli.Output == "raw" { + switch cli.cfg().Cscli.Output { + case "raw": csvwriter := csv.NewWriter(os.Stdout) header := []string{"id", "source", "ip", "reason", "action", "country", "as", "events_count", "expiration", "simulated", "alert_id"} @@ -89,21 +90,24 @@ func DecisionsToTable(alerts *models.GetAlertsResponse, printMachine bool) error } csvwriter.Flush() - } else if csConfig.Cscli.Output == "json" { + case "json": if *alerts == nil { // avoid returning "null" in `json" // could be cleaner if we used slice of alerts directly fmt.Println("[]") return nil } + x, _ := json.MarshalIndent(alerts, "", " ") fmt.Printf("%s", string(x)) - } else if csConfig.Cscli.Output == "human" { + case "human": if len(*alerts) == 0 { fmt.Println("No active decisions") return nil } - decisionsTable(color.Output, alerts, printMachine) + + cli.decisionsTable(color.Output, alerts, printMachine) + if skipped > 0 { fmt.Printf("%d duplicated entries skipped\n", skipped) } @@ -113,13 +117,17 @@ func DecisionsToTable(alerts *models.GetAlertsResponse, printMachine bool) error } -type cliDecisions struct {} +type cliDecisions struct { + cfg configGetter +} -func NewCLIDecisions() *cliDecisions { - return &cliDecisions{} +func NewCLIDecisions(getconfig configGetter) *cliDecisions { + return &cliDecisions{ + cfg: getconfig, + } } -func (cli cliDecisions) NewCommand() *cobra.Command { +func (cli *cliDecisions) NewCommand() *cobra.Command { cmd := &cobra.Command{ Use: "decisions [action]", Short: "Manage decisions", @@ -130,16 +138,17 @@ func (cli cliDecisions) 
NewCommand() *cobra.Command { Args: cobra.MinimumNArgs(1), DisableAutoGenTag: true, PersistentPreRunE: func(_ *cobra.Command, _ []string) error { - if err := csConfig.LoadAPIClient(); err != nil { + cfg := cli.cfg() + if err := cfg.LoadAPIClient(); err != nil { return fmt.Errorf("loading api client: %w", err) } - password := strfmt.Password(csConfig.API.Client.Credentials.Password) - apiurl, err := url.Parse(csConfig.API.Client.Credentials.URL) + password := strfmt.Password(cfg.API.Client.Credentials.Password) + apiurl, err := url.Parse(cfg.API.Client.Credentials.URL) if err != nil { - return fmt.Errorf("parsing api url %s: %w", csConfig.API.Client.Credentials.URL, err) + return fmt.Errorf("parsing api url %s: %w", cfg.API.Client.Credentials.URL, err) } Client, err = apiclient.NewClient(&apiclient.Config{ - MachineID: csConfig.API.Client.Credentials.Login, + MachineID: cfg.API.Client.Credentials.Login, Password: password, UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), URL: apiurl, @@ -152,15 +161,15 @@ func (cli cliDecisions) NewCommand() *cobra.Command { }, } - cmd.AddCommand(cli.NewListCmd()) - cmd.AddCommand(cli.NewAddCmd()) - cmd.AddCommand(cli.NewDeleteCmd()) - cmd.AddCommand(cli.NewImportCmd()) + cmd.AddCommand(cli.newListCmd()) + cmd.AddCommand(cli.newAddCmd()) + cmd.AddCommand(cli.newDeleteCmd()) + cmd.AddCommand(cli.newImportCmd()) return cmd } -func (cli cliDecisions) NewListCmd() *cobra.Command { +func (cli *cliDecisions) newListCmd() *cobra.Command { var filter = apiclient.AlertsListOpts{ ValueEquals: new(string), ScopeEquals: new(string), @@ -262,7 +271,7 @@ cscli decisions list -t ban return fmt.Errorf("unable to retrieve decisions: %w", err) } - err = DecisionsToTable(alerts, printMachine) + err = cli.decisionsToTable(alerts, printMachine) if err != nil { return fmt.Errorf("unable to print decisions: %w", err) } @@ -289,7 +298,7 @@ cscli decisions list -t ban return cmd } -func (cli cliDecisions) NewAddCmd() *cobra.Command { +func (cli 
*cliDecisions) newAddCmd() *cobra.Command { var ( addIP string addRange string @@ -325,7 +334,7 @@ cscli decisions add --scope username --value foobar createdAt := time.Now().UTC().Format(time.RFC3339) /*take care of shorthand options*/ - if err := manageCliDecisionAlerts(&addIP, &addRange, &addScope, &addValue); err != nil { + if err = manageCliDecisionAlerts(&addIP, &addRange, &addScope, &addValue); err != nil { return err } @@ -341,7 +350,7 @@ cscli decisions add --scope username --value foobar } if addReason == "" { - addReason = fmt.Sprintf("manual '%s' from '%s'", addType, csConfig.API.Client.Credentials.Login) + addReason = fmt.Sprintf("manual '%s' from '%s'", addType, cli.cfg().API.Client.Credentials.Login) } decision := models.Decision{ Duration: &addDuration, @@ -400,7 +409,7 @@ cscli decisions add --scope username --value foobar return cmd } -func (cli cliDecisions) NewDeleteCmd() *cobra.Command { +func (cli *cliDecisions) newDeleteCmd() *cobra.Command { var delFilter = apiclient.DecisionsDeleteOpts{ ScopeEquals: new(string), ValueEquals: new(string), diff --git a/cmd/crowdsec-cli/decisions_import.go b/cmd/crowdsec-cli/decisions_import.go index 2d7ee485bd1..45d1841a603 100644 --- a/cmd/crowdsec-cli/decisions_import.go +++ b/cmd/crowdsec-cli/decisions_import.go @@ -67,7 +67,7 @@ func parseDecisionList(content []byte, format string) ([]decisionRaw, error) { } -func (cli cliDecisions) runImport(cmd *cobra.Command, args []string) error { +func (cli *cliDecisions) runImport(cmd *cobra.Command, args []string) error { flags := cmd.Flags() input, err := flags.GetString("input") @@ -236,7 +236,7 @@ func (cli cliDecisions) runImport(cmd *cobra.Command, args []string) error { } -func (cli cliDecisions) NewImportCmd() *cobra.Command { +func (cli *cliDecisions) newImportCmd() *cobra.Command { cmd := &cobra.Command{ Use: "import [options]", Short: "Import decisions from a file or pipe", diff --git a/cmd/crowdsec-cli/decisions_table.go 
b/cmd/crowdsec-cli/decisions_table.go index d8d5e032594..10021e4dd4b 100644 --- a/cmd/crowdsec-cli/decisions_table.go +++ b/cmd/crowdsec-cli/decisions_table.go @@ -8,13 +8,15 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/models" ) -func decisionsTable(out io.Writer, alerts *models.GetAlertsResponse, printMachine bool) { +func (cli *cliDecisions) decisionsTable(out io.Writer, alerts *models.GetAlertsResponse, printMachine bool) { t := newTable(out) t.SetRowLines(false) + header := []string{"ID", "Source", "Scope:Value", "Reason", "Action", "Country", "AS", "Events", "expiration", "Alert ID"} if printMachine { header = append(header, "Machine") } + t.SetHeaders(header...) for _, alertItem := range *alerts { @@ -22,6 +24,7 @@ func decisionsTable(out io.Writer, alerts *models.GetAlertsResponse, printMachin if *alertItem.Simulated { *decisionItem.Type = fmt.Sprintf("(simul)%s", *decisionItem.Type) } + row := []string{ strconv.Itoa(int(decisionItem.ID)), *decisionItem.Origin, @@ -42,5 +45,6 @@ func decisionsTable(out io.Writer, alerts *models.GetAlertsResponse, printMachin t.AddRow(row...) 
} } + t.Render() } diff --git a/cmd/crowdsec-cli/flag.go b/cmd/crowdsec-cli/flag.go index 402302a1f64..e85f33d4467 100644 --- a/cmd/crowdsec-cli/flag.go +++ b/cmd/crowdsec-cli/flag.go @@ -18,6 +18,7 @@ func (p *MachinePassword) Set(v string) error { if len(v) > 72 { return errors.New("password too long (max 72 characters)") } + *p = MachinePassword(v) return nil diff --git a/cmd/crowdsec-cli/machines.go b/cmd/crowdsec-cli/machines.go index 0cabccf76f5..1819bdcf5fb 100644 --- a/cmd/crowdsec-cli/machines.go +++ b/cmd/crowdsec-cli/machines.go @@ -45,6 +45,7 @@ func generatePassword(length int) string { if err != nil { log.Fatalf("failed getting data from prng for password generation : %s", err) } + buf[i] = charset[rInt.Int64()] } @@ -59,12 +60,14 @@ func generateIDPrefix() (string, error) { if err == nil { return prefix, nil } + log.Debugf("failed to get machine-id with usual files: %s", err) bID, err := uuid.NewRandom() if err == nil { return bID.String(), nil } + return "", fmt.Errorf("generating machine id: %w", err) } @@ -75,11 +78,14 @@ func generateID(prefix string) (string, error) { if prefix == "" { prefix, err = generateIDPrefix() } + if err != nil { return "", err } + prefix = strings.ReplaceAll(prefix, "-", "")[:32] suffix := generatePassword(16) + return prefix + suffix, nil } @@ -289,6 +295,7 @@ func (cli *cliMachines) add(args []string, machinePassword string, dumpFile stri if !autoAdd { return fmt.Errorf("please specify a password with --password or use --auto") } + machinePassword = generatePassword(passwordLength) } else if machinePassword == "" && interactive { qs := &survey.Password{ @@ -328,10 +335,10 @@ func (cli *cliMachines) add(args []string, machinePassword string, dumpFile stri } if dumpFile != "" && dumpFile != "-" { - err = os.WriteFile(dumpFile, apiConfigDump, 0o600) - if err != nil { + if err = os.WriteFile(dumpFile, apiConfigDump, 0o600); err != nil { return fmt.Errorf("write api credentials in '%s' failed: %s", dumpFile, err) } + 
fmt.Fprintf(os.Stderr, "API credentials written to '%s'.\n", dumpFile) } else { fmt.Print(string(apiConfigDump)) @@ -359,11 +366,11 @@ func (cli *cliMachines) deleteValid(cmd *cobra.Command, args []string, toComplet func (cli *cliMachines) delete(machines []string) error { for _, machineID := range machines { - err := cli.db.DeleteWatcher(machineID) - if err != nil { + if err := cli.db.DeleteWatcher(machineID); err != nil { log.Errorf("unable to delete machine '%s': %s", machineID, err) return nil } + log.Infof("machine '%s' deleted successfully", machineID) } @@ -473,6 +480,7 @@ func (cli *cliMachines) validate(machineID string) error { if err := cli.db.ValidateMachine(machineID); err != nil { return fmt.Errorf("unable to validate machine '%s': %s", machineID, err) } + log.Infof("machine '%s' validated successfully", machineID) return nil diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index 055c38e2788..1a7c84e3c78 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -157,8 +157,8 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.PersistentFlags().BoolVar(&wrn_lvl, "warning", false, "Set logging to warning") cmd.PersistentFlags().BoolVar(&err_lvl, "error", false, "Set logging to error") cmd.PersistentFlags().BoolVar(&trace_lvl, "trace", false, "Set logging to trace") - cmd.PersistentFlags().StringVar(&flagBranch, "branch", "", "Override hub branch on github") + if err := cmd.PersistentFlags().MarkHidden("branch"); err != nil { log.Fatalf("failed to hide flag: %s", err) } @@ -197,7 +197,7 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewCLIHub(getconfig).NewCommand()) cmd.AddCommand(NewMetricsCmd()) cmd.AddCommand(NewCLIDashboard(getconfig).NewCommand()) - cmd.AddCommand(NewCLIDecisions().NewCommand()) + cmd.AddCommand(NewCLIDecisions(getconfig).NewCommand()) cmd.AddCommand(NewCLIAlerts().NewCommand()) 
cmd.AddCommand(NewCLISimulation(getconfig).NewCommand()) cmd.AddCommand(NewCLIBouncers(getconfig).NewCommand()) diff --git a/cmd/crowdsec-cli/papi.go b/cmd/crowdsec-cli/papi.go index 43dcc30e0db..04223ef93ab 100644 --- a/cmd/crowdsec-cli/papi.go +++ b/cmd/crowdsec-cli/papi.go @@ -32,7 +32,7 @@ func (cli *cliPapi) NewCommand() *cobra.Command { Short: "Manage interaction with Polling API (PAPI)", Args: cobra.MinimumNArgs(1), DisableAutoGenTag: true, - PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + PersistentPreRunE: func(_ *cobra.Command, _ []string) error { cfg := cli.cfg() if err := require.LAPI(cfg); err != nil { return err @@ -59,7 +59,7 @@ func (cli *cliPapi) NewStatusCmd() *cobra.Command { Short: "Get status of the Polling API", Args: cobra.MinimumNArgs(0), DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(_ *cobra.Command, _ []string) error { var err error cfg := cli.cfg() dbClient, err = database.NewClient(cfg.DbConfig) @@ -111,7 +111,7 @@ func (cli *cliPapi) NewSyncCmd() *cobra.Command { Short: "Sync with the Polling API, pulling all non-expired orders for the instance", Args: cobra.MinimumNArgs(0), DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(_ *cobra.Command, _ []string) error { var err error cfg := cli.cfg() t := tomb.Tomb{} diff --git a/cmd/crowdsec-cli/simulation.go b/cmd/crowdsec-cli/simulation.go index 19eb1b99d01..a6e710c5747 100644 --- a/cmd/crowdsec-cli/simulation.go +++ b/cmd/crowdsec-cli/simulation.go @@ -211,14 +211,17 @@ func (cli *cliSimulation) enableGlobalSimulation() error { func (cli *cliSimulation) dumpSimulationFile() error { cfg := cli.cfg() + newConfigSim, err := yaml.Marshal(cfg.Cscli.SimulationConfig) if err != nil { return fmt.Errorf("unable to marshal simulation configuration: %s", err) } + err = os.WriteFile(cfg.ConfigPaths.SimulationFilePath, newConfigSim, 0o644) if err != nil { return fmt.Errorf("write simulation 
config in '%s' failed: %s", cfg.ConfigPaths.SimulationFilePath, err) } + log.Debugf("updated simulation file %s", cfg.ConfigPaths.SimulationFilePath) return nil @@ -230,16 +233,19 @@ func (cli *cliSimulation) disableGlobalSimulation() error { *cfg.Cscli.SimulationConfig.Simulation = false cfg.Cscli.SimulationConfig.Exclusions = []string{} + newConfigSim, err := yaml.Marshal(cfg.Cscli.SimulationConfig) if err != nil { return fmt.Errorf("unable to marshal new simulation configuration: %s", err) } + err = os.WriteFile(cfg.ConfigPaths.SimulationFilePath, newConfigSim, 0o644) if err != nil { - return fmt.Errorf("unable to write new simulation config in '%s' : %s", cfg.ConfigPaths.SimulationFilePath, err) + return fmt.Errorf("unable to write new simulation config in '%s': %s", cfg.ConfigPaths.SimulationFilePath, err) } log.Printf("global simulation: disabled") + return nil } @@ -249,10 +255,13 @@ func (cli *cliSimulation) status() { log.Printf("global simulation: disabled (configuration file is missing)") return } + if *cfg.Cscli.SimulationConfig.Simulation { log.Println("global simulation: enabled") + if len(cfg.Cscli.SimulationConfig.Exclusions) > 0 { log.Println("Scenarios not in simulation mode :") + for _, scenario := range cfg.Cscli.SimulationConfig.Exclusions { log.Printf(" - %s", scenario) } diff --git a/cmd/crowdsec-cli/support.go b/cmd/crowdsec-cli/support.go index ed7f7cf2ffd..6ddfa3056bc 100644 --- a/cmd/crowdsec-cli/support.go +++ b/cmd/crowdsec-cli/support.go @@ -76,9 +76,10 @@ func collectMetrics() ([]byte, []byte, error) { if err != nil { return nil, nil, fmt.Errorf("could not create requests to prometheus endpoint: %s", err) } + client := &http.Client{} - resp, err := client.Do(req) + resp, err := client.Do(req) if err != nil { return nil, nil, fmt.Errorf("could not get metrics from prometheus endpoint: %s", err) } @@ -100,17 +101,20 @@ func collectVersion() []byte { func collectFeatures() []byte { log.Info("Collecting feature flags") + enabledFeatures 
:= fflag.Crowdsec.GetEnabledFeatures() w := bytes.NewBuffer(nil) for _, k := range enabledFeatures { fmt.Fprintf(w, "%s\n", k) } + return w.Bytes() } func collectOSInfo() ([]byte, error) { log.Info("Collecting OS info") + info, err := osinfo.GetOSInfo() if err != nil { @@ -133,6 +137,7 @@ func collectHubItems(hub *cwhub.Hub, itemType string) []byte { var err error out := bytes.NewBuffer(nil) + log.Infof("Collecting %s list", itemType) items := make(map[string][]*cwhub.Item) @@ -144,26 +149,33 @@ func collectHubItems(hub *cwhub.Hub, itemType string) []byte { if err := listItems(out, []string{itemType}, items, false); err != nil { log.Warnf("could not collect %s list: %s", itemType, err) } + return out.Bytes() } func collectBouncers(dbClient *database.Client) ([]byte, error) { out := bytes.NewBuffer(nil) + bouncers, err := dbClient.ListBouncers() if err != nil { return nil, fmt.Errorf("unable to list bouncers: %s", err) } + getBouncersTable(out, bouncers) + return out.Bytes(), nil } func collectAgents(dbClient *database.Client) ([]byte, error) { out := bytes.NewBuffer(nil) + machines, err := dbClient.ListMachines() if err != nil { return nil, fmt.Errorf("unable to list machines: %s", err) } + getAgentsTable(out, machines) + return out.Bytes(), nil } @@ -171,12 +183,14 @@ func collectAPIStatus(login string, password string, endpoint string, prefix str if csConfig.API.Client == nil || csConfig.API.Client.Credentials == nil { return []byte("No agent credentials found, are we LAPI ?") } + pwd := strfmt.Password(password) - apiurl, err := url.Parse(endpoint) + apiurl, err := url.Parse(endpoint) if err != nil { return []byte(fmt.Sprintf("cannot parse API URL: %s", err)) } + scenarios, err := hub.GetInstalledItemNames(cwhub.SCENARIOS) if err != nil { return []byte(fmt.Sprintf("could not collect scenarios: %s", err)) @@ -189,6 +203,7 @@ func collectAPIStatus(login string, password string, endpoint string, prefix str if err != nil { return []byte(fmt.Sprintf("could not init 
client: %s", err)) } + t := models.WatcherAuthRequest{ MachineID: &login, Password: &pwd, @@ -205,6 +220,7 @@ func collectAPIStatus(login string, password string, endpoint string, prefix str func collectCrowdsecConfig() []byte { log.Info("Collecting crowdsec config") + config, err := os.ReadFile(*csConfig.FilePath) if err != nil { return []byte(fmt.Sprintf("could not read config file: %s", err)) @@ -217,15 +233,18 @@ func collectCrowdsecConfig() []byte { func collectCrowdsecProfile() []byte { log.Info("Collecting crowdsec profile") + config, err := os.ReadFile(csConfig.API.Server.ProfilesPath) if err != nil { return []byte(fmt.Sprintf("could not read profile file: %s", err)) } + return config } func collectAcquisitionConfig() map[string][]byte { log.Info("Collecting acquisition config") + ret := make(map[string][]byte) for _, filename := range csConfig.Crowdsec.AcquisitionFiles { @@ -287,7 +306,7 @@ cscli support dump -f /tmp/crowdsec-support.zip `, Args: cobra.NoArgs, DisableAutoGenTag: true, - Run: func(cmd *cobra.Command, args []string) { + Run: func(_ *cobra.Command, _ []string) { var err error var skipHub, skipDB, skipCAPI, skipLAPI, skipAgent bool infos := map[string][]byte{ @@ -307,13 +326,13 @@ cscli support dump -f /tmp/crowdsec-support.zip infos[SUPPORT_AGENTS_PATH] = []byte(err.Error()) } - if err := csConfig.LoadAPIServer(true); err != nil { + if err = csConfig.LoadAPIServer(true); err != nil { log.Warnf("could not load LAPI, skipping CAPI check") skipLAPI = true infos[SUPPORT_CAPI_STATUS_PATH] = []byte(err.Error()) } - if err := csConfig.LoadCrowdsec(); err != nil { + if err = csConfig.LoadCrowdsec(); err != nil { log.Warnf("could not load agent config, skipping crowdsec config check") skipAgent = true } @@ -399,7 +418,6 @@ cscli support dump -f /tmp/crowdsec-support.zip } if !skipAgent { - acquis := collectAcquisitionConfig() for filename, content := range acquis { diff --git a/cmd/crowdsec-cli/utils.go b/cmd/crowdsec-cli/utils.go index 
b568c6eae3f..1ae8f9da2ed 100644 --- a/cmd/crowdsec-cli/utils.go +++ b/cmd/crowdsec-cli/utils.go @@ -25,6 +25,7 @@ func manageCliDecisionAlerts(ip *string, ipRange *string, scope *string, value * return fmt.Errorf("%s isn't a valid range", *ipRange) } } + if *ip != "" { ipRepr := net.ParseIP(*ip) if ipRepr == nil { @@ -32,7 +33,7 @@ func manageCliDecisionAlerts(ip *string, ipRange *string, scope *string, value * } } - //avoid confusion on scope (ip vs Ip and range vs Range) + // avoid confusion on scope (ip vs Ip and range vs Range) switch strings.ToLower(*scope) { case "ip": *scope = types.Ip @@ -43,6 +44,7 @@ func manageCliDecisionAlerts(ip *string, ipRange *string, scope *string, value * case "as": *scope = types.AS } + return nil } diff --git a/pkg/csconfig/api.go b/pkg/csconfig/api.go index cdff39e700f..de8ee4934a7 100644 --- a/pkg/csconfig/api.go +++ b/pkg/csconfig/api.go @@ -178,6 +178,7 @@ func (l *LocalApiClientCfg) Load() error { func (lapiCfg *LocalApiServerCfg) GetTrustedIPs() ([]net.IPNet, error) { trustedIPs := make([]net.IPNet, 0) + for _, ip := range lapiCfg.TrustedIPs { cidr := toValidCIDR(ip) @@ -265,7 +266,7 @@ func (c *Config) LoadAPIServer(inCli bool) error { return fmt.Errorf("no listen_uri specified") } - //inherit log level from common, then api->server + // inherit log level from common, then api->server var logLevel log.Level if c.API.Server.LogLevel != nil { logLevel = *c.API.Server.LogLevel diff --git a/pkg/csconfig/config.go b/pkg/csconfig/config.go index a704414952e..2dc7ecc7d53 100644 --- a/pkg/csconfig/config.go +++ b/pkg/csconfig/config.go @@ -25,7 +25,7 @@ var globalConfig = Config{} // Config contains top-level defaults -> overridden by configuration file -> overridden by CLI flags type Config struct { - //just a path to ourselves :p + // just a path to ourselves :p FilePath *string `yaml:"-"` Self []byte `yaml:"-"` Common *CommonCfg `yaml:"common,omitempty"` @@ -44,10 +44,12 @@ type Config struct { func NewConfig(configFile 
string, disableAgent bool, disableAPI bool, inCli bool) (*Config, string, error) { patcher := yamlpatch.NewPatcher(configFile, ".local") patcher.SetQuiet(inCli) + fcontent, err := patcher.MergedPatchContent() if err != nil { return nil, "", err } + configData := csstring.StrictExpand(string(fcontent), os.LookupEnv) cfg := Config{ FilePath: &configFile, diff --git a/test/bats/97_ipv6_single.bats b/test/bats/97_ipv6_single.bats index ffbfc125b24..982976d70ed 100644 --- a/test/bats/97_ipv6_single.bats +++ b/test/bats/97_ipv6_single.bats @@ -19,7 +19,7 @@ teardown_file() { setup() { load "../lib/setup.sh" - if is_db_mysql; then sleep 0.3; fi + if is_db_mysql; then sleep 0.5; fi } api() { From 5ff8a03195fda2466cb4a18cfd11256467fa064a Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 2 Feb 2024 09:45:03 +0100 Subject: [PATCH 009/318] refact "cscli metrics" par 1 (#2805) --- cmd/crowdsec-cli/main.go | 2 +- cmd/crowdsec-cli/metrics.go | 68 +++++++++++++++++-------------- cmd/crowdsec-cli/metrics_table.go | 22 +++++----- cmd/crowdsec-cli/support.go | 2 +- 4 files changed, 50 insertions(+), 44 deletions(-) diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index 1a7c84e3c78..db3a164af90 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -195,7 +195,7 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewCLIVersion().NewCommand()) cmd.AddCommand(NewConfigCmd()) cmd.AddCommand(NewCLIHub(getconfig).NewCommand()) - cmd.AddCommand(NewMetricsCmd()) + cmd.AddCommand(NewCLIMetrics(getconfig).NewCommand()) cmd.AddCommand(NewCLIDashboard(getconfig).NewCommand()) cmd.AddCommand(NewCLIDecisions(getconfig).NewCommand()) cmd.AddCommand(NewCLIAlerts().NewCommand()) diff --git a/cmd/crowdsec-cli/metrics.go b/cmd/crowdsec-cli/metrics.go index 5b24dc84c91..902d4f0f445 100644 --- a/cmd/crowdsec-cli/metrics.go +++ b/cmd/crowdsec-cli/metrics.go @@ -19,8 +19,19 @@ import ( 
"github.com/crowdsecurity/go-cs-lib/trace" ) +type cliMetrics struct { + cfg configGetter +} + +func NewCLIMetrics(getconfig configGetter) *cliMetrics { + return &cliMetrics{ + cfg: getconfig, + } +} + + // FormatPrometheusMetrics is a complete rip from prom2json -func FormatPrometheusMetrics(out io.Writer, url string, formatType string) error { +func FormatPrometheusMetrics(out io.Writer, url string, formatType string, noUnit bool) error { mfChan := make(chan *dto.MetricFamily, 1024) errChan := make(chan error, 1) @@ -256,9 +267,9 @@ func FormatPrometheusMetrics(out io.Writer, url string, formatType string) error } if formatType == "human" { - acquisStatsTable(out, acquis_stats) - bucketStatsTable(out, buckets_stats) - parserStatsTable(out, parsers_stats) + acquisStatsTable(out, acquis_stats, noUnit) + bucketStatsTable(out, buckets_stats, noUnit) + parserStatsTable(out, parsers_stats, noUnit) lapiStatsTable(out, lapi_stats) lapiMachineStatsTable(out, lapi_machine_stats) lapiBouncerStatsTable(out, lapi_bouncer_stats) @@ -266,8 +277,8 @@ func FormatPrometheusMetrics(out io.Writer, url string, formatType string) error decisionStatsTable(out, decisions_stats) alertStatsTable(out, alerts_stats) stashStatsTable(out, stash_stats) - appsecMetricsToTable(out, appsec_engine_stats) - appsecRulesToTable(out, appsec_rule_stats) + appsecMetricsToTable(out, appsec_engine_stats, noUnit) + appsecRulesToTable(out, appsec_rule_stats, noUnit) return nil } @@ -304,52 +315,47 @@ func FormatPrometheusMetrics(out io.Writer, url string, formatType string) error return nil } -var noUnit bool - -func runMetrics(cmd *cobra.Command, args []string) error { - flags := cmd.Flags() - - url, err := flags.GetString("url") - if err != nil { - return err - } +func (cli *cliMetrics) run(url string, noUnit bool) error { + cfg := cli.cfg() if url != "" { - csConfig.Cscli.PrometheusUrl = url - } - - noUnit, err = flags.GetBool("no-unit") - if err != nil { - return err + cfg.Cscli.PrometheusUrl = url } - 
if csConfig.Prometheus == nil { + if cfg.Prometheus == nil { return fmt.Errorf("prometheus section missing, can't show metrics") } - if !csConfig.Prometheus.Enabled { + if !cfg.Prometheus.Enabled { return fmt.Errorf("prometheus is not enabled, can't show metrics") } - if err = FormatPrometheusMetrics(color.Output, csConfig.Cscli.PrometheusUrl, csConfig.Cscli.Output); err != nil { + if err := FormatPrometheusMetrics(color.Output, cfg.Cscli.PrometheusUrl, cfg.Cscli.Output, noUnit); err != nil { return err } return nil } -func NewMetricsCmd() *cobra.Command { - cmdMetrics := &cobra.Command{ +func (cli *cliMetrics) NewCommand() *cobra.Command { + var ( + url string + noUnit bool + ) + + cmd := &cobra.Command{ Use: "metrics", Short: "Display crowdsec prometheus metrics.", Long: `Fetch metrics from the prometheus server and display them in a human-friendly way`, Args: cobra.ExactArgs(0), DisableAutoGenTag: true, - RunE: runMetrics, + RunE: func(cmd *cobra.Command, args []string) error { + return cli.run(url, noUnit) + }, } - flags := cmdMetrics.PersistentFlags() - flags.StringP("url", "u", "", "Prometheus url (http://:/metrics)") - flags.Bool("no-unit", false, "Show the real number instead of formatted with units") + flags := cmd.Flags() + flags.StringVarP(&url, "url", "u", "", "Prometheus url (http://:/metrics)") + flags.BoolVar(&noUnit, "no-unit", false, "Show the real number instead of formatted with units") - return cmdMetrics + return cmd } diff --git a/cmd/crowdsec-cli/metrics_table.go b/cmd/crowdsec-cli/metrics_table.go index 80b9cb6e435..287333b5f3c 100644 --- a/cmd/crowdsec-cli/metrics_table.go +++ b/cmd/crowdsec-cli/metrics_table.go @@ -43,7 +43,7 @@ func lapiMetricsToTable(t *table.Table, stats map[string]map[string]map[string]i return numRows } -func metricsToTable(t *table.Table, stats map[string]map[string]int, keys []string) (int, error) { +func metricsToTable(t *table.Table, stats map[string]map[string]int, keys []string, noUnit bool) (int, error) { if t 
== nil { return 0, fmt.Errorf("nil table") } @@ -81,7 +81,7 @@ func metricsToTable(t *table.Table, stats map[string]map[string]int, keys []stri return numRows, nil } -func bucketStatsTable(out io.Writer, stats map[string]map[string]int) { +func bucketStatsTable(out io.Writer, stats map[string]map[string]int, noUnit bool) { t := newTable(out) t.SetRowLines(false) t.SetHeaders("Bucket", "Current Count", "Overflows", "Instantiated", "Poured", "Expired") @@ -89,7 +89,7 @@ func bucketStatsTable(out io.Writer, stats map[string]map[string]int) { keys := []string{"curr_count", "overflow", "instantiation", "pour", "underflow"} - if numRows, err := metricsToTable(t, stats, keys); err != nil { + if numRows, err := metricsToTable(t, stats, keys, noUnit); err != nil { log.Warningf("while collecting bucket stats: %s", err) } else if numRows > 0 { renderTableTitle(out, "\nBucket Metrics:") @@ -97,7 +97,7 @@ func bucketStatsTable(out io.Writer, stats map[string]map[string]int) { } } -func acquisStatsTable(out io.Writer, stats map[string]map[string]int) { +func acquisStatsTable(out io.Writer, stats map[string]map[string]int, noUnit bool) { t := newTable(out) t.SetRowLines(false) t.SetHeaders("Source", "Lines read", "Lines parsed", "Lines unparsed", "Lines poured to bucket") @@ -105,7 +105,7 @@ func acquisStatsTable(out io.Writer, stats map[string]map[string]int) { keys := []string{"reads", "parsed", "unparsed", "pour"} - if numRows, err := metricsToTable(t, stats, keys); err != nil { + if numRows, err := metricsToTable(t, stats, keys, noUnit); err != nil { log.Warningf("while collecting acquis stats: %s", err) } else if numRows > 0 { renderTableTitle(out, "\nAcquisition Metrics:") @@ -113,13 +113,13 @@ func acquisStatsTable(out io.Writer, stats map[string]map[string]int) { } } -func appsecMetricsToTable(out io.Writer, metrics map[string]map[string]int) { +func appsecMetricsToTable(out io.Writer, metrics map[string]map[string]int, noUnit bool) { t := newTable(out) 
t.SetRowLines(false) t.SetHeaders("Appsec Engine", "Processed", "Blocked") t.SetAlignment(table.AlignLeft, table.AlignLeft) keys := []string{"processed", "blocked"} - if numRows, err := metricsToTable(t, metrics, keys); err != nil { + if numRows, err := metricsToTable(t, metrics, keys, noUnit); err != nil { log.Warningf("while collecting appsec stats: %s", err) } else if numRows > 0 { renderTableTitle(out, "\nAppsec Metrics:") @@ -127,14 +127,14 @@ func appsecMetricsToTable(out io.Writer, metrics map[string]map[string]int) { } } -func appsecRulesToTable(out io.Writer, metrics map[string]map[string]map[string]int) { +func appsecRulesToTable(out io.Writer, metrics map[string]map[string]map[string]int, noUnit bool) { for appsecEngine, appsecEngineRulesStats := range metrics { t := newTable(out) t.SetRowLines(false) t.SetHeaders("Rule ID", "Triggered") t.SetAlignment(table.AlignLeft, table.AlignLeft) keys := []string{"triggered"} - if numRows, err := metricsToTable(t, appsecEngineRulesStats, keys); err != nil { + if numRows, err := metricsToTable(t, appsecEngineRulesStats, keys, noUnit); err != nil { log.Warningf("while collecting appsec rules stats: %s", err) } else if numRows > 0 { renderTableTitle(out, fmt.Sprintf("\nAppsec '%s' Rules Metrics:", appsecEngine)) @@ -144,7 +144,7 @@ func appsecRulesToTable(out io.Writer, metrics map[string]map[string]map[string] } -func parserStatsTable(out io.Writer, stats map[string]map[string]int) { +func parserStatsTable(out io.Writer, stats map[string]map[string]int, noUnit bool) { t := newTable(out) t.SetRowLines(false) t.SetHeaders("Parsers", "Hits", "Parsed", "Unparsed") @@ -152,7 +152,7 @@ func parserStatsTable(out io.Writer, stats map[string]map[string]int) { keys := []string{"hits", "parsed", "unparsed"} - if numRows, err := metricsToTable(t, stats, keys); err != nil { + if numRows, err := metricsToTable(t, stats, keys, noUnit); err != nil { log.Warningf("while collecting parsers stats: %s", err) } else if numRows > 0 { 
renderTableTitle(out, "\nParser Metrics:") diff --git a/cmd/crowdsec-cli/support.go b/cmd/crowdsec-cli/support.go index 6ddfa3056bc..e0a2fa9db90 100644 --- a/cmd/crowdsec-cli/support.go +++ b/cmd/crowdsec-cli/support.go @@ -66,7 +66,7 @@ func collectMetrics() ([]byte, []byte, error) { } humanMetrics := bytes.NewBuffer(nil) - err := FormatPrometheusMetrics(humanMetrics, csConfig.Cscli.PrometheusUrl, "human") + err := FormatPrometheusMetrics(humanMetrics, csConfig.Cscli.PrometheusUrl, "human", false) if err != nil { return nil, nil, fmt.Errorf("could not fetch promtheus metrics: %s", err) From 81acad0d668fb575fb207e54f300e0dd4e39e05e Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 2 Feb 2024 10:40:55 +0100 Subject: [PATCH 010/318] refact "cscli metrics" part 2 (#2806) --- cmd/crowdsec-cli/metrics.go | 243 ++++++++++++++++-------------- cmd/crowdsec-cli/metrics_table.go | 60 ++++---- 2 files changed, 155 insertions(+), 148 deletions(-) diff --git a/cmd/crowdsec-cli/metrics.go b/cmd/crowdsec-cli/metrics.go index 902d4f0f445..ad255e847db 100644 --- a/cmd/crowdsec-cli/metrics.go +++ b/cmd/crowdsec-cli/metrics.go @@ -19,6 +19,27 @@ import ( "github.com/crowdsecurity/go-cs-lib/trace" ) +type ( + statAcquis map[string]map[string]int + statParser map[string]map[string]int + statBucket map[string]map[string]int + statLapi map[string]map[string]int + statLapiMachine map[string]map[string]map[string]int + statLapiBouncer map[string]map[string]map[string]int + statLapiDecision map[string]struct { + NonEmpty int + Empty int + } + statDecision map[string]map[string]map[string]int + statAppsecEngine map[string]map[string]int + statAppsecRule map[string]map[string]map[string]int + statAlert map[string]int + statStash map[string]struct { + Type string + Count int + } +) + type cliMetrics struct { cfg configGetter } @@ -29,7 +50,6 @@ func NewCLIMetrics(getconfig configGetter) *cliMetrics { } } - // FormatPrometheusMetrics is a complete rip 
from prom2json func FormatPrometheusMetrics(out io.Writer, url string, formatType string, noUnit bool) error { mfChan := make(chan *dto.MetricFamily, 1024) @@ -63,24 +83,19 @@ func FormatPrometheusMetrics(out io.Writer, url string, formatType string, noUni log.Debugf("Finished reading prometheus output, %d entries", len(result)) /*walk*/ - lapi_decisions_stats := map[string]struct { - NonEmpty int - Empty int - }{} - acquis_stats := map[string]map[string]int{} - parsers_stats := map[string]map[string]int{} - buckets_stats := map[string]map[string]int{} - lapi_stats := map[string]map[string]int{} - lapi_machine_stats := map[string]map[string]map[string]int{} - lapi_bouncer_stats := map[string]map[string]map[string]int{} - decisions_stats := map[string]map[string]map[string]int{} - appsec_engine_stats := map[string]map[string]int{} - appsec_rule_stats := map[string]map[string]map[string]int{} - alerts_stats := map[string]int{} - stash_stats := map[string]struct { - Type string - Count int - }{} + + mAcquis := statAcquis{} + mParser := statParser{} + mBucket := statBucket{} + mLapi := statLapi{} + mLapiMachine := statLapiMachine{} + mLapiBouncer := statLapiBouncer{} + mLapiDecision := statLapiDecision{} + mDecision := statDecision{} + mAppsecEngine := statAppsecEngine{} + mAppsecRule := statAppsecRule{} + mAlert := statAlert{} + mStash := statStash{} for idx, fam := range result { if !strings.HasPrefix(fam.Name, "cs_") { @@ -127,138 +142,138 @@ func FormatPrometheusMetrics(out io.Writer, url string, formatType string, noUni switch fam.Name { /*buckets*/ case "cs_bucket_created_total": - if _, ok := buckets_stats[name]; !ok { - buckets_stats[name] = make(map[string]int) + if _, ok := mBucket[name]; !ok { + mBucket[name] = make(map[string]int) } - buckets_stats[name]["instantiation"] += ival + mBucket[name]["instantiation"] += ival case "cs_buckets": - if _, ok := buckets_stats[name]; !ok { - buckets_stats[name] = make(map[string]int) + if _, ok := mBucket[name]; !ok { 
+ mBucket[name] = make(map[string]int) } - buckets_stats[name]["curr_count"] += ival + mBucket[name]["curr_count"] += ival case "cs_bucket_overflowed_total": - if _, ok := buckets_stats[name]; !ok { - buckets_stats[name] = make(map[string]int) + if _, ok := mBucket[name]; !ok { + mBucket[name] = make(map[string]int) } - buckets_stats[name]["overflow"] += ival + mBucket[name]["overflow"] += ival case "cs_bucket_poured_total": - if _, ok := buckets_stats[name]; !ok { - buckets_stats[name] = make(map[string]int) + if _, ok := mBucket[name]; !ok { + mBucket[name] = make(map[string]int) } - if _, ok := acquis_stats[source]; !ok { - acquis_stats[source] = make(map[string]int) + if _, ok := mAcquis[source]; !ok { + mAcquis[source] = make(map[string]int) } - buckets_stats[name]["pour"] += ival - acquis_stats[source]["pour"] += ival + mBucket[name]["pour"] += ival + mAcquis[source]["pour"] += ival case "cs_bucket_underflowed_total": - if _, ok := buckets_stats[name]; !ok { - buckets_stats[name] = make(map[string]int) + if _, ok := mBucket[name]; !ok { + mBucket[name] = make(map[string]int) } - buckets_stats[name]["underflow"] += ival + mBucket[name]["underflow"] += ival /*acquis*/ case "cs_parser_hits_total": - if _, ok := acquis_stats[source]; !ok { - acquis_stats[source] = make(map[string]int) + if _, ok := mAcquis[source]; !ok { + mAcquis[source] = make(map[string]int) } - acquis_stats[source]["reads"] += ival + mAcquis[source]["reads"] += ival case "cs_parser_hits_ok_total": - if _, ok := acquis_stats[source]; !ok { - acquis_stats[source] = make(map[string]int) + if _, ok := mAcquis[source]; !ok { + mAcquis[source] = make(map[string]int) } - acquis_stats[source]["parsed"] += ival + mAcquis[source]["parsed"] += ival case "cs_parser_hits_ko_total": - if _, ok := acquis_stats[source]; !ok { - acquis_stats[source] = make(map[string]int) + if _, ok := mAcquis[source]; !ok { + mAcquis[source] = make(map[string]int) } - acquis_stats[source]["unparsed"] += ival + 
mAcquis[source]["unparsed"] += ival case "cs_node_hits_total": - if _, ok := parsers_stats[name]; !ok { - parsers_stats[name] = make(map[string]int) + if _, ok := mParser[name]; !ok { + mParser[name] = make(map[string]int) } - parsers_stats[name]["hits"] += ival + mParser[name]["hits"] += ival case "cs_node_hits_ok_total": - if _, ok := parsers_stats[name]; !ok { - parsers_stats[name] = make(map[string]int) + if _, ok := mParser[name]; !ok { + mParser[name] = make(map[string]int) } - parsers_stats[name]["parsed"] += ival + mParser[name]["parsed"] += ival case "cs_node_hits_ko_total": - if _, ok := parsers_stats[name]; !ok { - parsers_stats[name] = make(map[string]int) + if _, ok := mParser[name]; !ok { + mParser[name] = make(map[string]int) } - parsers_stats[name]["unparsed"] += ival + mParser[name]["unparsed"] += ival case "cs_lapi_route_requests_total": - if _, ok := lapi_stats[route]; !ok { - lapi_stats[route] = make(map[string]int) + if _, ok := mLapi[route]; !ok { + mLapi[route] = make(map[string]int) } - lapi_stats[route][method] += ival + mLapi[route][method] += ival case "cs_lapi_machine_requests_total": - if _, ok := lapi_machine_stats[machine]; !ok { - lapi_machine_stats[machine] = make(map[string]map[string]int) + if _, ok := mLapiMachine[machine]; !ok { + mLapiMachine[machine] = make(map[string]map[string]int) } - if _, ok := lapi_machine_stats[machine][route]; !ok { - lapi_machine_stats[machine][route] = make(map[string]int) + if _, ok := mLapiMachine[machine][route]; !ok { + mLapiMachine[machine][route] = make(map[string]int) } - lapi_machine_stats[machine][route][method] += ival + mLapiMachine[machine][route][method] += ival case "cs_lapi_bouncer_requests_total": - if _, ok := lapi_bouncer_stats[bouncer]; !ok { - lapi_bouncer_stats[bouncer] = make(map[string]map[string]int) + if _, ok := mLapiBouncer[bouncer]; !ok { + mLapiBouncer[bouncer] = make(map[string]map[string]int) } - if _, ok := lapi_bouncer_stats[bouncer][route]; !ok { - 
lapi_bouncer_stats[bouncer][route] = make(map[string]int) + if _, ok := mLapiBouncer[bouncer][route]; !ok { + mLapiBouncer[bouncer][route] = make(map[string]int) } - lapi_bouncer_stats[bouncer][route][method] += ival + mLapiBouncer[bouncer][route][method] += ival case "cs_lapi_decisions_ko_total", "cs_lapi_decisions_ok_total": - if _, ok := lapi_decisions_stats[bouncer]; !ok { - lapi_decisions_stats[bouncer] = struct { + if _, ok := mLapiDecision[bouncer]; !ok { + mLapiDecision[bouncer] = struct { NonEmpty int Empty int }{} } - x := lapi_decisions_stats[bouncer] + x := mLapiDecision[bouncer] if fam.Name == "cs_lapi_decisions_ko_total" { x.Empty += ival } else if fam.Name == "cs_lapi_decisions_ok_total" { x.NonEmpty += ival } - lapi_decisions_stats[bouncer] = x + mLapiDecision[bouncer] = x case "cs_active_decisions": - if _, ok := decisions_stats[reason]; !ok { - decisions_stats[reason] = make(map[string]map[string]int) + if _, ok := mDecision[reason]; !ok { + mDecision[reason] = make(map[string]map[string]int) } - if _, ok := decisions_stats[reason][origin]; !ok { - decisions_stats[reason][origin] = make(map[string]int) + if _, ok := mDecision[reason][origin]; !ok { + mDecision[reason][origin] = make(map[string]int) } - decisions_stats[reason][origin][action] += ival + mDecision[reason][origin][action] += ival case "cs_alerts": - /*if _, ok := alerts_stats[scenario]; !ok { - alerts_stats[scenario] = make(map[string]int) + /*if _, ok := mAlert[scenario]; !ok { + mAlert[scenario] = make(map[string]int) }*/ - alerts_stats[reason] += ival + mAlert[reason] += ival case "cs_cache_size": - stash_stats[name] = struct { + mStash[name] = struct { Type string Count int }{Type: mtype, Count: ival} case "cs_appsec_reqs_total": - if _, ok := appsec_engine_stats[metric.Labels["appsec_engine"]]; !ok { - appsec_engine_stats[metric.Labels["appsec_engine"]] = make(map[string]int, 0) + if _, ok := mAppsecEngine[metric.Labels["appsec_engine"]]; !ok { + 
mAppsecEngine[metric.Labels["appsec_engine"]] = make(map[string]int, 0) } - appsec_engine_stats[metric.Labels["appsec_engine"]]["processed"] = ival + mAppsecEngine[metric.Labels["appsec_engine"]]["processed"] = ival case "cs_appsec_block_total": - if _, ok := appsec_engine_stats[metric.Labels["appsec_engine"]]; !ok { - appsec_engine_stats[metric.Labels["appsec_engine"]] = make(map[string]int, 0) + if _, ok := mAppsecEngine[metric.Labels["appsec_engine"]]; !ok { + mAppsecEngine[metric.Labels["appsec_engine"]] = make(map[string]int, 0) } - appsec_engine_stats[metric.Labels["appsec_engine"]]["blocked"] = ival + mAppsecEngine[metric.Labels["appsec_engine"]]["blocked"] = ival case "cs_appsec_rule_hits": appsecEngine := metric.Labels["appsec_engine"] ruleID := metric.Labels["rule_name"] - if _, ok := appsec_rule_stats[appsecEngine]; !ok { - appsec_rule_stats[appsecEngine] = make(map[string]map[string]int, 0) + if _, ok := mAppsecRule[appsecEngine]; !ok { + mAppsecRule[appsecEngine] = make(map[string]map[string]int, 0) } - if _, ok := appsec_rule_stats[appsecEngine][ruleID]; !ok { - appsec_rule_stats[appsecEngine][ruleID] = make(map[string]int, 0) + if _, ok := mAppsecRule[appsecEngine][ruleID]; !ok { + mAppsecRule[appsecEngine][ruleID] = make(map[string]int, 0) } - appsec_rule_stats[appsecEngine][ruleID]["triggered"] = ival + mAppsecRule[appsecEngine][ruleID]["triggered"] = ival default: log.Debugf("unknown: %+v", fam.Name) continue @@ -267,33 +282,33 @@ func FormatPrometheusMetrics(out io.Writer, url string, formatType string, noUni } if formatType == "human" { - acquisStatsTable(out, acquis_stats, noUnit) - bucketStatsTable(out, buckets_stats, noUnit) - parserStatsTable(out, parsers_stats, noUnit) - lapiStatsTable(out, lapi_stats) - lapiMachineStatsTable(out, lapi_machine_stats) - lapiBouncerStatsTable(out, lapi_bouncer_stats) - lapiDecisionStatsTable(out, lapi_decisions_stats) - decisionStatsTable(out, decisions_stats) - alertStatsTable(out, alerts_stats) - 
stashStatsTable(out, stash_stats) - appsecMetricsToTable(out, appsec_engine_stats, noUnit) - appsecRulesToTable(out, appsec_rule_stats, noUnit) + mAcquis.table(out, noUnit) + mBucket.table(out, noUnit) + mParser.table(out, noUnit) + mLapi.table(out) + mLapiMachine.table(out) + mLapiBouncer.table(out) + mLapiDecision.table(out) + mDecision.table(out) + mAlert.table(out) + mStash.table(out) + mAppsecEngine.table(out, noUnit) + mAppsecRule.table(out, noUnit) return nil } stats := make(map[string]any) - stats["acquisition"] = acquis_stats - stats["buckets"] = buckets_stats - stats["parsers"] = parsers_stats - stats["lapi"] = lapi_stats - stats["lapi_machine"] = lapi_machine_stats - stats["lapi_bouncer"] = lapi_bouncer_stats - stats["lapi_decisions"] = lapi_decisions_stats - stats["decisions"] = decisions_stats - stats["alerts"] = alerts_stats - stats["stash"] = stash_stats + stats["acquisition"] = mAcquis + stats["buckets"] = mBucket + stats["parsers"] = mParser + stats["lapi"] = mLapi + stats["lapi_machine"] = mLapiMachine + stats["lapi_bouncer"] = mLapiBouncer + stats["lapi_decisions"] = mLapiDecision + stats["decisions"] = mDecision + stats["alerts"] = mAlert + stats["stash"] = mStash switch formatType { case "json": diff --git a/cmd/crowdsec-cli/metrics_table.go b/cmd/crowdsec-cli/metrics_table.go index 287333b5f3c..835277aa4ee 100644 --- a/cmd/crowdsec-cli/metrics_table.go +++ b/cmd/crowdsec-cli/metrics_table.go @@ -81,7 +81,7 @@ func metricsToTable(t *table.Table, stats map[string]map[string]int, keys []stri return numRows, nil } -func bucketStatsTable(out io.Writer, stats map[string]map[string]int, noUnit bool) { +func (s statBucket) table(out io.Writer, noUnit bool) { t := newTable(out) t.SetRowLines(false) t.SetHeaders("Bucket", "Current Count", "Overflows", "Instantiated", "Poured", "Expired") @@ -89,7 +89,7 @@ func bucketStatsTable(out io.Writer, stats map[string]map[string]int, noUnit boo keys := []string{"curr_count", "overflow", "instantiation", "pour", 
"underflow"} - if numRows, err := metricsToTable(t, stats, keys, noUnit); err != nil { + if numRows, err := metricsToTable(t, s, keys, noUnit); err != nil { log.Warningf("while collecting bucket stats: %s", err) } else if numRows > 0 { renderTableTitle(out, "\nBucket Metrics:") @@ -97,7 +97,7 @@ func bucketStatsTable(out io.Writer, stats map[string]map[string]int, noUnit boo } } -func acquisStatsTable(out io.Writer, stats map[string]map[string]int, noUnit bool) { +func (s statAcquis) table(out io.Writer, noUnit bool) { t := newTable(out) t.SetRowLines(false) t.SetHeaders("Source", "Lines read", "Lines parsed", "Lines unparsed", "Lines poured to bucket") @@ -105,7 +105,7 @@ func acquisStatsTable(out io.Writer, stats map[string]map[string]int, noUnit boo keys := []string{"reads", "parsed", "unparsed", "pour"} - if numRows, err := metricsToTable(t, stats, keys, noUnit); err != nil { + if numRows, err := metricsToTable(t, s, keys, noUnit); err != nil { log.Warningf("while collecting acquis stats: %s", err) } else if numRows > 0 { renderTableTitle(out, "\nAcquisition Metrics:") @@ -113,13 +113,13 @@ func acquisStatsTable(out io.Writer, stats map[string]map[string]int, noUnit boo } } -func appsecMetricsToTable(out io.Writer, metrics map[string]map[string]int, noUnit bool) { +func (s statAppsecEngine) table(out io.Writer, noUnit bool) { t := newTable(out) t.SetRowLines(false) t.SetHeaders("Appsec Engine", "Processed", "Blocked") t.SetAlignment(table.AlignLeft, table.AlignLeft) keys := []string{"processed", "blocked"} - if numRows, err := metricsToTable(t, metrics, keys, noUnit); err != nil { + if numRows, err := metricsToTable(t, s, keys, noUnit); err != nil { log.Warningf("while collecting appsec stats: %s", err) } else if numRows > 0 { renderTableTitle(out, "\nAppsec Metrics:") @@ -127,8 +127,8 @@ func appsecMetricsToTable(out io.Writer, metrics map[string]map[string]int, noUn } } -func appsecRulesToTable(out io.Writer, metrics map[string]map[string]map[string]int, 
noUnit bool) { - for appsecEngine, appsecEngineRulesStats := range metrics { +func (s statAppsecRule) table(out io.Writer, noUnit bool) { + for appsecEngine, appsecEngineRulesStats := range s { t := newTable(out) t.SetRowLines(false) t.SetHeaders("Rule ID", "Triggered") @@ -144,7 +144,7 @@ func appsecRulesToTable(out io.Writer, metrics map[string]map[string]map[string] } -func parserStatsTable(out io.Writer, stats map[string]map[string]int, noUnit bool) { +func (s statParser) table(out io.Writer, noUnit bool) { t := newTable(out) t.SetRowLines(false) t.SetHeaders("Parsers", "Hits", "Parsed", "Unparsed") @@ -152,7 +152,7 @@ func parserStatsTable(out io.Writer, stats map[string]map[string]int, noUnit boo keys := []string{"hits", "parsed", "unparsed"} - if numRows, err := metricsToTable(t, stats, keys, noUnit); err != nil { + if numRows, err := metricsToTable(t, s, keys, noUnit); err != nil { log.Warningf("while collecting parsers stats: %s", err) } else if numRows > 0 { renderTableTitle(out, "\nParser Metrics:") @@ -160,11 +160,7 @@ func parserStatsTable(out io.Writer, stats map[string]map[string]int, noUnit boo } } -func stashStatsTable(out io.Writer, stats map[string]struct { - Type string - Count int -}) { - +func (s statStash) table(out io.Writer) { t := newTable(out) t.SetRowLines(false) t.SetHeaders("Name", "Type", "Items") @@ -172,14 +168,14 @@ func stashStatsTable(out io.Writer, stats map[string]struct { // unfortunately, we can't reuse metricsToTable as the structure is too different :/ sortedKeys := []string{} - for k := range stats { + for k := range s { sortedKeys = append(sortedKeys, k) } sort.Strings(sortedKeys) numRows := 0 for _, alabel := range sortedKeys { - astats := stats[alabel] + astats := s[alabel] row := []string{ alabel, @@ -195,7 +191,7 @@ func stashStatsTable(out io.Writer, stats map[string]struct { } } -func lapiStatsTable(out io.Writer, stats map[string]map[string]int) { +func (s statLapi) table(out io.Writer) { t := newTable(out) 
t.SetRowLines(false) t.SetHeaders("Route", "Method", "Hits") @@ -203,14 +199,14 @@ func lapiStatsTable(out io.Writer, stats map[string]map[string]int) { // unfortunately, we can't reuse metricsToTable as the structure is too different :/ sortedKeys := []string{} - for k := range stats { + for k := range s { sortedKeys = append(sortedKeys, k) } sort.Strings(sortedKeys) numRows := 0 for _, alabel := range sortedKeys { - astats := stats[alabel] + astats := s[alabel] subKeys := []string{} for skey := range astats { @@ -235,13 +231,13 @@ func lapiStatsTable(out io.Writer, stats map[string]map[string]int) { } } -func lapiMachineStatsTable(out io.Writer, stats map[string]map[string]map[string]int) { +func (s statLapiMachine) table(out io.Writer) { t := newTable(out) t.SetRowLines(false) t.SetHeaders("Machine", "Route", "Method", "Hits") t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) - numRows := lapiMetricsToTable(t, stats) + numRows := lapiMetricsToTable(t, s) if numRows > 0 { renderTableTitle(out, "\nLocal API Machines Metrics:") @@ -249,13 +245,13 @@ func lapiMachineStatsTable(out io.Writer, stats map[string]map[string]map[string } } -func lapiBouncerStatsTable(out io.Writer, stats map[string]map[string]map[string]int) { +func (s statLapiBouncer) table(out io.Writer) { t := newTable(out) t.SetRowLines(false) t.SetHeaders("Bouncer", "Route", "Method", "Hits") t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) - numRows := lapiMetricsToTable(t, stats) + numRows := lapiMetricsToTable(t, s) if numRows > 0 { renderTableTitle(out, "\nLocal API Bouncers Metrics:") @@ -263,18 +259,14 @@ func lapiBouncerStatsTable(out io.Writer, stats map[string]map[string]map[string } } -func lapiDecisionStatsTable(out io.Writer, stats map[string]struct { - NonEmpty int - Empty int -}, -) { +func (s statLapiDecision) table(out io.Writer) { t := newTable(out) t.SetRowLines(false) t.SetHeaders("Bouncer", "Empty answers", 
"Non-empty answers") t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft) numRows := 0 - for bouncer, hits := range stats { + for bouncer, hits := range s { t.AddRow( bouncer, fmt.Sprintf("%d", hits.Empty), @@ -289,14 +281,14 @@ func lapiDecisionStatsTable(out io.Writer, stats map[string]struct { } } -func decisionStatsTable(out io.Writer, stats map[string]map[string]map[string]int) { +func (s statDecision) table(out io.Writer) { t := newTable(out) t.SetRowLines(false) t.SetHeaders("Reason", "Origin", "Action", "Count") t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) numRows := 0 - for reason, origins := range stats { + for reason, origins := range s { for origin, actions := range origins { for action, hits := range actions { t.AddRow( @@ -316,14 +308,14 @@ func decisionStatsTable(out io.Writer, stats map[string]map[string]map[string]in } } -func alertStatsTable(out io.Writer, stats map[string]int) { +func (s statAlert) table(out io.Writer) { t := newTable(out) t.SetRowLines(false) t.SetHeaders("Reason", "Count") t.SetAlignment(table.AlignLeft, table.AlignLeft) numRows := 0 - for scenario, hits := range stats { + for scenario, hits := range s { t.AddRow( scenario, fmt.Sprintf("%d", hits), From fdc525164a675f7038ccd828967471edd6076eb1 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 6 Feb 2024 10:07:05 +0100 Subject: [PATCH 011/318] refact "cscli metrics" part 3 (#2807) --- cmd/crowdsec-cli/main.go | 2 + cmd/crowdsec-cli/metrics.go | 267 ++++++++++++++++++++++++------ cmd/crowdsec-cli/metrics_table.go | 170 ++++++++++++------- cmd/crowdsec-cli/support.go | 11 +- test/bats/01_cscli.bats | 9 - test/bats/08_metrics.bats | 56 ++++++- 6 files changed, 394 insertions(+), 121 deletions(-) diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index db3a164af90..3b20cf112c0 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -146,6 +146,8 @@ It is meant to 
allow you to manage bans, parsers/scenarios/etc, api and generall FlagsDataType: cc.White, Flags: cc.Green, FlagsDescr: cc.Cyan, + NoExtraNewlines: true, + NoBottomNewline: true, }) cmd.SetOut(color.Output) diff --git a/cmd/crowdsec-cli/metrics.go b/cmd/crowdsec-cli/metrics.go index ad255e847db..c883c809291 100644 --- a/cmd/crowdsec-cli/metrics.go +++ b/cmd/crowdsec-cli/metrics.go @@ -16,6 +16,7 @@ import ( "github.com/spf13/cobra" "gopkg.in/yaml.v3" + "github.com/crowdsecurity/go-cs-lib/maptools" "github.com/crowdsecurity/go-cs-lib/trace" ) @@ -40,18 +41,31 @@ type ( } ) -type cliMetrics struct { - cfg configGetter +type metricSection interface { + Table(io.Writer, bool, bool) + Description() (string, string) } -func NewCLIMetrics(getconfig configGetter) *cliMetrics { - return &cliMetrics{ - cfg: getconfig, +type metricStore map[string]metricSection + +func NewMetricStore() metricStore { + return metricStore{ + "acquisition": statAcquis{}, + "buckets": statBucket{}, + "parsers": statParser{}, + "lapi": statLapi{}, + "lapi-machine": statLapiMachine{}, + "lapi-bouncer": statLapiBouncer{}, + "lapi-decisions": statLapiDecision{}, + "decisions": statDecision{}, + "alerts": statAlert{}, + "stash": statStash{}, + "appsec-engine": statAppsecEngine{}, + "appsec-rule": statAppsecRule{}, } } -// FormatPrometheusMetrics is a complete rip from prom2json -func FormatPrometheusMetrics(out io.Writer, url string, formatType string, noUnit bool) error { +func (ms metricStore) Fetch(url string) error { mfChan := make(chan *dto.MetricFamily, 1024) errChan := make(chan error, 1) @@ -64,9 +78,10 @@ func FormatPrometheusMetrics(out io.Writer, url string, formatType string, noUni transport.ResponseHeaderTimeout = time.Minute go func() { defer trace.CatchPanic("crowdsec/ShowPrometheus") + err := prom2json.FetchMetricFamilies(url, mfChan, transport) if err != nil { - errChan <- fmt.Errorf("failed to fetch prometheus metrics: %w", err) + errChan <- fmt.Errorf("failed to fetch metrics: %w", 
err) return } errChan <- nil @@ -81,21 +96,21 @@ func FormatPrometheusMetrics(out io.Writer, url string, formatType string, noUni return err } - log.Debugf("Finished reading prometheus output, %d entries", len(result)) + log.Debugf("Finished reading metrics output, %d entries", len(result)) /*walk*/ - mAcquis := statAcquis{} - mParser := statParser{} - mBucket := statBucket{} - mLapi := statLapi{} - mLapiMachine := statLapiMachine{} - mLapiBouncer := statLapiBouncer{} - mLapiDecision := statLapiDecision{} - mDecision := statDecision{} - mAppsecEngine := statAppsecEngine{} - mAppsecRule := statAppsecRule{} - mAlert := statAlert{} - mStash := statStash{} + mAcquis := ms["acquisition"].(statAcquis) + mParser := ms["parsers"].(statParser) + mBucket := ms["buckets"].(statBucket) + mLapi := ms["lapi"].(statLapi) + mLapiMachine := ms["lapi-machine"].(statLapiMachine) + mLapiBouncer := ms["lapi-bouncer"].(statLapiBouncer) + mLapiDecision := ms["lapi-decisions"].(statLapiDecision) + mDecision := ms["decisions"].(statDecision) + mAppsecEngine := ms["appsec-engine"].(statAppsecEngine) + mAppsecRule := ms["appsec-rule"].(statAppsecRule) + mAlert := ms["alerts"].(statAlert) + mStash := ms["stash"].(statStash) for idx, fam := range result { if !strings.HasPrefix(fam.Name, "cs_") { @@ -281,44 +296,50 @@ func FormatPrometheusMetrics(out io.Writer, url string, formatType string, noUni } } - if formatType == "human" { - mAcquis.table(out, noUnit) - mBucket.table(out, noUnit) - mParser.table(out, noUnit) - mLapi.table(out) - mLapiMachine.table(out) - mLapiBouncer.table(out) - mLapiDecision.table(out) - mDecision.table(out) - mAlert.table(out) - mStash.table(out) - mAppsecEngine.table(out, noUnit) - mAppsecRule.table(out, noUnit) - return nil + return nil +} + +type cliMetrics struct { + cfg configGetter +} + +func NewCLIMetrics(getconfig configGetter) *cliMetrics { + return &cliMetrics{ + cfg: getconfig, } +} - stats := make(map[string]any) +func (ms metricStore) Format(out 
io.Writer, sections []string, formatType string, noUnit bool) error { + // copy only the sections we want + want := map[string]metricSection{} - stats["acquisition"] = mAcquis - stats["buckets"] = mBucket - stats["parsers"] = mParser - stats["lapi"] = mLapi - stats["lapi_machine"] = mLapiMachine - stats["lapi_bouncer"] = mLapiBouncer - stats["lapi_decisions"] = mLapiDecision - stats["decisions"] = mDecision - stats["alerts"] = mAlert - stats["stash"] = mStash + // if explicitly asking for sections, we want to show empty tables + showEmpty := len(sections) > 0 + + // if no sections are specified, we want all of them + if len(sections) == 0 { + for section := range ms { + sections = append(sections, section) + } + } + + for _, section := range sections { + want[section] = ms[section] + } switch formatType { + case "human": + for section := range want { + want[section].Table(out, noUnit, showEmpty) + } case "json": - x, err := json.MarshalIndent(stats, "", " ") + x, err := json.MarshalIndent(want, "", " ") if err != nil { return fmt.Errorf("failed to unmarshal metrics : %v", err) } out.Write(x) case "raw": - x, err := yaml.Marshal(stats) + x, err := yaml.Marshal(want) if err != nil { return fmt.Errorf("failed to unmarshal metrics : %v", err) } @@ -330,7 +351,7 @@ func FormatPrometheusMetrics(out io.Writer, url string, formatType string, noUni return nil } -func (cli *cliMetrics) run(url string, noUnit bool) error { +func (cli *cliMetrics) show(sections []string, url string, noUnit bool) error { cfg := cli.cfg() if url != "" { @@ -345,7 +366,20 @@ func (cli *cliMetrics) run(url string, noUnit bool) error { return fmt.Errorf("prometheus is not enabled, can't show metrics") } - if err := FormatPrometheusMetrics(color.Output, cfg.Cscli.PrometheusUrl, cfg.Cscli.Output, noUnit); err != nil { + ms := NewMetricStore() + + if err := ms.Fetch(cfg.Cscli.PrometheusUrl); err != nil { + return err + } + + // any section that we don't have in the store is an error + for _, section 
:= range sections { + if _, ok := ms[section]; !ok { + return fmt.Errorf("unknown metrics type: %s", section) + } + } + + if err := ms.Format(color.Output, sections, cfg.Cscli.Output, noUnit); err != nil { return err } return nil @@ -360,11 +394,19 @@ func (cli *cliMetrics) NewCommand() *cobra.Command { cmd := &cobra.Command{ Use: "metrics", Short: "Display crowdsec prometheus metrics.", - Long: `Fetch metrics from the prometheus server and display them in a human-friendly way`, + Long: `Fetch metrics from a Local API server and display them`, + Example: `# Show all Metrics, skip empty tables (same as "cecli metrics show") +cscli metrics + +# Show only some metrics, connect to a different url +cscli metrics --url http://lapi.local:6060/metrics show acquisition parsers + +# List available metric types +cscli metrics list`, Args: cobra.ExactArgs(0), DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { - return cli.run(url, noUnit) + return cli.show(nil, url, noUnit) }, } @@ -372,5 +414,126 @@ func (cli *cliMetrics) NewCommand() *cobra.Command { flags.StringVarP(&url, "url", "u", "", "Prometheus url (http://:/metrics)") flags.BoolVar(&noUnit, "no-unit", false, "Show the real number instead of formatted with units") + cmd.AddCommand(cli.newShowCmd()) + cmd.AddCommand(cli.newListCmd()) + + return cmd +} + +// expandAlias returns a list of sections. The input can be a list of sections or alias. 
+func (cli *cliMetrics) expandSectionGroups(args []string) []string { + ret := []string{} + for _, section := range args { + switch section { + case "engine": + ret = append(ret, "acquisition", "parsers", "buckets", "stash") + case "lapi": + ret = append(ret, "alerts", "decisions", "lapi", "lapi-bouncer", "lapi-decisions", "lapi-machine") + case "appsec": + ret = append(ret, "appsec-engine", "appsec-rule") + default: + ret = append(ret, section) + } + } + + return ret +} + +func (cli *cliMetrics) newShowCmd() *cobra.Command { + var ( + url string + noUnit bool + ) + + cmd := &cobra.Command{ + Use: "show [type]...", + Short: "Display all or part of the available metrics.", + Long: `Fetch metrics from a Local API server and display them, optionally filtering on specific types.`, + Example: `# Show all Metrics, skip empty tables +cscli metrics show + +# Use an alias: "engine", "lapi" or "appsec" to show a group of metrics +cscli metrics show engine + +# Show some specific metrics, show empty tables, connect to a different url +cscli metrics show acquisition parsers buckets stash --url http://lapi.local:6060/metrics + +# Show metrics in json format +cscli metrics show acquisition parsers buckets stash -o json`, + // Positional args are optional + DisableAutoGenTag: true, + RunE: func(_ *cobra.Command, args []string) error { + args = cli.expandSectionGroups(args) + return cli.show(args, url, noUnit) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&url, "url", "u", "", "Metrics url (http://:/metrics)") + flags.BoolVar(&noUnit, "no-unit", false, "Show the real number instead of formatted with units") + + return cmd +} + +func (cli *cliMetrics) list() error { + type metricType struct { + Type string `json:"type" yaml:"type"` + Title string `json:"title" yaml:"title"` + Description string `json:"description" yaml:"description"` + } + + var allMetrics []metricType + + ms := NewMetricStore() + for _, section := range maptools.SortedKeys(ms) { + title, description := 
ms[section].Description() + allMetrics = append(allMetrics, metricType{ + Type: section, + Title: title, + Description: description, + }) + } + + switch cli.cfg().Cscli.Output { + case "human": + t := newTable(color.Output) + t.SetRowLines(true) + t.SetHeaders("Type", "Title", "Description") + + for _, metric := range allMetrics { + t.AddRow(metric.Type, metric.Title, metric.Description) + } + + t.Render() + case "json": + x, err := json.MarshalIndent(allMetrics, "", " ") + if err != nil { + return fmt.Errorf("failed to unmarshal metrics: %w", err) + } + fmt.Println(string(x)) + case "raw": + x, err := yaml.Marshal(allMetrics) + if err != nil { + return fmt.Errorf("failed to unmarshal metrics: %w", err) + } + fmt.Println(string(x)) + } + + return nil +} + +func (cli *cliMetrics) newListCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "list", + Short: "List available types of metrics.", + Long: `List available types of metrics.`, + Args: cobra.ExactArgs(0), + DisableAutoGenTag: true, + RunE: func(_ *cobra.Command, _ []string) error { + cli.list() + return nil + }, + } + return cmd } diff --git a/cmd/crowdsec-cli/metrics_table.go b/cmd/crowdsec-cli/metrics_table.go index 835277aa4ee..72f53f94c49 100644 --- a/cmd/crowdsec-cli/metrics_table.go +++ b/cmd/crowdsec-cli/metrics_table.go @@ -7,6 +7,8 @@ import ( "github.com/aquasecurity/table" log "github.com/sirupsen/logrus" + + "github.com/crowdsecurity/go-cs-lib/maptools" ) func lapiMetricsToTable(t *table.Table, stats map[string]map[string]map[string]int) int { @@ -47,15 +49,10 @@ func metricsToTable(t *table.Table, stats map[string]map[string]int, keys []stri if t == nil { return 0, fmt.Errorf("nil table") } - // sort keys to keep consistent order when printing - sortedKeys := []string{} - for k := range stats { - sortedKeys = append(sortedKeys, k) - } - sort.Strings(sortedKeys) numRows := 0 - for _, alabel := range sortedKeys { + + for _, alabel := range maptools.SortedKeys(stats) { astats, ok := stats[alabel] 
if !ok { continue @@ -81,7 +78,12 @@ func metricsToTable(t *table.Table, stats map[string]map[string]int, keys []stri return numRows, nil } -func (s statBucket) table(out io.Writer, noUnit bool) { +func (s statBucket) Description() (string, string) { + return "Bucket Metrics", + `Measure events in different scenarios. Current count is the number of buckets during metrics collection. Overflows are past event-producing buckets, while Expired are the ones that didn’t receive enough events to Overflow.` +} + +func (s statBucket) Table(out io.Writer, noUnit bool, showEmpty bool) { t := newTable(out) t.SetRowLines(false) t.SetHeaders("Bucket", "Current Count", "Overflows", "Instantiated", "Poured", "Expired") @@ -91,13 +93,19 @@ func (s statBucket) table(out io.Writer, noUnit bool) { if numRows, err := metricsToTable(t, s, keys, noUnit); err != nil { log.Warningf("while collecting bucket stats: %s", err) - } else if numRows > 0 { - renderTableTitle(out, "\nBucket Metrics:") + } else if numRows > 0 || showEmpty { + title, _ := s.Description() + renderTableTitle(out, "\n" + title + ":") t.Render() } } -func (s statAcquis) table(out io.Writer, noUnit bool) { +func (s statAcquis) Description() (string, string) { + return "Acquisition Metrics", + `Measures the lines read, parsed, and unparsed per datasource. Zero read lines indicate a misconfigured or inactive datasource. Zero parsed lines mean the parser(s) failed. 
Non-zero parsed lines are fine as crowdsec selects relevant lines.` +} + +func (s statAcquis) Table(out io.Writer, noUnit bool, showEmpty bool) { t := newTable(out) t.SetRowLines(false) t.SetHeaders("Source", "Lines read", "Lines parsed", "Lines unparsed", "Lines poured to bucket") @@ -107,13 +115,19 @@ func (s statAcquis) table(out io.Writer, noUnit bool) { if numRows, err := metricsToTable(t, s, keys, noUnit); err != nil { log.Warningf("while collecting acquis stats: %s", err) - } else if numRows > 0 { - renderTableTitle(out, "\nAcquisition Metrics:") + } else if numRows > 0 || showEmpty { + title, _ := s.Description() + renderTableTitle(out, "\n" + title + ":") t.Render() } } -func (s statAppsecEngine) table(out io.Writer, noUnit bool) { +func (s statAppsecEngine) Description() (string, string) { + return "Appsec Metrics", + `Measures the number of parsed and blocked requests by the AppSec Component.` +} + +func (s statAppsecEngine) Table(out io.Writer, noUnit bool, showEmpty bool) { t := newTable(out) t.SetRowLines(false) t.SetHeaders("Appsec Engine", "Processed", "Blocked") @@ -121,13 +135,19 @@ func (s statAppsecEngine) table(out io.Writer, noUnit bool) { keys := []string{"processed", "blocked"} if numRows, err := metricsToTable(t, s, keys, noUnit); err != nil { log.Warningf("while collecting appsec stats: %s", err) - } else if numRows > 0 { - renderTableTitle(out, "\nAppsec Metrics:") + } else if numRows > 0 || showEmpty { + title, _ := s.Description() + renderTableTitle(out, "\n" + title + ":") t.Render() } } -func (s statAppsecRule) table(out io.Writer, noUnit bool) { +func (s statAppsecRule) Description() (string, string) { + return "Appsec Rule Metrics", + `Provides “per AppSec Component” information about the number of matches for loaded AppSec Rules.` +} + +func (s statAppsecRule) Table(out io.Writer, noUnit bool, showEmpty bool) { for appsecEngine, appsecEngineRulesStats := range s { t := newTable(out) t.SetRowLines(false) @@ -136,7 +156,7 @@ func (s 
statAppsecRule) table(out io.Writer, noUnit bool) { keys := []string{"triggered"} if numRows, err := metricsToTable(t, appsecEngineRulesStats, keys, noUnit); err != nil { log.Warningf("while collecting appsec rules stats: %s", err) - } else if numRows > 0 { + } else if numRows > 0 || showEmpty{ renderTableTitle(out, fmt.Sprintf("\nAppsec '%s' Rules Metrics:", appsecEngine)) t.Render() } @@ -144,7 +164,12 @@ func (s statAppsecRule) table(out io.Writer, noUnit bool) { } -func (s statParser) table(out io.Writer, noUnit bool) { +func (s statParser) Description() (string, string) { + return "Parser Metrics", + `Tracks the number of events processed by each parser and indicates success of failure. Zero parsed lines means the parer(s) failed. Non-zero unparsed lines are fine as crowdsec select relevant lines.` +} + +func (s statParser) Table(out io.Writer, noUnit bool, showEmpty bool) { t := newTable(out) t.SetRowLines(false) t.SetHeaders("Parsers", "Hits", "Parsed", "Unparsed") @@ -154,27 +179,28 @@ func (s statParser) table(out io.Writer, noUnit bool) { if numRows, err := metricsToTable(t, s, keys, noUnit); err != nil { log.Warningf("while collecting parsers stats: %s", err) - } else if numRows > 0 { - renderTableTitle(out, "\nParser Metrics:") + } else if numRows > 0 || showEmpty { + title, _ := s.Description() + renderTableTitle(out, "\n" + title + ":") t.Render() } } -func (s statStash) table(out io.Writer) { +func (s statStash) Description() (string, string) { + return "Parser Stash Metrics", + `Tracks the status of stashes that might be created by various parsers and scenarios.` +} + +func (s statStash) Table(out io.Writer, noUnit bool, showEmpty bool) { t := newTable(out) t.SetRowLines(false) t.SetHeaders("Name", "Type", "Items") t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft) // unfortunately, we can't reuse metricsToTable as the structure is too different :/ - sortedKeys := []string{} - for k := range s { - sortedKeys = append(sortedKeys, k) - 
} - sort.Strings(sortedKeys) - numRows := 0 - for _, alabel := range sortedKeys { + + for _, alabel := range maptools.SortedKeys(s) { astats := s[alabel] row := []string{ @@ -185,27 +211,28 @@ func (s statStash) table(out io.Writer) { t.AddRow(row...) numRows++ } - if numRows > 0 { - renderTableTitle(out, "\nParser Stash Metrics:") + if numRows > 0 || showEmpty { + title, _ := s.Description() + renderTableTitle(out, "\n" + title + ":") t.Render() } } -func (s statLapi) table(out io.Writer) { +func (s statLapi) Description() (string, string) { + return "Local API Metrics", + `Monitors the requests made to local API routes.` +} + +func (s statLapi) Table(out io.Writer, noUnit bool, showEmpty bool) { t := newTable(out) t.SetRowLines(false) t.SetHeaders("Route", "Method", "Hits") t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft) // unfortunately, we can't reuse metricsToTable as the structure is too different :/ - sortedKeys := []string{} - for k := range s { - sortedKeys = append(sortedKeys, k) - } - sort.Strings(sortedKeys) - numRows := 0 - for _, alabel := range sortedKeys { + + for _, alabel := range maptools.SortedKeys(s) { astats := s[alabel] subKeys := []string{} @@ -225,13 +252,19 @@ func (s statLapi) table(out io.Writer) { } } - if numRows > 0 { - renderTableTitle(out, "\nLocal API Metrics:") + if numRows > 0 || showEmpty { + title, _ := s.Description() + renderTableTitle(out, "\n" + title + ":") t.Render() } } -func (s statLapiMachine) table(out io.Writer) { +func (s statLapiMachine) Description() (string, string) { + return "Local API Machines Metrics", + `Tracks the number of calls to the local API from each registered machine.` +} + +func (s statLapiMachine) Table(out io.Writer, noUnit bool, showEmpty bool) { t := newTable(out) t.SetRowLines(false) t.SetHeaders("Machine", "Route", "Method", "Hits") @@ -239,13 +272,19 @@ func (s statLapiMachine) table(out io.Writer) { numRows := lapiMetricsToTable(t, s) - if numRows > 0 { - 
renderTableTitle(out, "\nLocal API Machines Metrics:") + if numRows > 0 || showEmpty{ + title, _ := s.Description() + renderTableTitle(out, "\n" + title + ":") t.Render() } } -func (s statLapiBouncer) table(out io.Writer) { +func (s statLapiBouncer) Description() (string, string) { + return "Local API Bouncers Metrics", + `Tracks total hits to remediation component related API routes.` +} + +func (s statLapiBouncer) Table(out io.Writer, noUnit bool, showEmpty bool) { t := newTable(out) t.SetRowLines(false) t.SetHeaders("Bouncer", "Route", "Method", "Hits") @@ -253,13 +292,19 @@ func (s statLapiBouncer) table(out io.Writer) { numRows := lapiMetricsToTable(t, s) - if numRows > 0 { - renderTableTitle(out, "\nLocal API Bouncers Metrics:") + if numRows > 0 || showEmpty { + title, _ := s.Description() + renderTableTitle(out, "\n" + title + ":") t.Render() } } -func (s statLapiDecision) table(out io.Writer) { +func (s statLapiDecision) Description() (string, string) { + return "Local API Bouncers Decisions", + `Tracks the number of empty/non-empty answers from LAPI to bouncers that are working in "live" mode.` +} + +func (s statLapiDecision) Table(out io.Writer, noUnit bool, showEmpty bool) { t := newTable(out) t.SetRowLines(false) t.SetHeaders("Bouncer", "Empty answers", "Non-empty answers") @@ -275,13 +320,19 @@ func (s statLapiDecision) table(out io.Writer) { numRows++ } - if numRows > 0 { - renderTableTitle(out, "\nLocal API Bouncers Decisions:") + if numRows > 0 || showEmpty{ + title, _ := s.Description() + renderTableTitle(out, "\n" + title + ":") t.Render() } } -func (s statDecision) table(out io.Writer) { +func (s statDecision) Description() (string, string) { + return "Local API Decisions", + `Provides information about all currently active decisions. 
Includes both local (crowdsec) and global decisions (CAPI), and lists subscriptions (lists).` +} + +func (s statDecision) Table(out io.Writer, noUnit bool, showEmpty bool) { t := newTable(out) t.SetRowLines(false) t.SetHeaders("Reason", "Origin", "Action", "Count") @@ -302,13 +353,19 @@ func (s statDecision) table(out io.Writer) { } } - if numRows > 0 { - renderTableTitle(out, "\nLocal API Decisions:") + if numRows > 0 || showEmpty{ + title, _ := s.Description() + renderTableTitle(out, "\n" + title + ":") t.Render() } } -func (s statAlert) table(out io.Writer) { +func (s statAlert) Description() (string, string) { + return "Local API Alerts", + `Tracks the total number of past and present alerts for the installed scenarios.` +} + +func (s statAlert) Table(out io.Writer, noUnit bool, showEmpty bool) { t := newTable(out) t.SetRowLines(false) t.SetHeaders("Reason", "Count") @@ -323,8 +380,9 @@ func (s statAlert) table(out io.Writer) { numRows++ } - if numRows > 0 { - renderTableTitle(out, "\nLocal API Alerts:") + if numRows > 0 || showEmpty{ + title, _ := s.Description() + renderTableTitle(out, "\n" + title + ":") t.Render() } } diff --git a/cmd/crowdsec-cli/support.go b/cmd/crowdsec-cli/support.go index e0a2fa9db90..661950fa8f6 100644 --- a/cmd/crowdsec-cli/support.go +++ b/cmd/crowdsec-cli/support.go @@ -66,10 +66,15 @@ func collectMetrics() ([]byte, []byte, error) { } humanMetrics := bytes.NewBuffer(nil) - err := FormatPrometheusMetrics(humanMetrics, csConfig.Cscli.PrometheusUrl, "human", false) - if err != nil { - return nil, nil, fmt.Errorf("could not fetch promtheus metrics: %s", err) + ms := NewMetricStore() + + if err := ms.Fetch(csConfig.Cscli.PrometheusUrl); err != nil { + return nil, nil, fmt.Errorf("could not fetch prometheus metrics: %s", err) + } + + if err := ms.Format(humanMetrics, nil, "human", false); err != nil { + return nil, nil, err } req, err := http.NewRequest(http.MethodGet, csConfig.Cscli.PrometheusUrl, nil) diff --git 
a/test/bats/01_cscli.bats b/test/bats/01_cscli.bats index 3a5b4aad04c..60a65b98d58 100644 --- a/test/bats/01_cscli.bats +++ b/test/bats/01_cscli.bats @@ -273,15 +273,6 @@ teardown() { assert_output 'failed to authenticate to Local API (LAPI): API error: incorrect Username or Password' } -@test "cscli metrics" { - rune -0 ./instance-crowdsec start - rune -0 cscli lapi status - rune -0 cscli metrics - assert_output --partial "Route" - assert_output --partial '/v1/watchers/login' - assert_output --partial "Local API Metrics:" -} - @test "'cscli completion' with or without configuration file" { rune -0 cscli completion bash assert_output --partial "# bash completion for cscli" diff --git a/test/bats/08_metrics.bats b/test/bats/08_metrics.bats index 0275d7fd4a0..8bf30812cff 100644 --- a/test/bats/08_metrics.bats +++ b/test/bats/08_metrics.bats @@ -25,7 +25,7 @@ teardown() { @test "cscli metrics (crowdsec not running)" { rune -1 cscli metrics # crowdsec is down - assert_stderr --partial 'failed to fetch prometheus metrics: executing GET request for URL \"http://127.0.0.1:6060/metrics\" failed: Get \"http://127.0.0.1:6060/metrics\": dial tcp 127.0.0.1:6060: connect: connection refused' + assert_stderr --partial 'failed to fetch metrics: executing GET request for URL \"http://127.0.0.1:6060/metrics\" failed: Get \"http://127.0.0.1:6060/metrics\": dial tcp 127.0.0.1:6060: connect: connection refused' } @test "cscli metrics (bad configuration)" { @@ -59,3 +59,57 @@ teardown() { rune -1 cscli metrics assert_stderr --partial "prometheus is not enabled, can't show metrics" } + +@test "cscli metrics" { + rune -0 ./instance-crowdsec start + rune -0 cscli lapi status + rune -0 cscli metrics + assert_output --partial "Route" + assert_output --partial '/v1/watchers/login' + assert_output --partial "Local API Metrics:" + + rune -0 cscli metrics -o json + rune -0 jq 'keys' <(output) + assert_output --partial '"alerts",' + assert_output --partial '"parsers",' + + rune -0 cscli metrics 
-o raw + assert_output --partial 'alerts: {}' + assert_output --partial 'parsers: {}' +} + +@test "cscli metrics list" { + rune -0 cscli metrics list + assert_output --regexp "Type.*Title.*Description" + + rune -0 cscli metrics list -o json + rune -0 jq -c '.[] | [.type,.title]' <(output) + assert_line '["acquisition","Acquisition Metrics"]' + + rune -0 cscli metrics list -o raw + assert_line "- type: acquisition" + assert_line " title: Acquisition Metrics" +} + +@test "cscli metrics show" { + rune -0 ./instance-crowdsec start + rune -0 cscli lapi status + + assert_equal "$(cscli metrics)" "$(cscli metrics show)" + + rune -1 cscli metrics show foobar + assert_stderr --partial "unknown metrics type: foobar" + + rune -0 cscli metrics show lapi + assert_output --partial "Local API Metrics:" + assert_output --regexp "Route.*Method.*Hits" + assert_output --regexp "/v1/watchers/login.*POST" + + rune -0 cscli metrics show lapi -o json + rune -0 jq -c '.lapi."/v1/watchers/login" | keys' <(output) + assert_json '["POST"]' + + rune -0 cscli metrics show lapi -o raw + assert_line 'lapi:' + assert_line ' /v1/watchers/login:' +} From 4e724f6c0a54ad1c67eeab6ca3be62f00ee0cf20 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 6 Feb 2024 10:50:28 +0100 Subject: [PATCH 012/318] refact "cscli" root cmd (#2811) * refact "cscli" root cmd * lint (naming, imports, whitespace) --- .golangci.yml | 2 +- cmd/crowdsec-cli/bouncers.go | 10 +- cmd/crowdsec-cli/config_restore.go | 9 +- cmd/crowdsec-cli/dashboard.go | 24 +-- cmd/crowdsec-cli/dashboard_unsupported.go | 4 +- cmd/crowdsec-cli/decisions.go | 8 +- cmd/crowdsec-cli/hub.go | 6 +- cmd/crowdsec-cli/machines.go | 21 +-- cmd/crowdsec-cli/main.go | 207 +++++++++++++--------- cmd/crowdsec-cli/metrics.go | 31 ++-- cmd/crowdsec-cli/papi.go | 8 +- cmd/crowdsec-cli/simulation.go | 9 +- 12 files changed, 199 insertions(+), 140 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 
e1f2fc09a84..dbeb96875c5 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -53,7 +53,7 @@ linters-settings: nestif: # lower this after refactoring - min-complexity: 27 + min-complexity: 28 nlreturn: block-size: 4 diff --git a/cmd/crowdsec-cli/bouncers.go b/cmd/crowdsec-cli/bouncers.go index d2685901ebb..717e9aef5fe 100644 --- a/cmd/crowdsec-cli/bouncers.go +++ b/cmd/crowdsec-cli/bouncers.go @@ -36,13 +36,13 @@ func askYesNo(message string, defaultAnswer bool) (bool, error) { } type cliBouncers struct { - db *database.Client + db *database.Client cfg configGetter } -func NewCLIBouncers(getconfig configGetter) *cliBouncers { +func NewCLIBouncers(cfg configGetter) *cliBouncers { return &cliBouncers{ - cfg: getconfig, + cfg: cfg, } } @@ -197,13 +197,13 @@ cscli bouncers add MyBouncerName --key `, return cmd } -func (cli *cliBouncers) deleteValid(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { +func (cli *cliBouncers) deleteValid(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { bouncers, err := cli.db.ListBouncers() if err != nil { cobra.CompError("unable to list bouncers " + err.Error()) } - ret :=[]string{} + ret := []string{} for _, bouncer := range bouncers { if strings.Contains(bouncer.Name, toComplete) && !slices.Contains(args, bouncer.Name) { diff --git a/cmd/crowdsec-cli/config_restore.go b/cmd/crowdsec-cli/config_restore.go index e9c2fa9aa23..17d7494c60f 100644 --- a/cmd/crowdsec-cli/config_restore.go +++ b/cmd/crowdsec-cli/config_restore.go @@ -146,7 +146,12 @@ func restoreConfigFromDirectory(dirPath string, oldBackup bool) error { // Now we have config.yaml, we should regenerate config struct to have rights paths etc ConfigFilePath = fmt.Sprintf("%s/config.yaml", csConfig.ConfigPaths.ConfigDir) - initConfig() + log.Debug("Reloading configuration") + + csConfig, _, err = loadConfigFor("config") + if err != nil { + return fmt.Errorf("failed to reload configuration: %s", err) 
+ } backupCAPICreds := fmt.Sprintf("%s/online_api_credentials.yaml", dirPath) if _, err = os.Stat(backupCAPICreds); err == nil { @@ -227,7 +232,7 @@ func restoreConfigFromDirectory(dirPath string, oldBackup bool) error { } } - // if there is files in the acquis backup dir, restore them + // if there are files in the acquis backup dir, restore them acquisBackupDir := filepath.Join(dirPath, "acquis", "*.yaml") if acquisFiles, err := filepath.Glob(acquisBackupDir); err == nil { for _, acquisFile := range acquisFiles { diff --git a/cmd/crowdsec-cli/dashboard.go b/cmd/crowdsec-cli/dashboard.go index 64cb7577e89..59b9e67cd94 100644 --- a/cmd/crowdsec-cli/dashboard.go +++ b/cmd/crowdsec-cli/dashboard.go @@ -19,15 +19,14 @@ import ( log "github.com/sirupsen/logrus" "github.com/spf13/cobra" - "github.com/crowdsecurity/crowdsec/pkg/metabase" - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" + "github.com/crowdsecurity/crowdsec/pkg/metabase" ) var ( metabaseUser = "crowdsec@crowdsec.net" metabasePassword string - metabaseDbPath string + metabaseDBPath string metabaseConfigPath string metabaseConfigFolder = "metabase/" metabaseConfigFile = "metabase.yaml" @@ -43,13 +42,13 @@ var ( // information needed to set up a random password on user's behalf ) -type cliDashboard struct{ +type cliDashboard struct { cfg configGetter } -func NewCLIDashboard(getconfig configGetter) *cliDashboard { +func NewCLIDashboard(cfg configGetter) *cliDashboard { return &cliDashboard{ - cfg: getconfig, + cfg: cfg, } } @@ -99,6 +98,7 @@ cscli dashboard remove metabaseContainerID = oldContainerID } } + return nil }, } @@ -127,8 +127,8 @@ cscli dashboard setup --listen 0.0.0.0 cscli dashboard setup -l 0.0.0.0 -p 443 --password `, RunE: func(_ *cobra.Command, _ []string) error { - if metabaseDbPath == "" { - metabaseDbPath = cli.cfg().ConfigPaths.DataDir + if metabaseDBPath == "" { + metabaseDBPath = cli.cfg().ConfigPaths.DataDir } if metabasePassword == "" { @@ -152,7 +152,7 @@ cscli dashboard 
setup -l 0.0.0.0 -p 443 --password if err = cli.chownDatabase(dockerGroup.Gid); err != nil { return err } - mb, err := metabase.SetupMetabase(cli.cfg().API.Server.DbConfig, metabaseListenAddress, metabaseListenPort, metabaseUser, metabasePassword, metabaseDbPath, dockerGroup.Gid, metabaseContainerID, metabaseImage) + mb, err := metabase.SetupMetabase(cli.cfg().API.Server.DbConfig, metabaseListenAddress, metabaseListenPort, metabaseUser, metabasePassword, metabaseDBPath, dockerGroup.Gid, metabaseContainerID, metabaseImage) if err != nil { return err } @@ -165,13 +165,14 @@ cscli dashboard setup -l 0.0.0.0 -p 443 --password fmt.Printf("\tURL : '%s'\n", mb.Config.ListenURL) fmt.Printf("\tusername : '%s'\n", mb.Config.Username) fmt.Printf("\tpassword : '%s'\n", mb.Config.Password) + return nil }, } flags := cmd.Flags() flags.BoolVarP(&force, "force", "f", false, "Force setup : override existing files") - flags.StringVarP(&metabaseDbPath, "dir", "d", "", "Shared directory with metabase container") + flags.StringVarP(&metabaseDBPath, "dir", "d", "", "Shared directory with metabase container") flags.StringVarP(&metabaseListenAddress, "listen", "l", metabaseListenAddress, "Listen address of container") flags.StringVar(&metabaseImage, "metabase-image", metabaseImage, "Metabase image to use") flags.StringVarP(&metabaseListenPort, "port", "p", metabaseListenPort, "Listen port of container") @@ -203,6 +204,7 @@ func (cli *cliDashboard) newStartCmd() *cobra.Command { } log.Infof("Started metabase") log.Infof("url : http://%s:%s", mb.Config.ListenAddr, mb.Config.ListenPort) + return nil }, } @@ -241,6 +243,7 @@ func (cli *cliDashboard) newShowPasswordCmd() *cobra.Command { return err } log.Printf("'%s'", m.Config.Password) + return nil }, } @@ -313,6 +316,7 @@ cscli dashboard remove --force } } } + return nil }, } diff --git a/cmd/crowdsec-cli/dashboard_unsupported.go b/cmd/crowdsec-cli/dashboard_unsupported.go index 4cf8e18b503..cc80abd2528 100644 --- 
a/cmd/crowdsec-cli/dashboard_unsupported.go +++ b/cmd/crowdsec-cli/dashboard_unsupported.go @@ -13,9 +13,9 @@ type cliDashboard struct{ cfg configGetter } -func NewCLIDashboard(getconfig configGetter) *cliDashboard { +func NewCLIDashboard(cfg configGetter) *cliDashboard { return &cliDashboard{ - cfg: getconfig, + cfg: cfg, } } diff --git a/cmd/crowdsec-cli/decisions.go b/cmd/crowdsec-cli/decisions.go index c5839ae0079..d7165367898 100644 --- a/cmd/crowdsec-cli/decisions.go +++ b/cmd/crowdsec-cli/decisions.go @@ -116,14 +116,13 @@ func (cli *cliDecisions) decisionsToTable(alerts *models.GetAlertsResponse, prin return nil } - type cliDecisions struct { cfg configGetter } -func NewCLIDecisions(getconfig configGetter) *cliDecisions { +func NewCLIDecisions(cfg configGetter) *cliDecisions { return &cliDecisions{ - cfg: getconfig, + cfg: cfg, } } @@ -157,6 +156,7 @@ func (cli *cliDecisions) NewCommand() *cobra.Command { if err != nil { return fmt.Errorf("creating api client: %w", err) } + return nil }, } @@ -393,6 +393,7 @@ cscli decisions add --scope username --value foobar } log.Info("Decision successfully added") + return nil }, } @@ -499,6 +500,7 @@ cscli decisions delete --type captcha } } log.Infof("%s decision(s) deleted", decisions.NbDeleted) + return nil }, } diff --git a/cmd/crowdsec-cli/hub.go b/cmd/crowdsec-cli/hub.go index d3ce380bb6f..600e56889f7 100644 --- a/cmd/crowdsec-cli/hub.go +++ b/cmd/crowdsec-cli/hub.go @@ -13,13 +13,13 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -type cliHub struct{ +type cliHub struct { cfg configGetter } -func NewCLIHub(getconfig configGetter) *cliHub { +func NewCLIHub(cfg configGetter) *cliHub { return &cliHub{ - cfg: getconfig, + cfg: cfg, } } diff --git a/cmd/crowdsec-cli/machines.go b/cmd/crowdsec-cli/machines.go index 1819bdcf5fb..7c9b9708c92 100644 --- a/cmd/crowdsec-cli/machines.go +++ b/cmd/crowdsec-cli/machines.go @@ -7,6 +7,7 @@ import ( "fmt" "math/big" "os" + "slices" "strings" "time" @@ -17,7 +18,6 @@ 
import ( log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "gopkg.in/yaml.v3" - "slices" "github.com/crowdsecurity/machineid" @@ -106,14 +106,14 @@ func getLastHeartbeat(m *ent.Machine) (string, bool) { return hb, true } -type cliMachines struct{ - db *database.Client +type cliMachines struct { + db *database.Client cfg configGetter } -func NewCLIMachines(getconfig configGetter) *cliMachines { +func NewCLIMachines(cfg configGetter) *cliMachines { return &cliMachines{ - cfg: getconfig, + cfg: cfg, } } @@ -136,6 +136,7 @@ Note: This command requires database direct access, so is intended to be run on if err != nil { return fmt.Errorf("unable to create new database client: %s", err) } + return nil }, } @@ -249,7 +250,7 @@ cscli machines add -f- --auto > /tmp/mycreds.yaml`, func (cli *cliMachines) add(args []string, machinePassword string, dumpFile string, apiURL string, interactive bool, autoAdd bool, force bool) error { var ( - err error + err error machineID string ) @@ -347,7 +348,7 @@ func (cli *cliMachines) add(args []string, machinePassword string, dumpFile stri return nil } -func (cli *cliMachines) deleteValid(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { +func (cli *cliMachines) deleteValid(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { machines, err := cli.db.ListMachines() if err != nil { cobra.CompError("unable to list machines " + err.Error()) @@ -447,9 +448,9 @@ func (cli *cliMachines) prune(duration time.Duration, notValidOnly bool, force b func (cli *cliMachines) newPruneCmd() *cobra.Command { var ( - duration time.Duration - notValidOnly bool - force bool + duration time.Duration + notValidOnly bool + force bool ) const defaultDuration = 10 * time.Minute diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index 3b20cf112c0..62b85e63047 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -15,45 +15,88 @@ import ( 
"github.com/crowdsecurity/crowdsec/pkg/fflag" ) -var trace_lvl, dbg_lvl, nfo_lvl, wrn_lvl, err_lvl bool - var ConfigFilePath string var csConfig *csconfig.Config var dbClient *database.Client -var outputFormat string -var OutputColor string +type configGetter func() *csconfig.Config var mergedConfig string -// flagBranch overrides the value in csConfig.Cscli.HubBranch -var flagBranch = "" +type cliRoot struct { + logTrace bool + logDebug bool + logInfo bool + logWarn bool + logErr bool + outputColor string + outputFormat string + // flagBranch overrides the value in csConfig.Cscli.HubBranch + flagBranch string +} -type configGetter func() *csconfig.Config +func newCliRoot() *cliRoot { + return &cliRoot{} +} -func initConfig() { - var err error +// cfg() is a helper function to get the configuration loaded from config.yaml, +// we pass it to subcommands because the file is not read until the Execute() call +func (cli *cliRoot) cfg() *csconfig.Config { + return csConfig +} - if trace_lvl { - log.SetLevel(log.TraceLevel) - } else if dbg_lvl { - log.SetLevel(log.DebugLevel) - } else if nfo_lvl { - log.SetLevel(log.InfoLevel) - } else if wrn_lvl { - log.SetLevel(log.WarnLevel) - } else if err_lvl { - log.SetLevel(log.ErrorLevel) +// wantedLogLevel returns the log level requested in the command line flags. +func (cli *cliRoot) wantedLogLevel() log.Level { + switch { + case cli.logTrace: + return log.TraceLevel + case cli.logDebug: + return log.DebugLevel + case cli.logInfo: + return log.InfoLevel + case cli.logWarn: + return log.WarnLevel + case cli.logErr: + return log.ErrorLevel + default: + return log.InfoLevel + } +} + +// loadConfigFor loads the configuration file for the given sub-command. +// If the sub-command does not need it, it returns a default configuration. 
+func loadConfigFor(command string) (*csconfig.Config, string, error) { + noNeedConfig := []string{ + "doc", + "help", + "completion", + "version", + "hubtest", } - if !slices.Contains(NoNeedConfig, os.Args[1]) { + if !slices.Contains(noNeedConfig, command) { log.Debugf("Using %s as configuration file", ConfigFilePath) - csConfig, mergedConfig, err = csconfig.NewConfig(ConfigFilePath, false, false, true) + + config, merged, err := csconfig.NewConfig(ConfigFilePath, false, false, true) if err != nil { - log.Fatal(err) + return nil, "", err } - } else { - csConfig = csconfig.NewDefaultConfig() + + return config, merged, nil + } + + return csconfig.NewDefaultConfig(), "", nil +} + +// initialize is called before the subcommand is executed. +func (cli *cliRoot) initialize() { + var err error + + log.SetLevel(cli.wantedLogLevel()) + + csConfig, mergedConfig, err = loadConfigFor(os.Args[1]) + if err != nil { + log.Fatal(err) } // recap of the enabled feature flags, because logging @@ -62,12 +105,12 @@ func initConfig() { log.Debugf("Enabled feature flags: %s", fflist) } - if flagBranch != "" { - csConfig.Cscli.HubBranch = flagBranch + if cli.flagBranch != "" { + csConfig.Cscli.HubBranch = cli.flagBranch } - if outputFormat != "" { - csConfig.Cscli.Output = outputFormat + if cli.outputFormat != "" { + csConfig.Cscli.Output = cli.outputFormat } if csConfig.Cscli.Output == "" { @@ -85,11 +128,11 @@ func initConfig() { log.SetLevel(log.ErrorLevel) } - if OutputColor != "" { - csConfig.Cscli.Color = OutputColor + if cli.outputColor != "" { + csConfig.Cscli.Color = cli.outputColor - if OutputColor != "yes" && OutputColor != "no" && OutputColor != "auto" { - log.Fatalf("output color %s unknown", OutputColor) + if cli.outputColor != "yes" && cli.outputColor != "no" && cli.outputColor != "auto" { + log.Fatalf("output color %s unknown", cli.outputColor) } } } @@ -102,15 +145,25 @@ var validArgs = []string{ "postoverflows", "scenarios", "simulation", "support", "version", } -var 
NoNeedConfig = []string{ - "doc", - "help", - "completion", - "version", - "hubtest", +func (cli *cliRoot) colorize(cmd *cobra.Command) { + cc.Init(&cc.Config{ + RootCmd: cmd, + Headings: cc.Yellow, + Commands: cc.Green + cc.Bold, + CmdShortDescr: cc.Cyan, + Example: cc.Italic, + ExecName: cc.Bold, + Aliases: cc.Bold + cc.Italic, + FlagsDataType: cc.White, + Flags: cc.Green, + FlagsDescr: cc.Cyan, + NoExtraNewlines: true, + NoBottomNewline: true, + }) + cmd.SetOut(color.Output) } -func main() { +func (cli *cliRoot) NewCommand() *cobra.Command { // set the formatter asap and worry about level later logFormatter := &log.TextFormatter{TimestampFormat: time.RFC3339, FullTimestamp: true} log.SetFormatter(logFormatter) @@ -135,33 +188,25 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall /*TBD examples*/ } - cc.Init(&cc.Config{ - RootCmd: cmd, - Headings: cc.Yellow, - Commands: cc.Green + cc.Bold, - CmdShortDescr: cc.Cyan, - Example: cc.Italic, - ExecName: cc.Bold, - Aliases: cc.Bold + cc.Italic, - FlagsDataType: cc.White, - Flags: cc.Green, - FlagsDescr: cc.Cyan, - NoExtraNewlines: true, - NoBottomNewline: true, - }) - cmd.SetOut(color.Output) + cli.colorize(cmd) + + /*don't sort flags so we can enforce order*/ + cmd.Flags().SortFlags = false + + pflags := cmd.PersistentFlags() + pflags.SortFlags = false + + pflags.StringVarP(&ConfigFilePath, "config", "c", csconfig.DefaultConfigPath("config.yaml"), "path to crowdsec config file") + pflags.StringVarP(&cli.outputFormat, "output", "o", "", "Output format: human, json, raw") + pflags.StringVarP(&cli.outputColor, "color", "", "auto", "Output color: yes, no, auto") + pflags.BoolVar(&cli.logDebug, "debug", false, "Set logging to debug") + pflags.BoolVar(&cli.logInfo, "info", false, "Set logging to info") + pflags.BoolVar(&cli.logWarn, "warning", false, "Set logging to warning") + pflags.BoolVar(&cli.logErr, "error", false, "Set logging to error") + pflags.BoolVar(&cli.logTrace, "trace", false, 
"Set logging to trace") + pflags.StringVar(&cli.flagBranch, "branch", "", "Override hub branch on github") - cmd.PersistentFlags().StringVarP(&ConfigFilePath, "config", "c", csconfig.DefaultConfigPath("config.yaml"), "path to crowdsec config file") - cmd.PersistentFlags().StringVarP(&outputFormat, "output", "o", "", "Output format: human, json, raw") - cmd.PersistentFlags().StringVarP(&OutputColor, "color", "", "auto", "Output color: yes, no, auto") - cmd.PersistentFlags().BoolVar(&dbg_lvl, "debug", false, "Set logging to debug") - cmd.PersistentFlags().BoolVar(&nfo_lvl, "info", false, "Set logging to info") - cmd.PersistentFlags().BoolVar(&wrn_lvl, "warning", false, "Set logging to warning") - cmd.PersistentFlags().BoolVar(&err_lvl, "error", false, "Set logging to error") - cmd.PersistentFlags().BoolVar(&trace_lvl, "trace", false, "Set logging to trace") - cmd.PersistentFlags().StringVar(&flagBranch, "branch", "", "Override hub branch on github") - - if err := cmd.PersistentFlags().MarkHidden("branch"); err != nil { + if err := pflags.MarkHidden("branch"); err != nil { log.Fatalf("failed to hide flag: %s", err) } @@ -181,29 +226,20 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall } if len(os.Args) > 1 { - cobra.OnInitialize(initConfig) - } - - /*don't sort flags so we can enforce order*/ - cmd.Flags().SortFlags = false - cmd.PersistentFlags().SortFlags = false - - // we use a getter because the config is not initialized until the Execute() call - getconfig := func() *csconfig.Config { - return csConfig + cobra.OnInitialize(cli.initialize) } cmd.AddCommand(NewCLIDoc().NewCommand(cmd)) cmd.AddCommand(NewCLIVersion().NewCommand()) cmd.AddCommand(NewConfigCmd()) - cmd.AddCommand(NewCLIHub(getconfig).NewCommand()) - cmd.AddCommand(NewCLIMetrics(getconfig).NewCommand()) - cmd.AddCommand(NewCLIDashboard(getconfig).NewCommand()) - cmd.AddCommand(NewCLIDecisions(getconfig).NewCommand()) + cmd.AddCommand(NewCLIHub(cli.cfg).NewCommand()) + 
cmd.AddCommand(NewCLIMetrics(cli.cfg).NewCommand()) + cmd.AddCommand(NewCLIDashboard(cli.cfg).NewCommand()) + cmd.AddCommand(NewCLIDecisions(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIAlerts().NewCommand()) - cmd.AddCommand(NewCLISimulation(getconfig).NewCommand()) - cmd.AddCommand(NewCLIBouncers(getconfig).NewCommand()) - cmd.AddCommand(NewCLIMachines(getconfig).NewCommand()) + cmd.AddCommand(NewCLISimulation(cli.cfg).NewCommand()) + cmd.AddCommand(NewCLIBouncers(cli.cfg).NewCommand()) + cmd.AddCommand(NewCLIMachines(cli.cfg).NewCommand()) cmd.AddCommand(NewCLICapi().NewCommand()) cmd.AddCommand(NewLapiCmd()) cmd.AddCommand(NewCompletionCmd()) @@ -212,7 +248,7 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewCLIHubTest().NewCommand()) cmd.AddCommand(NewCLINotifications().NewCommand()) cmd.AddCommand(NewCLISupport().NewCommand()) - cmd.AddCommand(NewCLIPapi(getconfig).NewCommand()) + cmd.AddCommand(NewCLIPapi(cli.cfg).NewCommand()) cmd.AddCommand(NewCLICollection().NewCommand()) cmd.AddCommand(NewCLIParser().NewCommand()) cmd.AddCommand(NewCLIScenario().NewCommand()) @@ -225,6 +261,11 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewSetupCmd()) } + return cmd +} + +func main() { + cmd := newCliRoot().NewCommand() if err := cmd.Execute(); err != nil { log.Fatal(err) } diff --git a/cmd/crowdsec-cli/metrics.go b/cmd/crowdsec-cli/metrics.go index c883c809291..7df52841ecb 100644 --- a/cmd/crowdsec-cli/metrics.go +++ b/cmd/crowdsec-cli/metrics.go @@ -50,18 +50,18 @@ type metricStore map[string]metricSection func NewMetricStore() metricStore { return metricStore{ - "acquisition": statAcquis{}, - "buckets": statBucket{}, - "parsers": statParser{}, - "lapi": statLapi{}, - "lapi-machine": statLapiMachine{}, - "lapi-bouncer": statLapiBouncer{}, + "acquisition": statAcquis{}, + "buckets": statBucket{}, + "parsers": statParser{}, + "lapi": statLapi{}, + "lapi-machine": 
statLapiMachine{}, + "lapi-bouncer": statLapiBouncer{}, "lapi-decisions": statLapiDecision{}, - "decisions": statDecision{}, - "alerts": statAlert{}, - "stash": statStash{}, - "appsec-engine": statAppsecEngine{}, - "appsec-rule": statAppsecRule{}, + "decisions": statDecision{}, + "alerts": statAlert{}, + "stash": statStash{}, + "appsec-engine": statAppsecEngine{}, + "appsec-rule": statAppsecRule{}, } } @@ -116,17 +116,21 @@ func (ms metricStore) Fetch(url string) error { if !strings.HasPrefix(fam.Name, "cs_") { continue } + log.Tracef("round %d", idx) + for _, m := range fam.Metrics { metric, ok := m.(prom2json.Metric) if !ok { log.Debugf("failed to convert metric to prom2json.Metric") continue } + name, ok := metric.Labels["name"] if !ok { log.Debugf("no name in Metric %v", metric.Labels) } + source, ok := metric.Labels["source"] if !ok { log.Debugf("no source in Metric %v for %s", metric.Labels, fam.Name) @@ -153,6 +157,7 @@ func (ms metricStore) Fetch(url string) error { if err != nil { log.Errorf("Unexpected int value %s : %s", value, err) } + ival := int(fval) switch fam.Name { /*buckets*/ @@ -303,9 +308,9 @@ type cliMetrics struct { cfg configGetter } -func NewCLIMetrics(getconfig configGetter) *cliMetrics { +func NewCLIMetrics(cfg configGetter) *cliMetrics { return &cliMetrics{ - cfg: getconfig, + cfg: cfg, } } diff --git a/cmd/crowdsec-cli/papi.go b/cmd/crowdsec-cli/papi.go index 04223ef93ab..e18af94d4bb 100644 --- a/cmd/crowdsec-cli/papi.go +++ b/cmd/crowdsec-cli/papi.go @@ -10,19 +10,18 @@ import ( "github.com/crowdsecurity/go-cs-lib/ptr" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/apiserver" "github.com/crowdsecurity/crowdsec/pkg/database" - - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" ) type cliPapi struct { cfg configGetter } -func NewCLIPapi(getconfig configGetter) *cliPapi { +func NewCLIPapi(cfg configGetter) *cliPapi { return &cliPapi{ - cfg: getconfig, + cfg: cfg, } } @@ 
-43,6 +42,7 @@ func (cli *cliPapi) NewCommand() *cobra.Command { if err := require.PAPI(cfg); err != nil { return err } + return nil }, } diff --git a/cmd/crowdsec-cli/simulation.go b/cmd/crowdsec-cli/simulation.go index a6e710c5747..6ccac761727 100644 --- a/cmd/crowdsec-cli/simulation.go +++ b/cmd/crowdsec-cli/simulation.go @@ -3,23 +3,23 @@ package main import ( "fmt" "os" + "slices" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "gopkg.in/yaml.v2" - "slices" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -type cliSimulation struct{ +type cliSimulation struct { cfg configGetter } -func NewCLISimulation(getconfig configGetter) *cliSimulation { +func NewCLISimulation(cfg configGetter) *cliSimulation { return &cliSimulation{ - cfg: getconfig, + cfg: cfg, } } @@ -38,6 +38,7 @@ cscli simulation disable crowdsecurity/ssh-bf`, if cli.cfg().Cscli.SimulationConfig == nil { return fmt.Errorf("no simulation configured") } + return nil }, PersistentPostRun: func(cmd *cobra.Command, _ []string) { From 3208a40ef337a50a815d67f5337f9c31b6926cbf Mon Sep 17 00:00:00 2001 From: "Thibault \"bui\" Koechlin" Date: Tue, 6 Feb 2024 18:04:17 +0100 Subject: [PATCH 013/318] Dedicated whitelist metrics (#2813) * add proper whitelist metrics : both its own table and an extension to acquis metrics to track discarded/whitelisted lines --- .golangci.yml | 6 +- cmd/crowdsec-cli/metrics.go | 98 ++++++++++++++++++++++--------- cmd/crowdsec-cli/metrics_table.go | 88 +++++++++++++++++++++------ cmd/crowdsec/metrics.go | 4 +- pkg/parser/node.go | 4 +- pkg/parser/runtime.go | 18 ++++++ pkg/parser/whitelist.go | 14 ++++- pkg/parser/whitelist_test.go | 4 +- 8 files changed, 178 insertions(+), 58 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index dbeb96875c5..a3aacccb817 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -11,7 +11,7 @@ run: linters-settings: cyclop: # lower this after refactoring - max-complexity: 66 + 
max-complexity: 70 gci: sections: @@ -22,11 +22,11 @@ linters-settings: gocognit: # lower this after refactoring - min-complexity: 145 + min-complexity: 150 gocyclo: # lower this after refactoring - min-complexity: 64 + min-complexity: 70 funlen: # Checks the number of lines in a function. diff --git a/cmd/crowdsec-cli/metrics.go b/cmd/crowdsec-cli/metrics.go index 7df52841ecb..6b3155e5549 100644 --- a/cmd/crowdsec-cli/metrics.go +++ b/cmd/crowdsec-cli/metrics.go @@ -21,21 +21,22 @@ import ( ) type ( - statAcquis map[string]map[string]int - statParser map[string]map[string]int - statBucket map[string]map[string]int - statLapi map[string]map[string]int - statLapiMachine map[string]map[string]map[string]int - statLapiBouncer map[string]map[string]map[string]int + statAcquis map[string]map[string]int + statParser map[string]map[string]int + statBucket map[string]map[string]int + statWhitelist map[string]map[string]map[string]int + statLapi map[string]map[string]int + statLapiMachine map[string]map[string]map[string]int + statLapiBouncer map[string]map[string]map[string]int statLapiDecision map[string]struct { NonEmpty int Empty int } - statDecision map[string]map[string]map[string]int + statDecision map[string]map[string]map[string]int statAppsecEngine map[string]map[string]int - statAppsecRule map[string]map[string]map[string]int - statAlert map[string]int - statStash map[string]struct { + statAppsecRule map[string]map[string]map[string]int + statAlert map[string]int + statStash map[string]struct { Type string Count int } @@ -62,6 +63,7 @@ func NewMetricStore() metricStore { "stash": statStash{}, "appsec-engine": statAppsecEngine{}, "appsec-rule": statAppsecRule{}, + "whitelists": statWhitelist{}, } } @@ -111,6 +113,7 @@ func (ms metricStore) Fetch(url string) error { mAppsecRule := ms["appsec-rule"].(statAppsecRule) mAlert := ms["alerts"].(statAlert) mStash := ms["stash"].(statStash) + mWhitelist := ms["whitelists"].(statWhitelist) for idx, fam := range result { if 
!strings.HasPrefix(fam.Name, "cs_") { @@ -160,7 +163,9 @@ func (ms metricStore) Fetch(url string) error { ival := int(fval) switch fam.Name { - /*buckets*/ + // + // buckets + // case "cs_bucket_created_total": if _, ok := mBucket[name]; !ok { mBucket[name] = make(map[string]int) @@ -190,7 +195,9 @@ func (ms metricStore) Fetch(url string) error { mBucket[name] = make(map[string]int) } mBucket[name]["underflow"] += ival - /*acquis*/ + // + // parsers + // case "cs_parser_hits_total": if _, ok := mAcquis[source]; !ok { mAcquis[source] = make(map[string]int) @@ -221,6 +228,33 @@ func (ms metricStore) Fetch(url string) error { mParser[name] = make(map[string]int) } mParser[name]["unparsed"] += ival + // + // whitelists + // + case "cs_node_wl_hits_total": + if _, ok := mWhitelist[name]; !ok { + mWhitelist[name] = make(map[string]map[string]int) + } + if _, ok := mWhitelist[name][reason]; !ok { + mWhitelist[name][reason] = make(map[string]int) + } + mWhitelist[name][reason]["hits"] += ival + case "cs_node_wl_hits_ok_total": + if _, ok := mWhitelist[name]; !ok { + mWhitelist[name] = make(map[string]map[string]int) + } + if _, ok := mWhitelist[name][reason]; !ok { + mWhitelist[name][reason] = make(map[string]int) + } + mWhitelist[name][reason]["whitelisted"] += ival + // track as well whitelisted lines at acquis level + if _, ok := mAcquis[source]; !ok { + mAcquis[source] = make(map[string]int) + } + mAcquis[source]["whitelisted"] += ival + // + // lapi + // case "cs_lapi_route_requests_total": if _, ok := mLapi[route]; !ok { mLapi[route] = make(map[string]int) @@ -256,6 +290,9 @@ func (ms metricStore) Fetch(url string) error { x.NonEmpty += ival } mLapiDecision[bouncer] = x + // + // decisions + // case "cs_active_decisions": if _, ok := mDecision[reason]; !ok { mDecision[reason] = make(map[string]map[string]int) @@ -265,15 +302,18 @@ func (ms metricStore) Fetch(url string) error { } mDecision[reason][origin][action] += ival case "cs_alerts": - /*if _, ok := 
mAlert[scenario]; !ok { - mAlert[scenario] = make(map[string]int) - }*/ mAlert[reason] += ival + // + // stash + // case "cs_cache_size": mStash[name] = struct { Type string Count int }{Type: mtype, Count: ival} + // + // appsec + // case "cs_appsec_reqs_total": if _, ok := mAppsecEngine[metric.Labels["appsec_engine"]]; !ok { mAppsecEngine[metric.Labels["appsec_engine"]] = make(map[string]int, 0) @@ -392,15 +432,15 @@ func (cli *cliMetrics) show(sections []string, url string, noUnit bool) error { func (cli *cliMetrics) NewCommand() *cobra.Command { var ( - url string + url string noUnit bool ) cmd := &cobra.Command{ - Use: "metrics", - Short: "Display crowdsec prometheus metrics.", - Long: `Fetch metrics from a Local API server and display them`, - Example: `# Show all Metrics, skip empty tables (same as "cecli metrics show") + Use: "metrics", + Short: "Display crowdsec prometheus metrics.", + Long: `Fetch metrics from a Local API server and display them`, + Example: `# Show all Metrics, skip empty tables (same as "cecli metrics show") cscli metrics # Show only some metrics, connect to a different url @@ -431,7 +471,7 @@ func (cli *cliMetrics) expandSectionGroups(args []string) []string { for _, section := range args { switch section { case "engine": - ret = append(ret, "acquisition", "parsers", "buckets", "stash") + ret = append(ret, "acquisition", "parsers", "buckets", "stash", "whitelists") case "lapi": ret = append(ret, "alerts", "decisions", "lapi", "lapi-bouncer", "lapi-decisions", "lapi-machine") case "appsec": @@ -446,15 +486,15 @@ func (cli *cliMetrics) expandSectionGroups(args []string) []string { func (cli *cliMetrics) newShowCmd() *cobra.Command { var ( - url string + url string noUnit bool ) cmd := &cobra.Command{ - Use: "show [type]...", - Short: "Display all or part of the available metrics.", - Long: `Fetch metrics from a Local API server and display them, optionally filtering on specific types.`, - Example: `# Show all Metrics, skip empty tables + 
Use: "show [type]...", + Short: "Display all or part of the available metrics.", + Long: `Fetch metrics from a Local API server and display them, optionally filtering on specific types.`, + Example: `# Show all Metrics, skip empty tables cscli metrics show # Use an alias: "engine", "lapi" or "appsec" to show a group of metrics @@ -482,9 +522,9 @@ cscli metrics show acquisition parsers buckets stash -o json`, func (cli *cliMetrics) list() error { type metricType struct { - Type string `json:"type" yaml:"type"` - Title string `json:"title" yaml:"title"` - Description string `json:"description" yaml:"description"` + Type string `json:"type" yaml:"type"` + Title string `json:"title" yaml:"title"` + Description string `json:"description" yaml:"description"` } var allMetrics []metricType diff --git a/cmd/crowdsec-cli/metrics_table.go b/cmd/crowdsec-cli/metrics_table.go index 72f53f94c49..f11ee11bf88 100644 --- a/cmd/crowdsec-cli/metrics_table.go +++ b/cmd/crowdsec-cli/metrics_table.go @@ -45,6 +45,38 @@ func lapiMetricsToTable(t *table.Table, stats map[string]map[string]map[string]i return numRows } +func wlMetricsToTable(t *table.Table, stats map[string]map[string]map[string]int, noUnit bool) (int, error) { + if t == nil { + return 0, fmt.Errorf("nil table") + } + + numRows := 0 + + for _, name := range maptools.SortedKeys(stats) { + for _, reason := range maptools.SortedKeys(stats[name]) { + row := make([]string, 4) + row[0] = name + row[1] = reason + row[2] = "-" + row[3] = "-" + + for _, action := range maptools.SortedKeys(stats[name][reason]) { + value := stats[name][reason][action] + if action == "whitelisted" { + row[3] = fmt.Sprintf("%d", value) + } else if action == "hits" { + row[2] = fmt.Sprintf("%d", value) + } else { + log.Debugf("unexpected counter '%s' for whitelists = %d", action, value) + } + } + t.AddRow(row...) 
+ numRows++ + } + } + return numRows, nil +} + func metricsToTable(t *table.Table, stats map[string]map[string]int, keys []string, noUnit bool) (int, error) { if t == nil { return 0, fmt.Errorf("nil table") @@ -95,7 +127,7 @@ func (s statBucket) Table(out io.Writer, noUnit bool, showEmpty bool) { log.Warningf("while collecting bucket stats: %s", err) } else if numRows > 0 || showEmpty { title, _ := s.Description() - renderTableTitle(out, "\n" + title + ":") + renderTableTitle(out, "\n"+title+":") t.Render() } } @@ -108,16 +140,16 @@ func (s statAcquis) Description() (string, string) { func (s statAcquis) Table(out io.Writer, noUnit bool, showEmpty bool) { t := newTable(out) t.SetRowLines(false) - t.SetHeaders("Source", "Lines read", "Lines parsed", "Lines unparsed", "Lines poured to bucket") + t.SetHeaders("Source", "Lines read", "Lines parsed", "Lines unparsed", "Lines poured to bucket", "Lines whitelisted") t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) - keys := []string{"reads", "parsed", "unparsed", "pour"} + keys := []string{"reads", "parsed", "unparsed", "pour", "whitelisted"} if numRows, err := metricsToTable(t, s, keys, noUnit); err != nil { log.Warningf("while collecting acquis stats: %s", err) } else if numRows > 0 || showEmpty { title, _ := s.Description() - renderTableTitle(out, "\n" + title + ":") + renderTableTitle(out, "\n"+title+":") t.Render() } } @@ -137,7 +169,7 @@ func (s statAppsecEngine) Table(out io.Writer, noUnit bool, showEmpty bool) { log.Warningf("while collecting appsec stats: %s", err) } else if numRows > 0 || showEmpty { title, _ := s.Description() - renderTableTitle(out, "\n" + title + ":") + renderTableTitle(out, "\n"+title+":") t.Render() } } @@ -156,7 +188,7 @@ func (s statAppsecRule) Table(out io.Writer, noUnit bool, showEmpty bool) { keys := []string{"triggered"} if numRows, err := metricsToTable(t, appsecEngineRulesStats, keys, noUnit); err != nil { log.Warningf("while 
collecting appsec rules stats: %s", err) - } else if numRows > 0 || showEmpty{ + } else if numRows > 0 || showEmpty { renderTableTitle(out, fmt.Sprintf("\nAppsec '%s' Rules Metrics:", appsecEngine)) t.Render() } @@ -164,6 +196,26 @@ func (s statAppsecRule) Table(out io.Writer, noUnit bool, showEmpty bool) { } +func (s statWhitelist) Description() (string, string) { + return "Whitelist Metrics", + `Tracks the number of events processed and possibly whitelisted by each parser whitelist.` +} + +func (s statWhitelist) Table(out io.Writer, noUnit bool, showEmpty bool) { + t := newTable(out) + t.SetRowLines(false) + t.SetHeaders("Whitelist", "Reason", "Hits", "Whitelisted") + t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) + + if numRows, err := wlMetricsToTable(t, s, noUnit); err != nil { + log.Warningf("while collecting parsers stats: %s", err) + } else if numRows > 0 || showEmpty { + title, _ := s.Description() + renderTableTitle(out, "\n"+title+":") + t.Render() + } +} + func (s statParser) Description() (string, string) { return "Parser Metrics", `Tracks the number of events processed by each parser and indicates success of failure. Zero parsed lines means the parer(s) failed. 
Non-zero unparsed lines are fine as crowdsec select relevant lines.` @@ -181,7 +233,7 @@ func (s statParser) Table(out io.Writer, noUnit bool, showEmpty bool) { log.Warningf("while collecting parsers stats: %s", err) } else if numRows > 0 || showEmpty { title, _ := s.Description() - renderTableTitle(out, "\n" + title + ":") + renderTableTitle(out, "\n"+title+":") t.Render() } } @@ -213,7 +265,7 @@ func (s statStash) Table(out io.Writer, noUnit bool, showEmpty bool) { } if numRows > 0 || showEmpty { title, _ := s.Description() - renderTableTitle(out, "\n" + title + ":") + renderTableTitle(out, "\n"+title+":") t.Render() } } @@ -254,7 +306,7 @@ func (s statLapi) Table(out io.Writer, noUnit bool, showEmpty bool) { if numRows > 0 || showEmpty { title, _ := s.Description() - renderTableTitle(out, "\n" + title + ":") + renderTableTitle(out, "\n"+title+":") t.Render() } } @@ -272,9 +324,9 @@ func (s statLapiMachine) Table(out io.Writer, noUnit bool, showEmpty bool) { numRows := lapiMetricsToTable(t, s) - if numRows > 0 || showEmpty{ + if numRows > 0 || showEmpty { title, _ := s.Description() - renderTableTitle(out, "\n" + title + ":") + renderTableTitle(out, "\n"+title+":") t.Render() } } @@ -294,7 +346,7 @@ func (s statLapiBouncer) Table(out io.Writer, noUnit bool, showEmpty bool) { if numRows > 0 || showEmpty { title, _ := s.Description() - renderTableTitle(out, "\n" + title + ":") + renderTableTitle(out, "\n"+title+":") t.Render() } } @@ -320,9 +372,9 @@ func (s statLapiDecision) Table(out io.Writer, noUnit bool, showEmpty bool) { numRows++ } - if numRows > 0 || showEmpty{ + if numRows > 0 || showEmpty { title, _ := s.Description() - renderTableTitle(out, "\n" + title + ":") + renderTableTitle(out, "\n"+title+":") t.Render() } } @@ -353,9 +405,9 @@ func (s statDecision) Table(out io.Writer, noUnit bool, showEmpty bool) { } } - if numRows > 0 || showEmpty{ + if numRows > 0 || showEmpty { title, _ := s.Description() - renderTableTitle(out, "\n" + title + ":") + 
renderTableTitle(out, "\n"+title+":") t.Render() } } @@ -380,9 +432,9 @@ func (s statAlert) Table(out io.Writer, noUnit bool, showEmpty bool) { numRows++ } - if numRows > 0 || showEmpty{ + if numRows > 0 || showEmpty { title, _ := s.Description() - renderTableTitle(out, "\n" + title + ":") + renderTableTitle(out, "\n"+title+":") t.Render() } } diff --git a/cmd/crowdsec/metrics.go b/cmd/crowdsec/metrics.go index ca893872edb..fa2d8d5de32 100644 --- a/cmd/crowdsec/metrics.go +++ b/cmd/crowdsec/metrics.go @@ -161,7 +161,7 @@ func registerPrometheus(config *csconfig.PrometheusCfg) { leaky.BucketsUnderflow, leaky.BucketsCanceled, leaky.BucketsInstantiation, leaky.BucketsOverflow, v1.LapiRouteHits, leaky.BucketsCurrentCount, - cache.CacheMetrics, exprhelpers.RegexpCacheMetrics, + cache.CacheMetrics, exprhelpers.RegexpCacheMetrics, parser.NodesWlHitsOk, parser.NodesWlHits, ) } else { log.Infof("Loading prometheus collectors") @@ -170,7 +170,7 @@ func registerPrometheus(config *csconfig.PrometheusCfg) { globalCsInfo, globalParsingHistogram, globalPourHistogram, v1.LapiRouteHits, v1.LapiMachineHits, v1.LapiBouncerHits, v1.LapiNilDecisions, v1.LapiNonNilDecisions, v1.LapiResponseTime, leaky.BucketsPour, leaky.BucketsUnderflow, leaky.BucketsCanceled, leaky.BucketsInstantiation, leaky.BucketsOverflow, leaky.BucketsCurrentCount, - globalActiveDecisions, globalAlerts, + globalActiveDecisions, globalAlerts, parser.NodesWlHitsOk, parser.NodesWlHits, cache.CacheMetrics, exprhelpers.RegexpCacheMetrics, ) diff --git a/pkg/parser/node.go b/pkg/parser/node.go index 23ed20511c3..fe5432ce938 100644 --- a/pkg/parser/node.go +++ b/pkg/parser/node.go @@ -168,9 +168,9 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[stri NodesHits.With(prometheus.Labels{"source": p.Line.Src, "type": p.Line.Module, "name": n.Name}).Inc() } exprErr := error(nil) - isWhitelisted := n.CheckIPsWL(p.ParseIPSources()) + isWhitelisted := n.CheckIPsWL(p) if !isWhitelisted { - 
isWhitelisted, exprErr = n.CheckExprWL(cachedExprEnv) + isWhitelisted, exprErr = n.CheckExprWL(cachedExprEnv, p) } if exprErr != nil { // Previous code returned nil if there was an error, so we keep this behavior diff --git a/pkg/parser/runtime.go b/pkg/parser/runtime.go index 4f4f6a0f3d0..afdf88dc873 100644 --- a/pkg/parser/runtime.go +++ b/pkg/parser/runtime.go @@ -221,6 +221,24 @@ var NodesHitsKo = prometheus.NewCounterVec( []string{"source", "type", "name"}, ) +// + +var NodesWlHitsOk = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "cs_node_wl_hits_ok_total", + Help: "Total events successfully whitelisted by node.", + }, + []string{"source", "type", "name", "reason"}, +) + +var NodesWlHits = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "cs_node_wl_hits_total", + Help: "Total events processed by whitelist node.", + }, + []string{"source", "type", "name", "reason"}, +) + func stageidx(stage string, stages []string) int { for i, v := range stages { if stage == v { diff --git a/pkg/parser/whitelist.go b/pkg/parser/whitelist.go index 027a9a2858a..f3739a49438 100644 --- a/pkg/parser/whitelist.go +++ b/pkg/parser/whitelist.go @@ -8,6 +8,7 @@ import ( "github.com/antonmedv/expr/vm" "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/prometheus/client_golang/prometheus" ) type Whitelist struct { @@ -36,11 +37,13 @@ func (n *Node) ContainsIPLists() bool { return len(n.Whitelist.B_Ips) > 0 || len(n.Whitelist.B_Cidrs) > 0 } -func (n *Node) CheckIPsWL(srcs []net.IP) bool { +func (n *Node) CheckIPsWL(p *types.Event) bool { + srcs := p.ParseIPSources() isWhitelisted := false if !n.ContainsIPLists() { return isWhitelisted } + NodesWlHits.With(prometheus.Labels{"source": p.Line.Src, "type": p.Line.Module, "name": n.Name, "reason": n.Whitelist.Reason}).Inc() for _, src := range srcs { if isWhitelisted { break @@ -62,15 +65,19 @@ func (n *Node) CheckIPsWL(srcs []net.IP) bool { 
n.Logger.Tracef("whitelist: %s not in [%s]", src, v) } } + if isWhitelisted { + NodesWlHitsOk.With(prometheus.Labels{"source": p.Line.Src, "type": p.Line.Module, "name": n.Name, "reason": n.Whitelist.Reason}).Inc() + } return isWhitelisted } -func (n *Node) CheckExprWL(cachedExprEnv map[string]interface{}) (bool, error) { +func (n *Node) CheckExprWL(cachedExprEnv map[string]interface{}, p *types.Event) (bool, error) { isWhitelisted := false if !n.ContainsExprLists() { return false, nil } + NodesWlHits.With(prometheus.Labels{"source": p.Line.Src, "type": p.Line.Module, "name": n.Name, "reason": n.Whitelist.Reason}).Inc() /* run whitelist expression tests anyway */ for eidx, e := range n.Whitelist.B_Exprs { //if we already know the event is whitelisted, skip the rest of the expressions @@ -94,6 +101,9 @@ func (n *Node) CheckExprWL(cachedExprEnv map[string]interface{}) (bool, error) { n.Logger.Errorf("unexpected type %t (%v) while running '%s'", output, output, n.Whitelist.Exprs[eidx]) } } + if isWhitelisted { + NodesWlHitsOk.With(prometheus.Labels{"source": p.Line.Src, "type": p.Line.Module, "name": n.Name, "reason": n.Whitelist.Reason}).Inc() + } return isWhitelisted, nil } diff --git a/pkg/parser/whitelist_test.go b/pkg/parser/whitelist_test.go index 8796aaedafe..501c655243d 100644 --- a/pkg/parser/whitelist_test.go +++ b/pkg/parser/whitelist_test.go @@ -289,9 +289,9 @@ func TestWhitelistCheck(t *testing.T) { var err error node.Whitelist = tt.whitelist node.CompileWLs() - isWhitelisted := node.CheckIPsWL(tt.event.ParseIPSources()) + isWhitelisted := node.CheckIPsWL(tt.event) if !isWhitelisted { - isWhitelisted, err = node.CheckExprWL(map[string]interface{}{"evt": tt.event}) + isWhitelisted, err = node.CheckExprWL(map[string]interface{}{"evt": tt.event}, tt.event) } require.NoError(t, err) require.Equal(t, tt.expected, isWhitelisted) From af1df0696b084997482729ba5ee3f7769270c7fe Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: 
Wed, 7 Feb 2024 11:10:25 +0100 Subject: [PATCH 014/318] refact cscli metric processing (#2816) * typos * refact cscli metric processing * lint --- .golangci.yml | 2 +- cmd/crowdsec-cli/metrics.go | 185 +++++++------------------ cmd/crowdsec-cli/metrics_table.go | 217 ++++++++++++++++++++++++++---- 3 files changed, 240 insertions(+), 164 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index a3aacccb817..f69bf66eaa5 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -22,7 +22,7 @@ linters-settings: gocognit: # lower this after refactoring - min-complexity: 150 + min-complexity: 145 gocyclo: # lower this after refactoring diff --git a/cmd/crowdsec-cli/metrics.go b/cmd/crowdsec-cli/metrics.go index 6b3155e5549..6e23bcf12e4 100644 --- a/cmd/crowdsec-cli/metrics.go +++ b/cmd/crowdsec-cli/metrics.go @@ -2,6 +2,7 @@ package main import ( "encoding/json" + "errors" "fmt" "io" "net/http" @@ -42,8 +43,14 @@ type ( } ) +var ( + ErrMissingConfig = errors.New("prometheus section missing, can't show metrics") + ErrMetricsDisabled = errors.New("prometheus is not enabled, can't show metrics") + +) + type metricSection interface { - Table(io.Writer, bool, bool) + Table(out io.Writer, noUnit bool, showEmpty bool) Description() (string, string) } @@ -154,6 +161,9 @@ func (ms metricStore) Fetch(url string) error { origin := metric.Labels["origin"] action := metric.Labels["action"] + appsecEngine := metric.Labels["appsec_engine"] + appsecRule := metric.Labels["rule_name"] + mtype := metric.Labels["type"] fval, err := strconv.ParseFloat(value, 32) @@ -162,178 +172,78 @@ func (ms metricStore) Fetch(url string) error { } ival := int(fval) + switch fam.Name { // // buckets // case "cs_bucket_created_total": - if _, ok := mBucket[name]; !ok { - mBucket[name] = make(map[string]int) - } - mBucket[name]["instantiation"] += ival + mBucket.Process(name, "instantiation", ival) case "cs_buckets": - if _, ok := mBucket[name]; !ok { - mBucket[name] = make(map[string]int) - } - 
mBucket[name]["curr_count"] += ival + mBucket.Process(name, "curr_count", ival) case "cs_bucket_overflowed_total": - if _, ok := mBucket[name]; !ok { - mBucket[name] = make(map[string]int) - } - mBucket[name]["overflow"] += ival + mBucket.Process(name, "overflow", ival) case "cs_bucket_poured_total": - if _, ok := mBucket[name]; !ok { - mBucket[name] = make(map[string]int) - } - if _, ok := mAcquis[source]; !ok { - mAcquis[source] = make(map[string]int) - } - mBucket[name]["pour"] += ival - mAcquis[source]["pour"] += ival + mBucket.Process(name, "pour", ival) + mAcquis.Process(source, "pour", ival) case "cs_bucket_underflowed_total": - if _, ok := mBucket[name]; !ok { - mBucket[name] = make(map[string]int) - } - mBucket[name]["underflow"] += ival + mBucket.Process(name, "underflow", ival) // // parsers // case "cs_parser_hits_total": - if _, ok := mAcquis[source]; !ok { - mAcquis[source] = make(map[string]int) - } - mAcquis[source]["reads"] += ival + mAcquis.Process(source, "reads", ival) case "cs_parser_hits_ok_total": - if _, ok := mAcquis[source]; !ok { - mAcquis[source] = make(map[string]int) - } - mAcquis[source]["parsed"] += ival + mAcquis.Process(source, "parsed", ival) case "cs_parser_hits_ko_total": - if _, ok := mAcquis[source]; !ok { - mAcquis[source] = make(map[string]int) - } - mAcquis[source]["unparsed"] += ival + mAcquis.Process(source, "unparsed", ival) case "cs_node_hits_total": - if _, ok := mParser[name]; !ok { - mParser[name] = make(map[string]int) - } - mParser[name]["hits"] += ival + mParser.Process(name, "hits", ival) case "cs_node_hits_ok_total": - if _, ok := mParser[name]; !ok { - mParser[name] = make(map[string]int) - } - mParser[name]["parsed"] += ival + mParser.Process(name, "parsed", ival) case "cs_node_hits_ko_total": - if _, ok := mParser[name]; !ok { - mParser[name] = make(map[string]int) - } - mParser[name]["unparsed"] += ival + mParser.Process(name, "unparsed", ival) // // whitelists // case "cs_node_wl_hits_total": - if _, ok := 
mWhitelist[name]; !ok { - mWhitelist[name] = make(map[string]map[string]int) - } - if _, ok := mWhitelist[name][reason]; !ok { - mWhitelist[name][reason] = make(map[string]int) - } - mWhitelist[name][reason]["hits"] += ival + mWhitelist.Process(name, reason, "hits", ival) case "cs_node_wl_hits_ok_total": - if _, ok := mWhitelist[name]; !ok { - mWhitelist[name] = make(map[string]map[string]int) - } - if _, ok := mWhitelist[name][reason]; !ok { - mWhitelist[name][reason] = make(map[string]int) - } - mWhitelist[name][reason]["whitelisted"] += ival + mWhitelist.Process(name, reason, "whitelisted", ival) // track as well whitelisted lines at acquis level - if _, ok := mAcquis[source]; !ok { - mAcquis[source] = make(map[string]int) - } - mAcquis[source]["whitelisted"] += ival + mAcquis.Process(source, "whitelisted", ival) // // lapi // case "cs_lapi_route_requests_total": - if _, ok := mLapi[route]; !ok { - mLapi[route] = make(map[string]int) - } - mLapi[route][method] += ival + mLapi.Process(route, method, ival) case "cs_lapi_machine_requests_total": - if _, ok := mLapiMachine[machine]; !ok { - mLapiMachine[machine] = make(map[string]map[string]int) - } - if _, ok := mLapiMachine[machine][route]; !ok { - mLapiMachine[machine][route] = make(map[string]int) - } - mLapiMachine[machine][route][method] += ival + mLapiMachine.Process(machine, route, method, ival) case "cs_lapi_bouncer_requests_total": - if _, ok := mLapiBouncer[bouncer]; !ok { - mLapiBouncer[bouncer] = make(map[string]map[string]int) - } - if _, ok := mLapiBouncer[bouncer][route]; !ok { - mLapiBouncer[bouncer][route] = make(map[string]int) - } - mLapiBouncer[bouncer][route][method] += ival + mLapiBouncer.Process(bouncer, route, method, ival) case "cs_lapi_decisions_ko_total", "cs_lapi_decisions_ok_total": - if _, ok := mLapiDecision[bouncer]; !ok { - mLapiDecision[bouncer] = struct { - NonEmpty int - Empty int - }{} - } - x := mLapiDecision[bouncer] - if fam.Name == "cs_lapi_decisions_ko_total" { - x.Empty += 
ival - } else if fam.Name == "cs_lapi_decisions_ok_total" { - x.NonEmpty += ival - } - mLapiDecision[bouncer] = x + mLapiDecision.Process(bouncer, fam.Name, ival) // // decisions // case "cs_active_decisions": - if _, ok := mDecision[reason]; !ok { - mDecision[reason] = make(map[string]map[string]int) - } - if _, ok := mDecision[reason][origin]; !ok { - mDecision[reason][origin] = make(map[string]int) - } - mDecision[reason][origin][action] += ival + mDecision.Process(reason, origin, action, ival) case "cs_alerts": - mAlert[reason] += ival + mAlert.Process(reason, ival) // // stash // case "cs_cache_size": - mStash[name] = struct { - Type string - Count int - }{Type: mtype, Count: ival} + mStash.Process(name, mtype, ival) // // appsec // case "cs_appsec_reqs_total": - if _, ok := mAppsecEngine[metric.Labels["appsec_engine"]]; !ok { - mAppsecEngine[metric.Labels["appsec_engine"]] = make(map[string]int, 0) - } - mAppsecEngine[metric.Labels["appsec_engine"]]["processed"] = ival + mAppsecEngine.Process(appsecEngine, "processed", ival) case "cs_appsec_block_total": - if _, ok := mAppsecEngine[metric.Labels["appsec_engine"]]; !ok { - mAppsecEngine[metric.Labels["appsec_engine"]] = make(map[string]int, 0) - } - mAppsecEngine[metric.Labels["appsec_engine"]]["blocked"] = ival + mAppsecEngine.Process(appsecEngine, "blocked", ival) case "cs_appsec_rule_hits": - appsecEngine := metric.Labels["appsec_engine"] - ruleID := metric.Labels["rule_name"] - if _, ok := mAppsecRule[appsecEngine]; !ok { - mAppsecRule[appsecEngine] = make(map[string]map[string]int, 0) - } - if _, ok := mAppsecRule[appsecEngine][ruleID]; !ok { - mAppsecRule[appsecEngine][ruleID] = make(map[string]int, 0) - } - mAppsecRule[appsecEngine][ruleID]["triggered"] = ival + mAppsecRule.Process(appsecEngine, appsecRule, "triggered", ival) default: log.Debugf("unknown: %+v", fam.Name) continue @@ -380,13 +290,13 @@ func (ms metricStore) Format(out io.Writer, sections []string, formatType string case "json": x, err := 
json.MarshalIndent(want, "", " ") if err != nil { - return fmt.Errorf("failed to unmarshal metrics : %v", err) + return fmt.Errorf("failed to marshal metrics: %w", err) } out.Write(x) case "raw": x, err := yaml.Marshal(want) if err != nil { - return fmt.Errorf("failed to unmarshal metrics : %v", err) + return fmt.Errorf("failed to marshal metrics: %w", err) } out.Write(x) default: @@ -404,11 +314,11 @@ func (cli *cliMetrics) show(sections []string, url string, noUnit bool) error { } if cfg.Prometheus == nil { - return fmt.Errorf("prometheus section missing, can't show metrics") + return ErrMissingConfig } if !cfg.Prometheus.Enabled { - return fmt.Errorf("prometheus is not enabled, can't show metrics") + return ErrMetricsDisabled } ms := NewMetricStore() @@ -427,6 +337,7 @@ func (cli *cliMetrics) show(sections []string, url string, noUnit bool) error { if err := ms.Format(color.Output, sections, cfg.Cscli.Output, noUnit); err != nil { return err } + return nil } @@ -468,6 +379,7 @@ cscli metrics list`, // expandAlias returns a list of sections. The input can be a list of sections or alias. 
func (cli *cliMetrics) expandSectionGroups(args []string) []string { ret := []string{} + for _, section := range args { switch section { case "engine": @@ -522,8 +434,8 @@ cscli metrics show acquisition parsers buckets stash -o json`, func (cli *cliMetrics) list() error { type metricType struct { - Type string `json:"type" yaml:"type"` - Title string `json:"title" yaml:"title"` + Type string `json:"type" yaml:"type"` + Title string `json:"title" yaml:"title"` Description string `json:"description" yaml:"description"` } @@ -553,13 +465,13 @@ func (cli *cliMetrics) list() error { case "json": x, err := json.MarshalIndent(allMetrics, "", " ") if err != nil { - return fmt.Errorf("failed to unmarshal metrics: %w", err) + return fmt.Errorf("failed to marshal metric types: %w", err) } fmt.Println(string(x)) case "raw": x, err := yaml.Marshal(allMetrics) if err != nil { - return fmt.Errorf("failed to unmarshal metrics: %w", err) + return fmt.Errorf("failed to marshal metric types: %w", err) } fmt.Println(string(x)) } @@ -575,8 +487,7 @@ func (cli *cliMetrics) newListCmd() *cobra.Command { Args: cobra.ExactArgs(0), DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { - cli.list() - return nil + return cli.list() }, } diff --git a/cmd/crowdsec-cli/metrics_table.go b/cmd/crowdsec-cli/metrics_table.go index f11ee11bf88..da6ea3d9f1d 100644 --- a/cmd/crowdsec-cli/metrics_table.go +++ b/cmd/crowdsec-cli/metrics_table.go @@ -4,6 +4,7 @@ import ( "fmt" "io" "sort" + "strconv" "github.com/aquasecurity/table" log "github.com/sirupsen/logrus" @@ -11,17 +12,21 @@ import ( "github.com/crowdsecurity/go-cs-lib/maptools" ) +// ErrNilTable means a nil pointer was passed instead of a table instance. This is a programming error. 
+var ErrNilTable = fmt.Errorf("nil table") + func lapiMetricsToTable(t *table.Table, stats map[string]map[string]map[string]int) int { // stats: machine -> route -> method -> count - // sort keys to keep consistent order when printing machineKeys := []string{} for k := range stats { machineKeys = append(machineKeys, k) } + sort.Strings(machineKeys) numRows := 0 + for _, machine := range machineKeys { // oneRow: route -> method -> count machineRow := stats[machine] @@ -33,53 +38,60 @@ func lapiMetricsToTable(t *table.Table, stats map[string]map[string]map[string]i methodName, } if count != 0 { - row = append(row, fmt.Sprintf("%d", count)) + row = append(row, strconv.Itoa(count)) } else { row = append(row, "-") } + t.AddRow(row...) numRows++ } } } + return numRows } func wlMetricsToTable(t *table.Table, stats map[string]map[string]map[string]int, noUnit bool) (int, error) { if t == nil { - return 0, fmt.Errorf("nil table") + return 0, ErrNilTable } numRows := 0 for _, name := range maptools.SortedKeys(stats) { for _, reason := range maptools.SortedKeys(stats[name]) { - row := make([]string, 4) - row[0] = name - row[1] = reason - row[2] = "-" - row[3] = "-" + row := []string{ + name, + reason, + "-", + "-", + } for _, action := range maptools.SortedKeys(stats[name][reason]) { value := stats[name][reason][action] - if action == "whitelisted" { - row[3] = fmt.Sprintf("%d", value) - } else if action == "hits" { - row[2] = fmt.Sprintf("%d", value) - } else { + + switch action { + case "whitelisted": + row[3] = strconv.Itoa(value) + case "hits": + row[2] = strconv.Itoa(value) + default: log.Debugf("unexpected counter '%s' for whitelists = %d", action, value) } } + t.AddRow(row...) 
numRows++ } } + return numRows, nil } func metricsToTable(t *table.Table, stats map[string]map[string]int, keys []string, noUnit bool) (int, error) { if t == nil { - return 0, fmt.Errorf("nil table") + return 0, ErrNilTable } numRows := 0 @@ -89,12 +101,14 @@ func metricsToTable(t *table.Table, stats map[string]map[string]int, keys []stri if !ok { continue } + row := []string{ alabel, } + for _, sl := range keys { if v, ok := astats[sl]; ok && v != 0 { - numberToShow := fmt.Sprintf("%d", v) + numberToShow := strconv.Itoa(v) if !noUnit { numberToShow = formatNumber(v) } @@ -104,15 +118,26 @@ func metricsToTable(t *table.Table, stats map[string]map[string]int, keys []stri row = append(row, "-") } } + t.AddRow(row...) numRows++ } + return numRows, nil } func (s statBucket) Description() (string, string) { return "Bucket Metrics", - `Measure events in different scenarios. Current count is the number of buckets during metrics collection. Overflows are past event-producing buckets, while Expired are the ones that didn’t receive enough events to Overflow.` + `Measure events in different scenarios. Current count is the number of buckets during metrics collection. ` + + `Overflows are past event-producing buckets, while Expired are the ones that didn’t receive enough events to Overflow.` +} + +func (s statBucket) Process(bucket, metric string, val int) { + if _, ok := s[bucket]; !ok { + s[bucket] = make(map[string]int) + } + + s[bucket][metric] += val } func (s statBucket) Table(out io.Writer, noUnit bool, showEmpty bool) { @@ -134,7 +159,18 @@ func (s statBucket) Table(out io.Writer, noUnit bool, showEmpty bool) { func (s statAcquis) Description() (string, string) { return "Acquisition Metrics", - `Measures the lines read, parsed, and unparsed per datasource. Zero read lines indicate a misconfigured or inactive datasource. Zero parsed lines mean the parser(s) failed. 
Non-zero parsed lines are fine as crowdsec selects relevant lines.` + `Measures the lines read, parsed, and unparsed per datasource. ` + + `Zero read lines indicate a misconfigured or inactive datasource. ` + + `Zero parsed lines mean the parser(s) failed. ` + + `Non-zero parsed lines are fine as crowdsec selects relevant lines.` +} + +func (s statAcquis) Process(source, metric string, val int) { + if _, ok := s[source]; !ok { + s[source] = make(map[string]int) + } + + s[source][metric] += val } func (s statAcquis) Table(out io.Writer, noUnit bool, showEmpty bool) { @@ -159,12 +195,22 @@ func (s statAppsecEngine) Description() (string, string) { `Measures the number of parsed and blocked requests by the AppSec Component.` } +func (s statAppsecEngine) Process(appsecEngine, metric string, val int) { + if _, ok := s[appsecEngine]; !ok { + s[appsecEngine] = make(map[string]int) + } + + s[appsecEngine][metric] += val +} + func (s statAppsecEngine) Table(out io.Writer, noUnit bool, showEmpty bool) { t := newTable(out) t.SetRowLines(false) t.SetHeaders("Appsec Engine", "Processed", "Blocked") t.SetAlignment(table.AlignLeft, table.AlignLeft) + keys := []string{"processed", "blocked"} + if numRows, err := metricsToTable(t, s, keys, noUnit); err != nil { log.Warningf("while collecting appsec stats: %s", err) } else if numRows > 0 || showEmpty { @@ -179,13 +225,27 @@ func (s statAppsecRule) Description() (string, string) { `Provides “per AppSec Component” information about the number of matches for loaded AppSec Rules.` } +func (s statAppsecRule) Process(appsecEngine, appsecRule string, metric string, val int) { + if _, ok := s[appsecEngine]; !ok { + s[appsecEngine] = make(map[string]map[string]int) + } + + if _, ok := s[appsecEngine][appsecRule]; !ok { + s[appsecEngine][appsecRule] = make(map[string]int) + } + + s[appsecEngine][appsecRule][metric] += val +} + func (s statAppsecRule) Table(out io.Writer, noUnit bool, showEmpty bool) { for appsecEngine, appsecEngineRulesStats 
:= range s { t := newTable(out) t.SetRowLines(false) t.SetHeaders("Rule ID", "Triggered") t.SetAlignment(table.AlignLeft, table.AlignLeft) + keys := []string{"triggered"} + if numRows, err := metricsToTable(t, appsecEngineRulesStats, keys, noUnit); err != nil { log.Warningf("while collecting appsec rules stats: %s", err) } else if numRows > 0 || showEmpty { @@ -193,7 +253,6 @@ func (s statAppsecRule) Table(out io.Writer, noUnit bool, showEmpty bool) { t.Render() } } - } func (s statWhitelist) Description() (string, string) { @@ -201,6 +260,18 @@ func (s statWhitelist) Description() (string, string) { `Tracks the number of events processed and possibly whitelisted by each parser whitelist.` } +func (s statWhitelist) Process(whitelist, reason, metric string, val int) { + if _, ok := s[whitelist]; !ok { + s[whitelist] = make(map[string]map[string]int) + } + + if _, ok := s[whitelist][reason]; !ok { + s[whitelist][reason] = make(map[string]int) + } + + s[whitelist][reason][metric] += val +} + func (s statWhitelist) Table(out io.Writer, noUnit bool, showEmpty bool) { t := newTable(out) t.SetRowLines(false) @@ -218,7 +289,17 @@ func (s statWhitelist) Table(out io.Writer, noUnit bool, showEmpty bool) { func (s statParser) Description() (string, string) { return "Parser Metrics", - `Tracks the number of events processed by each parser and indicates success of failure. Zero parsed lines means the parer(s) failed. Non-zero unparsed lines are fine as crowdsec select relevant lines.` + `Tracks the number of events processed by each parser and indicates success of failure. ` + + `Zero parsed lines means the parer(s) failed. 
` + + `Non-zero unparsed lines are fine as crowdsec select relevant lines.` +} + +func (s statParser) Process(parser, metric string, val int) { + if _, ok := s[parser]; !ok { + s[parser] = make(map[string]int) + } + + s[parser][metric] += val } func (s statParser) Table(out io.Writer, noUnit bool, showEmpty bool) { @@ -243,6 +324,16 @@ func (s statStash) Description() (string, string) { `Tracks the status of stashes that might be created by various parsers and scenarios.` } +func (s statStash) Process(name, mtype string, val int) { + s[name] = struct { + Type string + Count int + }{ + Type: mtype, + Count: val, + } +} + func (s statStash) Table(out io.Writer, noUnit bool, showEmpty bool) { t := newTable(out) t.SetRowLines(false) @@ -258,11 +349,12 @@ func (s statStash) Table(out io.Writer, noUnit bool, showEmpty bool) { row := []string{ alabel, astats.Type, - fmt.Sprintf("%d", astats.Count), + strconv.Itoa(astats.Count), } t.AddRow(row...) numRows++ } + if numRows > 0 || showEmpty { title, _ := s.Description() renderTableTitle(out, "\n"+title+":") @@ -275,6 +367,14 @@ func (s statLapi) Description() (string, string) { `Monitors the requests made to local API routes.` } +func (s statLapi) Process(route, method string, val int) { + if _, ok := s[route]; !ok { + s[route] = make(map[string]int) + } + + s[route][method] += val +} + func (s statLapi) Table(out io.Writer, noUnit bool, showEmpty bool) { t := newTable(out) t.SetRowLines(false) @@ -291,13 +391,14 @@ func (s statLapi) Table(out io.Writer, noUnit bool, showEmpty bool) { for skey := range astats { subKeys = append(subKeys, skey) } + sort.Strings(subKeys) for _, sl := range subKeys { row := []string{ alabel, sl, - fmt.Sprintf("%d", astats[sl]), + strconv.Itoa(astats[sl]), } t.AddRow(row...) 
numRows++ @@ -316,6 +417,18 @@ func (s statLapiMachine) Description() (string, string) { `Tracks the number of calls to the local API from each registered machine.` } +func (s statLapiMachine) Process(machine, route, method string, val int) { + if _, ok := s[machine]; !ok { + s[machine] = make(map[string]map[string]int) + } + + if _, ok := s[machine][route]; !ok { + s[machine][route] = make(map[string]int) + } + + s[machine][route][method] += val +} + func (s statLapiMachine) Table(out io.Writer, noUnit bool, showEmpty bool) { t := newTable(out) t.SetRowLines(false) @@ -336,6 +449,18 @@ func (s statLapiBouncer) Description() (string, string) { `Tracks total hits to remediation component related API routes.` } +func (s statLapiBouncer) Process(bouncer, route, method string, val int) { + if _, ok := s[bouncer]; !ok { + s[bouncer] = make(map[string]map[string]int) + } + + if _, ok := s[bouncer][route]; !ok { + s[bouncer][route] = make(map[string]int) + } + + s[bouncer][route][method] += val +} + func (s statLapiBouncer) Table(out io.Writer, noUnit bool, showEmpty bool) { t := newTable(out) t.SetRowLines(false) @@ -356,6 +481,26 @@ func (s statLapiDecision) Description() (string, string) { `Tracks the number of empty/non-empty answers from LAPI to bouncers that are working in "live" mode.` } +func (s statLapiDecision) Process(bouncer, fam string, val int) { + if _, ok := s[bouncer]; !ok { + s[bouncer] = struct { + NonEmpty int + Empty int + }{} + } + + x := s[bouncer] + + switch fam { + case "cs_lapi_decisions_ko_total": + x.Empty += val + case "cs_lapi_decisions_ok_total": + x.NonEmpty += val + } + + s[bouncer] = x +} + func (s statLapiDecision) Table(out io.Writer, noUnit bool, showEmpty bool) { t := newTable(out) t.SetRowLines(false) @@ -363,11 +508,12 @@ func (s statLapiDecision) Table(out io.Writer, noUnit bool, showEmpty bool) { t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft) numRows := 0 + for bouncer, hits := range s { t.AddRow( bouncer, - 
fmt.Sprintf("%d", hits.Empty), - fmt.Sprintf("%d", hits.NonEmpty), + strconv.Itoa(hits.Empty), + strconv.Itoa(hits.NonEmpty), ) numRows++ } @@ -381,7 +527,20 @@ func (s statLapiDecision) Table(out io.Writer, noUnit bool, showEmpty bool) { func (s statDecision) Description() (string, string) { return "Local API Decisions", - `Provides information about all currently active decisions. Includes both local (crowdsec) and global decisions (CAPI), and lists subscriptions (lists).` + `Provides information about all currently active decisions. ` + + `Includes both local (crowdsec) and global decisions (CAPI), and lists subscriptions (lists).` +} + +func (s statDecision) Process(reason, origin, action string, val int) { + if _, ok := s[reason]; !ok { + s[reason] = make(map[string]map[string]int) + } + + if _, ok := s[reason][origin]; !ok { + s[reason][origin] = make(map[string]int) + } + + s[reason][origin][action] += val } func (s statDecision) Table(out io.Writer, noUnit bool, showEmpty bool) { @@ -391,6 +550,7 @@ func (s statDecision) Table(out io.Writer, noUnit bool, showEmpty bool) { t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) numRows := 0 + for reason, origins := range s { for origin, actions := range origins { for action, hits := range actions { @@ -398,7 +558,7 @@ func (s statDecision) Table(out io.Writer, noUnit bool, showEmpty bool) { reason, origin, action, - fmt.Sprintf("%d", hits), + strconv.Itoa(hits), ) numRows++ } @@ -417,6 +577,10 @@ func (s statAlert) Description() (string, string) { `Tracks the total number of past and present alerts for the installed scenarios.` } +func (s statAlert) Process(reason string, val int) { + s[reason] += val +} + func (s statAlert) Table(out io.Writer, noUnit bool, showEmpty bool) { t := newTable(out) t.SetRowLines(false) @@ -424,10 +588,11 @@ func (s statAlert) Table(out io.Writer, noUnit bool, showEmpty bool) { t.SetAlignment(table.AlignLeft, table.AlignLeft) numRows := 0 + for 
scenario, hits := range s { t.AddRow( scenario, - fmt.Sprintf("%d", hits), + strconv.Itoa(hits), ) numRows++ } From df159b016705aa78ac64020634d66152efd0e76b Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 9 Feb 2024 13:55:24 +0100 Subject: [PATCH 015/318] update calls to deprecated x509 methods (#2824) --- .github/workflows/docker-tests.yml | 4 +- .golangci.yml | 4 - pkg/apiserver/middlewares/v1/api_key.go | 2 +- pkg/apiserver/middlewares/v1/tls_auth.go | 104 ++++++++++++----------- test/bats/11_bouncers_tls.bats | 3 + test/bats/30_machines_tls.bats | 7 +- 6 files changed, 65 insertions(+), 59 deletions(-) diff --git a/.github/workflows/docker-tests.yml b/.github/workflows/docker-tests.yml index 7bc63de0178..d3ae4f90d79 100644 --- a/.github/workflows/docker-tests.yml +++ b/.github/workflows/docker-tests.yml @@ -50,7 +50,7 @@ jobs: cache-to: type=gha,mode=min - name: "Setup Python" - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: "3.x" @@ -61,7 +61,7 @@ jobs: - name: "Cache virtualenvs" id: cache-pipenv - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: ~/.local/share/virtualenvs key: ${{ runner.os }}-pipenv-${{ hashFiles('**/Pipfile.lock') }} diff --git a/.golangci.yml b/.golangci.yml index f69bf66eaa5..3161b2c0aaf 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -310,10 +310,6 @@ issues: # Will fix, might be trickier # - - linters: - - staticcheck - text: "x509.ParseCRL has been deprecated since Go 1.19: Use ParseRevocationList instead" - # https://github.com/pkg/errors/issues/245 - linters: - depguard diff --git a/pkg/apiserver/middlewares/v1/api_key.go b/pkg/apiserver/middlewares/v1/api_key.go index ae7645e1b85..41ee15b4417 100644 --- a/pkg/apiserver/middlewares/v1/api_key.go +++ b/pkg/apiserver/middlewares/v1/api_key.go @@ -66,7 +66,7 @@ func (a *APIKey) authTLS(c *gin.Context, logger *log.Entry) *ent.Bouncer { validCert, extractedCN, err := a.TlsAuth.ValidateCert(c) if 
!validCert { - logger.Errorf("invalid client certificate: %s", err) + logger.Error(err) return nil } diff --git a/pkg/apiserver/middlewares/v1/tls_auth.go b/pkg/apiserver/middlewares/v1/tls_auth.go index 904f6cd445a..bd2c4bb30e7 100644 --- a/pkg/apiserver/middlewares/v1/tls_auth.go +++ b/pkg/apiserver/middlewares/v1/tls_auth.go @@ -4,6 +4,7 @@ import ( "bytes" "crypto" "crypto/x509" + "encoding/pem" "fmt" "io" "net/http" @@ -19,14 +20,13 @@ import ( type TLSAuth struct { AllowedOUs []string CrlPath string - revokationCache map[string]cacheEntry + revocationCache map[string]cacheEntry cacheExpiration time.Duration logger *log.Entry } type cacheEntry struct { revoked bool - err error timestamp time.Time } @@ -89,10 +89,12 @@ func (ta *TLSAuth) isExpired(cert *x509.Certificate) bool { return false } -func (ta *TLSAuth) isOCSPRevoked(cert *x509.Certificate, issuer *x509.Certificate) (bool, error) { - if cert.OCSPServer == nil || (cert.OCSPServer != nil && len(cert.OCSPServer) == 0) { +// isOCSPRevoked checks if the client certificate is revoked by any of the OCSP servers present in the certificate. +// It returns a boolean indicating if the certificate is revoked and a boolean indicating if the OCSP check was successful and could be cached. 
+func (ta *TLSAuth) isOCSPRevoked(cert *x509.Certificate, issuer *x509.Certificate) (bool, bool) { + if cert.OCSPServer == nil || len(cert.OCSPServer) == 0 { ta.logger.Infof("TLSAuth: no OCSP Server present in client certificate, skipping OCSP verification") - return false, nil + return false, true } for _, server := range cert.OCSPServer { @@ -104,9 +106,10 @@ func (ta *TLSAuth) isOCSPRevoked(cert *x509.Certificate, issuer *x509.Certificat switch ocspResponse.Status { case ocsp.Good: - return false, nil + return false, true case ocsp.Revoked: - return true, fmt.Errorf("client certificate is revoked by server %s", server) + ta.logger.Errorf("TLSAuth: client certificate is revoked by server %s", server) + return true, true case ocsp.Unknown: log.Debugf("unknow OCSP status for server %s", server) continue @@ -115,83 +118,82 @@ func (ta *TLSAuth) isOCSPRevoked(cert *x509.Certificate, issuer *x509.Certificat log.Infof("Could not get any valid OCSP response, assuming the cert is revoked") - return true, nil + return true, false } -func (ta *TLSAuth) isCRLRevoked(cert *x509.Certificate) (bool, error) { +// isCRLRevoked checks if the client certificate is revoked by the CRL present in the CrlPath. +// It returns a boolean indicating if the certificate is revoked and a boolean indicating if the CRL check was successful and could be cached. 
+func (ta *TLSAuth) isCRLRevoked(cert *x509.Certificate) (bool, bool) { if ta.CrlPath == "" { - ta.logger.Warn("no crl_path, skipping CRL check") - return false, nil + ta.logger.Info("no crl_path, skipping CRL check") + return false, true } crlContent, err := os.ReadFile(ta.CrlPath) if err != nil { - ta.logger.Warnf("could not read CRL file, skipping check: %s", err) - return false, nil + ta.logger.Errorf("could not read CRL file, skipping check: %s", err) + return false, false } - crl, err := x509.ParseCRL(crlContent) + crlBinary, rest := pem.Decode(crlContent) + if len(rest) > 0 { + ta.logger.Warn("CRL file contains more than one PEM block, ignoring the rest") + } + + crl, err := x509.ParseRevocationList(crlBinary.Bytes) if err != nil { - ta.logger.Warnf("could not parse CRL file, skipping check: %s", err) - return false, nil + ta.logger.Errorf("could not parse CRL file, skipping check: %s", err) + return false, false } - if crl.HasExpired(time.Now().UTC()) { + now := time.Now().UTC() + + if now.After(crl.NextUpdate) { ta.logger.Warn("CRL has expired, will still validate the cert against it.") } - for _, revoked := range crl.TBSCertList.RevokedCertificates { + if now.Before(crl.ThisUpdate) { + ta.logger.Warn("CRL is not yet valid, will still validate the cert against it.") + } + + for _, revoked := range crl.RevokedCertificateEntries { if revoked.SerialNumber.Cmp(cert.SerialNumber) == 0 { - return true, fmt.Errorf("client certificate is revoked by CRL") + ta.logger.Warn("client certificate is revoked by CRL") + return true, true } } - return false, nil + return false, true } func (ta *TLSAuth) isRevoked(cert *x509.Certificate, issuer *x509.Certificate) (bool, error) { sn := cert.SerialNumber.String() - if cacheValue, ok := ta.revokationCache[sn]; ok { + if cacheValue, ok := ta.revocationCache[sn]; ok { if time.Now().UTC().Sub(cacheValue.timestamp) < ta.cacheExpiration { - ta.logger.Debugf("TLSAuth: using cached value for cert %s: %t | %s", sn, cacheValue.revoked, 
cacheValue.err) - return cacheValue.revoked, cacheValue.err - } else { - ta.logger.Debugf("TLSAuth: cached value expired, removing from cache") - delete(ta.revokationCache, sn) + ta.logger.Debugf("TLSAuth: using cached value for cert %s: %t", sn, cacheValue.revoked) + return cacheValue.revoked, nil } + + ta.logger.Debugf("TLSAuth: cached value expired, removing from cache") + delete(ta.revocationCache, sn) } else { ta.logger.Tracef("TLSAuth: no cached value for cert %s", sn) } - revoked, err := ta.isOCSPRevoked(cert, issuer) - if err != nil { - ta.revokationCache[sn] = cacheEntry{ - revoked: revoked, - err: err, - timestamp: time.Now().UTC(), - } + revokedByOCSP, cacheOCSP := ta.isOCSPRevoked(cert, issuer) - return true, err - } + revokedByCRL, cacheCRL := ta.isCRLRevoked(cert) - if revoked { - ta.revokationCache[sn] = cacheEntry{ + revoked := revokedByOCSP || revokedByCRL + + if cacheOCSP && cacheCRL { + ta.revocationCache[sn] = cacheEntry{ revoked: revoked, - err: err, timestamp: time.Now().UTC(), } - - return true, nil - } - - revoked, err = ta.isCRLRevoked(cert) - ta.revokationCache[sn] = cacheEntry{ - revoked: revoked, - err: err, - timestamp: time.Now().UTC(), } - return revoked, err + return revoked, nil } func (ta *TLSAuth) isInvalid(cert *x509.Certificate, issuer *x509.Certificate) (bool, error) { @@ -265,11 +267,11 @@ func (ta *TLSAuth) ValidateCert(c *gin.Context) (bool, string, error) { revoked, err := ta.isInvalid(clientCert, c.Request.TLS.VerifiedChains[0][1]) if err != nil { ta.logger.Errorf("TLSAuth: error checking if client certificate is revoked: %s", err) - return false, "", fmt.Errorf("could not check for client certification revokation status: %w", err) + return false, "", fmt.Errorf("could not check for client certification revocation status: %w", err) } if revoked { - return false, "", fmt.Errorf("client certificate is revoked") + return false, "", fmt.Errorf("client certificate for CN=%s OU=%s is revoked", clientCert.Subject.CommonName, 
clientCert.Subject.OrganizationalUnit) } ta.logger.Debugf("client OU %v is allowed vs required OU %v", clientCert.Subject.OrganizationalUnit, ta.AllowedOUs) @@ -282,7 +284,7 @@ func (ta *TLSAuth) ValidateCert(c *gin.Context) (bool, string, error) { func NewTLSAuth(allowedOus []string, crlPath string, cacheExpiration time.Duration, logger *log.Entry) (*TLSAuth, error) { ta := &TLSAuth{ - revokationCache: map[string]cacheEntry{}, + revocationCache: map[string]cacheEntry{}, cacheExpiration: cacheExpiration, CrlPath: crlPath, logger: logger, diff --git a/test/bats/11_bouncers_tls.bats b/test/bats/11_bouncers_tls.bats index 8fb4579259d..2c39aae3079 100644 --- a/test/bats/11_bouncers_tls.bats +++ b/test/bats/11_bouncers_tls.bats @@ -90,7 +90,10 @@ teardown() { } @test "simulate one bouncer request with a revoked certificate" { + truncate_log rune -0 curl -i -s --cert "${tmpdir}/bouncer_revoked.pem" --key "${tmpdir}/bouncer_revoked-key.pem" --cacert "${tmpdir}/bundle.pem" https://localhost:8080/v1/decisions\?ip=42.42.42.42 + assert_log --partial "client certificate is revoked by CRL" + assert_log --partial "client certificate for CN=localhost OU=[bouncer-ou] is revoked" assert_output --partial "access forbidden" rune -0 cscli bouncers list -o json assert_output "[]" diff --git a/test/bats/30_machines_tls.bats b/test/bats/30_machines_tls.bats index 535435336ba..311293ca70c 100644 --- a/test/bats/30_machines_tls.bats +++ b/test/bats/30_machines_tls.bats @@ -132,13 +132,15 @@ teardown() { ' config_set "${CONFIG_DIR}/local_api_credentials.yaml" 'del(.login,.password)' ./instance-crowdsec start + rune -1 cscli lapi status rune -0 cscli machines list -o json assert_output '[]' } @test "revoked cert for agent" { + truncate_log config_set "${CONFIG_DIR}/local_api_credentials.yaml" ' - .ca_cert_path=strenv(tmpdir) + "/bundle.pem" | + .ca_cert_path=strenv(tmpdir) + "/bundle.pem" | .key_path=strenv(tmpdir) + "/agent_revoked-key.pem" | .cert_path=strenv(tmpdir) + "/agent_revoked.pem" 
| .url="https://127.0.0.1:8080" @@ -146,6 +148,9 @@ teardown() { config_set "${CONFIG_DIR}/local_api_credentials.yaml" 'del(.login,.password)' ./instance-crowdsec start + rune -1 cscli lapi status + assert_log --partial "client certificate is revoked by CRL" + assert_log --partial "client certificate for CN=localhost OU=[agent-ou] is revoked" rune -0 cscli machines list -o json assert_output '[]' } From fa56d35a483f89b0fb1bd74a9c3ee4ae4ca31623 Mon Sep 17 00:00:00 2001 From: Laurence Jones Date: Fri, 9 Feb 2024 13:37:49 +0000 Subject: [PATCH 016/318] [Loki] Set headers/basic auth if set for queryRange (#2815) --- .../loki/internal/lokiclient/loki_client.go | 41 ++++++++++++------- pkg/acquisition/modules/loki/loki_test.go | 33 ++++++++++----- 2 files changed, 48 insertions(+), 26 deletions(-) diff --git a/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go b/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go index 8451a86fcdf..d2af4e8af28 100644 --- a/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go +++ b/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go @@ -25,6 +25,7 @@ type LokiClient struct { t *tomb.Tomb fail_start time.Time currentTickerInterval time.Duration + requestHeaders map[string]string } type Config struct { @@ -116,7 +117,7 @@ func (lc *LokiClient) queryRange(uri string, ctx context.Context, c chan *LokiQu case <-lc.t.Dying(): return lc.t.Err() case <-ticker.C: - resp, err := http.Get(uri) + resp, err := lc.Get(uri) if err != nil { if ok := lc.shouldRetry(); !ok { return errors.Wrapf(err, "error querying range") @@ -127,6 +128,7 @@ func (lc *LokiClient) queryRange(uri string, ctx context.Context, c chan *LokiQu } if resp.StatusCode != http.StatusOK { + lc.Logger.Warnf("bad HTTP response code for query range: %d", resp.StatusCode) body, _ := io.ReadAll(resp.Body) resp.Body.Close() if ok := lc.shouldRetry(); !ok { @@ -215,7 +217,7 @@ func (lc *LokiClient) Ready(ctx context.Context) error { return 
lc.t.Err() case <-tick.C: lc.Logger.Debug("Checking if Loki is ready") - resp, err := http.Get(url) + resp, err := lc.Get(url) if err != nil { lc.Logger.Warnf("Error checking if Loki is ready: %s", err) continue @@ -251,10 +253,9 @@ func (lc *LokiClient) Tail(ctx context.Context) (chan *LokiResponse, error) { } requestHeader := http.Header{} - for k, v := range lc.config.Headers { + for k, v := range lc.requestHeaders { requestHeader.Add(k, v) } - requestHeader.Set("User-Agent", "Crowdsec "+cwversion.VersionStr()) lc.Logger.Infof("Connecting to %s", u) conn, _, err := dialer.Dial(u, requestHeader) @@ -293,16 +294,6 @@ func (lc *LokiClient) QueryRange(ctx context.Context, infinite bool) chan *LokiQ lc.Logger.Debugf("Since: %s (%s)", lc.config.Since, time.Now().Add(-lc.config.Since)) - requestHeader := http.Header{} - for k, v := range lc.config.Headers { - requestHeader.Add(k, v) - } - - if lc.config.Username != "" || lc.config.Password != "" { - requestHeader.Set("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte(lc.config.Username+":"+lc.config.Password))) - } - - requestHeader.Set("User-Agent", "Crowdsec "+cwversion.VersionStr()) lc.Logger.Infof("Connecting to %s", url) lc.t.Go(func() error { return lc.queryRange(url, ctx, c, infinite) @@ -310,6 +301,26 @@ func (lc *LokiClient) QueryRange(ctx context.Context, infinite bool) chan *LokiQ return c } +// Create a wrapper for http.Get to be able to set headers and auth +func (lc *LokiClient) Get(url string) (*http.Response, error) { + request, err := http.NewRequest(http.MethodGet, url, nil) + if err != nil { + return nil, err + } + for k, v := range lc.requestHeaders { + request.Header.Add(k, v) + } + return http.DefaultClient.Do(request) +} + func NewLokiClient(config Config) *LokiClient { - return &LokiClient{Logger: log.WithField("component", "lokiclient"), config: config} + headers := make(map[string]string) + for k, v := range config.Headers { + headers[k] = v + } + if config.Username != "" || 
config.Password != "" { + headers["Authorization"] = "Basic " + base64.StdEncoding.EncodeToString([]byte(config.Username+":"+config.Password)) + } + headers["User-Agent"] = "Crowdsec " + cwversion.VersionStr() + return &LokiClient{Logger: log.WithField("component", "lokiclient"), config: config, requestHeaders: headers} } diff --git a/pkg/acquisition/modules/loki/loki_test.go b/pkg/acquisition/modules/loki/loki_test.go index fae2e3aa98f..6cac1c0fec3 100644 --- a/pkg/acquisition/modules/loki/loki_test.go +++ b/pkg/acquisition/modules/loki/loki_test.go @@ -276,10 +276,17 @@ func feedLoki(logger *log.Entry, n int, title string) error { if err != nil { return err } - resp, err := http.Post("http://127.0.0.1:3100/loki/api/v1/push", "application/json", bytes.NewBuffer(buff)) + req, err := http.NewRequest(http.MethodPost, "http://127.0.0.1:3100/loki/api/v1/push", bytes.NewBuffer(buff)) if err != nil { return err } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("X-Scope-OrgID", "1234") + resp, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() if resp.StatusCode != http.StatusNoContent { b, _ := io.ReadAll(resp.Body) logger.Error(string(b)) @@ -306,6 +313,8 @@ mode: cat source: loki url: http://127.0.0.1:3100 query: '{server="demo",key="%s"}' +headers: + x-scope-orgid: "1234" since: 1h `, title), }, @@ -362,26 +371,26 @@ func TestStreamingAcquisition(t *testing.T) { }{ { name: "Bad port", - config: ` -mode: tail + config: `mode: tail source: loki -url: http://127.0.0.1:3101 +url: "http://127.0.0.1:3101" +headers: + x-scope-orgid: "1234" query: > - {server="demo"} -`, // No Loki server here + {server="demo"}`, // No Loki server here expectedErr: "", streamErr: `loki is not ready: context deadline exceeded`, expectedLines: 0, }, { name: "ok", - config: ` -mode: tail + config: `mode: tail source: loki -url: http://127.0.0.1:3100 +url: "http://127.0.0.1:3100" +headers: + x-scope-orgid: "1234" query: > - 
{server="demo"} -`, + {server="demo"}`, expectedErr: "", streamErr: "", expectedLines: 20, @@ -456,6 +465,8 @@ func TestStopStreaming(t *testing.T) { mode: tail source: loki url: http://127.0.0.1:3100 +headers: + x-scope-orgid: "1234" query: > {server="demo"} ` From 332af5dd8dd7a546c94758390e404f3ecf428fda Mon Sep 17 00:00:00 2001 From: blotus Date: Fri, 9 Feb 2024 14:39:34 +0100 Subject: [PATCH 017/318] appsec: split return code for bouncer and user (#2821) --- pkg/acquisition/modules/appsec/appsec.go | 8 +- .../modules/appsec/appsec_runner.go | 7 +- pkg/acquisition/modules/appsec/appsec_test.go | 735 ++++++++++++++++-- pkg/appsec/appsec.go | 146 ++-- 4 files changed, 777 insertions(+), 119 deletions(-) diff --git a/pkg/acquisition/modules/appsec/appsec.go b/pkg/acquisition/modules/appsec/appsec.go index 030724fc3e9..4e2ff0bd22b 100644 --- a/pkg/acquisition/modules/appsec/appsec.go +++ b/pkg/acquisition/modules/appsec/appsec.go @@ -354,15 +354,17 @@ func (w *AppsecSource) appsecHandler(rw http.ResponseWriter, r *http.Request) { w.InChan <- parsedRequest + /* + response is a copy of w.AppSecRuntime.Response that is safe to use. 
+ As OutOfBand might still be running, the original one can be modified + */ response := <-parsedRequest.ResponseChannel - statusCode := http.StatusOK if response.InBandInterrupt { - statusCode = http.StatusForbidden AppsecBlockCounter.With(prometheus.Labels{"source": parsedRequest.RemoteAddrNormalized, "appsec_engine": parsedRequest.AppsecEngine}).Inc() } - appsecResponse := w.AppsecRuntime.GenerateResponse(response, logger) + statusCode, appsecResponse := w.AppsecRuntime.GenerateResponse(response, logger) logger.Debugf("Response: %+v", appsecResponse) rw.WriteHeader(statusCode) diff --git a/pkg/acquisition/modules/appsec/appsec_runner.go b/pkg/acquisition/modules/appsec/appsec_runner.go index a9d74aa8f63..cc7264aa2c8 100644 --- a/pkg/acquisition/modules/appsec/appsec_runner.go +++ b/pkg/acquisition/modules/appsec/appsec_runner.go @@ -226,7 +226,8 @@ func (r *AppsecRunner) handleInBandInterrupt(request *appsec.ParsedRequest) { if in := request.Tx.Interruption(); in != nil { r.logger.Debugf("inband rules matched : %d", in.RuleID) r.AppsecRuntime.Response.InBandInterrupt = true - r.AppsecRuntime.Response.HTTPResponseCode = r.AppsecRuntime.Config.BlockedHTTPCode + r.AppsecRuntime.Response.BouncerHTTPResponseCode = r.AppsecRuntime.Config.BouncerBlockedHTTPCode + r.AppsecRuntime.Response.UserHTTPResponseCode = r.AppsecRuntime.Config.UserBlockedHTTPCode r.AppsecRuntime.Response.Action = r.AppsecRuntime.DefaultRemediation if _, ok := r.AppsecRuntime.RemediationById[in.RuleID]; ok { @@ -252,7 +253,9 @@ func (r *AppsecRunner) handleInBandInterrupt(request *appsec.ParsedRequest) { r.logger.Errorf("unable to generate appsec event : %s", err) return } - r.outChan <- *appsecOvlfw + if appsecOvlfw != nil { + r.outChan <- *appsecOvlfw + } } // Should the in band match trigger an event ? 
diff --git a/pkg/acquisition/modules/appsec/appsec_test.go b/pkg/acquisition/modules/appsec/appsec_test.go index 2a58580137d..25aea0c78ea 100644 --- a/pkg/acquisition/modules/appsec/appsec_test.go +++ b/pkg/acquisition/modules/appsec/appsec_test.go @@ -1,6 +1,7 @@ package appsecacquisition import ( + "net/http" "net/url" "testing" "time" @@ -21,16 +22,21 @@ Missing tests (wip): */ type appsecRuleTest struct { - name string - expected_load_ok bool - inband_rules []appsec_rule.CustomRule - outofband_rules []appsec_rule.CustomRule - on_load []appsec.Hook - pre_eval []appsec.Hook - post_eval []appsec.Hook - on_match []appsec.Hook - input_request appsec.ParsedRequest - output_asserts func(events []types.Event, responses []appsec.AppsecTempResponse) + name string + expected_load_ok bool + inband_rules []appsec_rule.CustomRule + outofband_rules []appsec_rule.CustomRule + on_load []appsec.Hook + pre_eval []appsec.Hook + post_eval []appsec.Hook + on_match []appsec.Hook + BouncerBlockedHTTPCode int + UserBlockedHTTPCode int + UserPassedHTTPCode int + DefaultRemediation string + DefaultPassAction string + input_request appsec.ParsedRequest + output_asserts func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) } func TestAppsecOnMatchHooks(t *testing.T) { @@ -53,13 +59,14 @@ func TestAppsecOnMatchHooks(t *testing.T) { URI: "/urllll", Args: url.Values{"foo": []string{"toto"}}, }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse) { + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { require.Len(t, events, 2) require.Equal(t, types.APPSEC, events[0].Type) require.Equal(t, types.LOG, events[1].Type) require.Len(t, responses, 1) - require.Equal(t, 403, responses[0].HTTPResponseCode) - require.Equal(t, "ban", responses[0].Action) + require.Equal(t, 403, responses[0].BouncerHTTPResponseCode) + 
require.Equal(t, 403, responses[0].UserHTTPResponseCode) + require.Equal(t, appsec.BanRemediation, responses[0].Action) }, }, @@ -84,17 +91,18 @@ func TestAppsecOnMatchHooks(t *testing.T) { URI: "/urllll", Args: url.Values{"foo": []string{"toto"}}, }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse) { + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { require.Len(t, events, 2) require.Equal(t, types.APPSEC, events[0].Type) require.Equal(t, types.LOG, events[1].Type) require.Len(t, responses, 1) - require.Equal(t, 413, responses[0].HTTPResponseCode) - require.Equal(t, "ban", responses[0].Action) + require.Equal(t, 403, responses[0].BouncerHTTPResponseCode) + require.Equal(t, 413, responses[0].UserHTTPResponseCode) + require.Equal(t, appsec.BanRemediation, responses[0].Action) }, }, { - name: "on_match: change action to another standard one (log)", + name: "on_match: change action to a non standard one (log)", expected_load_ok: true, inband_rules: []appsec_rule.CustomRule{ { @@ -114,7 +122,7 @@ func TestAppsecOnMatchHooks(t *testing.T) { URI: "/urllll", Args: url.Values{"foo": []string{"toto"}}, }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse) { + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { require.Len(t, events, 2) require.Equal(t, types.APPSEC, events[0].Type) require.Equal(t, types.LOG, events[1].Type) @@ -143,16 +151,16 @@ func TestAppsecOnMatchHooks(t *testing.T) { URI: "/urllll", Args: url.Values{"foo": []string{"toto"}}, }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse) { + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { require.Len(t, events, 2) require.Equal(t, types.APPSEC, 
events[0].Type) require.Equal(t, types.LOG, events[1].Type) require.Len(t, responses, 1) - require.Equal(t, "allow", responses[0].Action) + require.Equal(t, appsec.AllowRemediation, responses[0].Action) }, }, { - name: "on_match: change action to another standard one (deny/ban/block)", + name: "on_match: change action to another standard one (ban)", expected_load_ok: true, inband_rules: []appsec_rule.CustomRule{ { @@ -164,7 +172,7 @@ func TestAppsecOnMatchHooks(t *testing.T) { }, }, on_match: []appsec.Hook{ - {Filter: "IsInBand == true", Apply: []string{"SetRemediation('deny')"}}, + {Filter: "IsInBand == true", Apply: []string{"SetRemediation('ban')"}}, }, input_request: appsec.ParsedRequest{ RemoteAddr: "1.2.3.4", @@ -172,10 +180,10 @@ func TestAppsecOnMatchHooks(t *testing.T) { URI: "/urllll", Args: url.Values{"foo": []string{"toto"}}, }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse) { + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { require.Len(t, responses, 1) //note: SetAction normalizes deny, ban and block to ban - require.Equal(t, "ban", responses[0].Action) + require.Equal(t, appsec.BanRemediation, responses[0].Action) }, }, { @@ -199,10 +207,10 @@ func TestAppsecOnMatchHooks(t *testing.T) { URI: "/urllll", Args: url.Values{"foo": []string{"toto"}}, }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse) { + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { require.Len(t, responses, 1) //note: SetAction normalizes deny, ban and block to ban - require.Equal(t, "captcha", responses[0].Action) + require.Equal(t, appsec.CaptchaRemediation, responses[0].Action) }, }, { @@ -226,7 +234,7 @@ func TestAppsecOnMatchHooks(t *testing.T) { URI: "/urllll", Args: url.Values{"foo": []string{"toto"}}, }, - output_asserts: func(events 
[]types.Event, responses []appsec.AppsecTempResponse) { + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { require.Len(t, events, 2) require.Equal(t, types.APPSEC, events[0].Type) require.Equal(t, types.LOG, events[1].Type) @@ -255,11 +263,11 @@ func TestAppsecOnMatchHooks(t *testing.T) { URI: "/urllll", Args: url.Values{"foo": []string{"toto"}}, }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse) { + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { require.Len(t, events, 1) require.Equal(t, types.LOG, events[0].Type) require.Len(t, responses, 1) - require.Equal(t, "ban", responses[0].Action) + require.Equal(t, appsec.BanRemediation, responses[0].Action) }, }, { @@ -283,11 +291,11 @@ func TestAppsecOnMatchHooks(t *testing.T) { URI: "/urllll", Args: url.Values{"foo": []string{"toto"}}, }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse) { + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { require.Len(t, events, 1) require.Equal(t, types.APPSEC, events[0].Type) require.Len(t, responses, 1) - require.Equal(t, "ban", responses[0].Action) + require.Equal(t, appsec.BanRemediation, responses[0].Action) }, }, } @@ -328,7 +336,7 @@ func TestAppsecPreEvalHooks(t *testing.T) { URI: "/urllll", Args: url.Values{"foo": []string{"toto"}}, }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse) { + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { require.Empty(t, events) require.Len(t, responses, 1) require.False(t, responses[0].InBandInterrupt) @@ -356,7 +364,7 @@ func TestAppsecPreEvalHooks(t *testing.T) { URI: "/urllll", Args: 
url.Values{"foo": []string{"toto"}}, }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse) { + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { require.Len(t, events, 2) require.Equal(t, types.APPSEC, events[0].Type) @@ -391,7 +399,7 @@ func TestAppsecPreEvalHooks(t *testing.T) { URI: "/urllll", Args: url.Values{"foo": []string{"toto"}}, }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse) { + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { require.Empty(t, events) require.Len(t, responses, 1) require.False(t, responses[0].InBandInterrupt) @@ -419,7 +427,7 @@ func TestAppsecPreEvalHooks(t *testing.T) { URI: "/urllll", Args: url.Values{"foo": []string{"toto"}}, }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse) { + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { require.Empty(t, events) require.Len(t, responses, 1) require.False(t, responses[0].InBandInterrupt) @@ -447,7 +455,7 @@ func TestAppsecPreEvalHooks(t *testing.T) { URI: "/urllll", Args: url.Values{"foo": []string{"toto"}}, }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse) { + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { require.Empty(t, events) require.Len(t, responses, 1) require.False(t, responses[0].InBandInterrupt) @@ -472,7 +480,7 @@ func TestAppsecPreEvalHooks(t *testing.T) { URI: "/urllll", Args: url.Values{"foo": []string{"toto"}}, }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse) { + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, 
appsecResponse appsec.BodyResponse, statusCode int) { require.Len(t, events, 1) require.Equal(t, types.LOG, events[0].Type) require.True(t, events[0].Appsec.HasOutBandMatches) @@ -506,7 +514,7 @@ func TestAppsecPreEvalHooks(t *testing.T) { URI: "/urllll", Args: url.Values{"foo": []string{"toto"}}, }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse) { + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { require.Len(t, events, 2) require.Len(t, responses, 1) require.Equal(t, "foobar", responses[0].Action) @@ -533,7 +541,7 @@ func TestAppsecPreEvalHooks(t *testing.T) { URI: "/urllll", Args: url.Values{"foo": []string{"toto"}}, }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse) { + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { require.Len(t, events, 2) require.Len(t, responses, 1) require.Equal(t, "foobar", responses[0].Action) @@ -560,10 +568,12 @@ func TestAppsecPreEvalHooks(t *testing.T) { URI: "/urllll", Args: url.Values{"foo": []string{"toto"}}, }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse) { + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { require.Len(t, events, 2) require.Len(t, responses, 1) require.Equal(t, "foobar", responses[0].Action) + require.Equal(t, "foobar", appsecResponse.Action) + require.Equal(t, http.StatusForbidden, appsecResponse.HTTPStatus) }, }, } @@ -574,6 +584,473 @@ func TestAppsecPreEvalHooks(t *testing.T) { }) } } + +func TestAppsecRemediationConfigHooks(t *testing.T) { + + tests := []appsecRuleTest{ + { + name: "Basic matching rule", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: 
[]string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.BanRemediation, responses[0].Action) + require.Equal(t, http.StatusForbidden, statusCode) + require.Equal(t, appsec.BanRemediation, appsecResponse.Action) + require.Equal(t, http.StatusForbidden, appsecResponse.HTTPStatus) + }, + }, + { + name: "SetRemediation", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + on_match: []appsec.Hook{{Apply: []string{"SetRemediation('captcha')"}}}, //rule ID is generated at runtime. 
If you change rule, it will break the test (: + + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.CaptchaRemediation, responses[0].Action) + require.Equal(t, http.StatusForbidden, statusCode) + require.Equal(t, appsec.CaptchaRemediation, appsecResponse.Action) + require.Equal(t, http.StatusForbidden, appsecResponse.HTTPStatus) + }, + }, + { + name: "SetRemediation", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + on_match: []appsec.Hook{{Apply: []string{"SetReturnCode(418)"}}}, //rule ID is generated at runtime. If you change rule, it will break the test (: + + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.BanRemediation, responses[0].Action) + require.Equal(t, http.StatusForbidden, statusCode) + require.Equal(t, appsec.BanRemediation, appsecResponse.Action) + require.Equal(t, http.StatusTeapot, appsecResponse.HTTPStatus) + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + loadAppSecEngine(test, t) + }) + } +} +func TestOnMatchRemediationHooks(t *testing.T) { + tests := []appsecRuleTest{ + { + name: "set remediation to allow with on_match hook", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", 
+ Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + on_match: []appsec.Hook{ + {Filter: "IsInBand == true", Apply: []string{"SetRemediation('allow')"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.AllowRemediation, appsecResponse.Action) + require.Equal(t, http.StatusOK, appsecResponse.HTTPStatus) + }, + }, + { + name: "set remediation to captcha + custom user code with on_match hook", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + DefaultRemediation: appsec.AllowRemediation, + on_match: []appsec.Hook{ + {Filter: "IsInBand == true", Apply: []string{"SetRemediation('captcha')", "SetReturnCode(418)"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + spew.Dump(responses) + spew.Dump(appsecResponse) + + log.Errorf("http status : %d", statusCode) + require.Equal(t, appsec.CaptchaRemediation, appsecResponse.Action) + require.Equal(t, http.StatusTeapot, appsecResponse.HTTPStatus) + require.Equal(t, http.StatusForbidden, statusCode) + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + loadAppSecEngine(test, t) + }) + } +} + +func TestAppsecDefaultPassRemediation(t *testing.T) { + + tests := []appsecRuleTest{ + { + name: "Basic non-matching rule", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: 
"^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/", + Args: url.Values{"foo": []string{"tutu"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.AllowRemediation, responses[0].Action) + require.Equal(t, http.StatusOK, statusCode) + require.Equal(t, appsec.AllowRemediation, appsecResponse.Action) + require.Equal(t, http.StatusOK, appsecResponse.HTTPStatus) + }, + }, + { + name: "DefaultPassAction: pass", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/", + Args: url.Values{"foo": []string{"tutu"}}, + }, + DefaultPassAction: "allow", + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.AllowRemediation, responses[0].Action) + require.Equal(t, http.StatusOK, statusCode) + require.Equal(t, appsec.AllowRemediation, appsecResponse.Action) + require.Equal(t, http.StatusOK, appsecResponse.HTTPStatus) + }, + }, + { + name: "DefaultPassAction: captcha", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/", + Args: url.Values{"foo": []string{"tutu"}}, + }, + DefaultPassAction: "captcha", + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, 
appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.CaptchaRemediation, responses[0].Action) + require.Equal(t, http.StatusOK, statusCode) //@tko: body is captcha, but as it's 200, captcha won't be showed to user + require.Equal(t, appsec.CaptchaRemediation, appsecResponse.Action) + require.Equal(t, http.StatusOK, appsecResponse.HTTPStatus) + }, + }, + { + name: "DefaultPassHTTPCode: 200", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/", + Args: url.Values{"foo": []string{"tutu"}}, + }, + UserPassedHTTPCode: 200, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.AllowRemediation, responses[0].Action) + require.Equal(t, http.StatusOK, statusCode) + require.Equal(t, appsec.AllowRemediation, appsecResponse.Action) + require.Equal(t, http.StatusOK, appsecResponse.HTTPStatus) + }, + }, + { + name: "DefaultPassHTTPCode: 200", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/", + Args: url.Values{"foo": []string{"tutu"}}, + }, + UserPassedHTTPCode: 418, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.AllowRemediation, responses[0].Action) + require.Equal(t, http.StatusOK, statusCode) + require.Equal(t, appsec.AllowRemediation, 
appsecResponse.Action) + require.Equal(t, http.StatusTeapot, appsecResponse.HTTPStatus) + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + loadAppSecEngine(test, t) + }) + } +} + +func TestAppsecDefaultRemediation(t *testing.T) { + + tests := []appsecRuleTest{ + { + name: "Basic matching rule", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.BanRemediation, responses[0].Action) + require.Equal(t, http.StatusForbidden, statusCode) + require.Equal(t, appsec.BanRemediation, appsecResponse.Action) + require.Equal(t, http.StatusForbidden, appsecResponse.HTTPStatus) + }, + }, + { + name: "default remediation to ban (default)", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + DefaultRemediation: "ban", + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.BanRemediation, responses[0].Action) + require.Equal(t, http.StatusForbidden, statusCode) + require.Equal(t, appsec.BanRemediation, appsecResponse.Action) + require.Equal(t, http.StatusForbidden, appsecResponse.HTTPStatus) + 
}, + }, + { + name: "default remediation to allow", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + DefaultRemediation: "allow", + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.AllowRemediation, responses[0].Action) + require.Equal(t, http.StatusOK, statusCode) + require.Equal(t, appsec.AllowRemediation, appsecResponse.Action) + require.Equal(t, http.StatusOK, appsecResponse.HTTPStatus) + }, + }, + { + name: "default remediation to captcha", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + DefaultRemediation: "captcha", + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.CaptchaRemediation, responses[0].Action) + require.Equal(t, http.StatusForbidden, statusCode) + require.Equal(t, appsec.CaptchaRemediation, appsecResponse.Action) + require.Equal(t, http.StatusForbidden, appsecResponse.HTTPStatus) + }, + }, + { + name: "custom user HTTP code", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, 
+ Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + UserBlockedHTTPCode: 418, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.BanRemediation, responses[0].Action) + require.Equal(t, http.StatusForbidden, statusCode) + require.Equal(t, appsec.BanRemediation, appsecResponse.Action) + require.Equal(t, http.StatusTeapot, appsecResponse.HTTPStatus) + }, + }, + { + name: "custom remediation + HTTP code", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + UserBlockedHTTPCode: 418, + DefaultRemediation: "foobar", + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, "foobar", responses[0].Action) + require.Equal(t, http.StatusForbidden, statusCode) + require.Equal(t, "foobar", appsecResponse.Action) + require.Equal(t, http.StatusTeapot, appsecResponse.HTTPStatus) + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + loadAppSecEngine(test, t) + }) + } +} + func TestAppsecRuleMatches(t *testing.T) { /* @@ -601,7 +1078,7 @@ func TestAppsecRuleMatches(t *testing.T) { URI: "/urllll", Args: url.Values{"foo": []string{"toto"}}, }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse) { + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode 
int) { require.Len(t, events, 2) require.Equal(t, types.APPSEC, events[0].Type) @@ -632,13 +1109,172 @@ func TestAppsecRuleMatches(t *testing.T) { URI: "/urllll", Args: url.Values{"foo": []string{"tutu"}}, }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse) { + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { require.Empty(t, events) require.Len(t, responses, 1) require.False(t, responses[0].InBandInterrupt) require.False(t, responses[0].OutOfBandInterrupt) }, }, + { + name: "default remediation to allow", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + DefaultRemediation: "allow", + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.AllowRemediation, responses[0].Action) + require.Equal(t, http.StatusOK, statusCode) + require.Equal(t, appsec.AllowRemediation, appsecResponse.Action) + require.Equal(t, http.StatusOK, appsecResponse.HTTPStatus) + }, + }, + { + name: "default remediation to captcha", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + DefaultRemediation: "captcha", + output_asserts: func(events []types.Event, responses 
[]appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.CaptchaRemediation, responses[0].Action) + require.Equal(t, http.StatusForbidden, statusCode) + require.Equal(t, appsec.CaptchaRemediation, appsecResponse.Action) + require.Equal(t, http.StatusForbidden, appsecResponse.HTTPStatus) + }, + }, + { + name: "no default remediation / custom user HTTP code", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + UserBlockedHTTPCode: 418, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.BanRemediation, responses[0].Action) + require.Equal(t, http.StatusForbidden, statusCode) + require.Equal(t, appsec.BanRemediation, appsecResponse.Action) + require.Equal(t, http.StatusTeapot, appsecResponse.HTTPStatus) + }, + }, + { + name: "no match but try to set remediation to captcha with on_match hook", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + on_match: []appsec.Hook{ + {Filter: "IsInBand == true", Apply: []string{"SetRemediation('captcha')"}}, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"bla"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Empty(t, events) + 
require.Equal(t, http.StatusOK, statusCode) + require.Equal(t, appsec.AllowRemediation, appsecResponse.Action) + }, + }, + { + name: "no match but try to set user HTTP code with on_match hook", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + on_match: []appsec.Hook{ + {Filter: "IsInBand == true", Apply: []string{"SetReturnCode(418)"}}, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"bla"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Empty(t, events) + require.Equal(t, http.StatusOK, statusCode) + require.Equal(t, appsec.AllowRemediation, appsecResponse.Action) + }, + }, + { + name: "no match but try to set remediation with pre_eval hook", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + pre_eval: []appsec.Hook{ + {Filter: "IsInBand == true", Apply: []string{"SetRemediationByName('rule42', 'captcha')"}}, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"bla"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Empty(t, events) + require.Equal(t, http.StatusOK, statusCode) + require.Equal(t, appsec.AllowRemediation, appsecResponse.Action) + }, + }, } for _, test := range tests { @@ -678,7 +1314,16 @@ func loadAppSecEngine(test appsecRuleTest, t *testing.T) { outofbandRules = 
append(outofbandRules, strRule) } - appsecCfg := appsec.AppsecConfig{Logger: logger, OnLoad: test.on_load, PreEval: test.pre_eval, PostEval: test.post_eval, OnMatch: test.on_match} + appsecCfg := appsec.AppsecConfig{Logger: logger, + OnLoad: test.on_load, + PreEval: test.pre_eval, + PostEval: test.post_eval, + OnMatch: test.on_match, + BouncerBlockedHTTPCode: test.BouncerBlockedHTTPCode, + UserBlockedHTTPCode: test.UserBlockedHTTPCode, + UserPassedHTTPCode: test.UserPassedHTTPCode, + DefaultRemediation: test.DefaultRemediation, + DefaultPassAction: test.DefaultPassAction} AppsecRuntime, err := appsecCfg.Build() if err != nil { t.Fatalf("unable to build appsec runtime : %s", err) @@ -724,8 +1369,10 @@ func loadAppSecEngine(test appsecRuleTest, t *testing.T) { runner.handleRequest(&input) time.Sleep(50 * time.Millisecond) + + http_status, appsecResponse := AppsecRuntime.GenerateResponse(OutputResponses[0], logger) log.Infof("events : %s", spew.Sdump(OutputEvents)) log.Infof("responses : %s", spew.Sdump(OutputResponses)) - test.output_asserts(OutputEvents, OutputResponses) + test.output_asserts(OutputEvents, OutputResponses, appsecResponse, http_status) } diff --git a/pkg/appsec/appsec.go b/pkg/appsec/appsec.go index ec7e7bef3b6..554fc3b7123 100644 --- a/pkg/appsec/appsec.go +++ b/pkg/appsec/appsec.go @@ -2,6 +2,7 @@ package appsec import ( "fmt" + "net/http" "os" "regexp" @@ -30,6 +31,12 @@ const ( hookOnMatch ) +const ( + BanRemediation = "ban" + CaptchaRemediation = "captcha" + AllowRemediation = "allow" +) + func (h *Hook) Build(hookStage int) error { ctx := map[string]interface{}{} @@ -62,12 +69,13 @@ func (h *Hook) Build(hookStage int) error { } type AppsecTempResponse struct { - InBandInterrupt bool - OutOfBandInterrupt bool - Action string //allow, deny, captcha, log - HTTPResponseCode int - SendEvent bool //do we send an internal event on rule match - SendAlert bool //do we send an alert on rule match + InBandInterrupt bool + OutOfBandInterrupt bool + Action 
string //allow, deny, captcha, log + UserHTTPResponseCode int //The response code to send to the user + BouncerHTTPResponseCode int //The response code to send to the remediation component + SendEvent bool //do we send an internal event on rule match + SendAlert bool //do we send an alert on rule match } type AppsecSubEngineOpts struct { @@ -110,31 +118,33 @@ type AppsecRuntimeConfig struct { } type AppsecConfig struct { - Name string `yaml:"name"` - OutOfBandRules []string `yaml:"outofband_rules"` - InBandRules []string `yaml:"inband_rules"` - DefaultRemediation string `yaml:"default_remediation"` - DefaultPassAction string `yaml:"default_pass_action"` - BlockedHTTPCode int `yaml:"blocked_http_code"` - PassedHTTPCode int `yaml:"passed_http_code"` - OnLoad []Hook `yaml:"on_load"` - PreEval []Hook `yaml:"pre_eval"` - PostEval []Hook `yaml:"post_eval"` - OnMatch []Hook `yaml:"on_match"` - VariablesTracking []string `yaml:"variables_tracking"` - InbandOptions AppsecSubEngineOpts `yaml:"inband_options"` - OutOfBandOptions AppsecSubEngineOpts `yaml:"outofband_options"` + Name string `yaml:"name"` + OutOfBandRules []string `yaml:"outofband_rules"` + InBandRules []string `yaml:"inband_rules"` + DefaultRemediation string `yaml:"default_remediation"` + DefaultPassAction string `yaml:"default_pass_action"` + BouncerBlockedHTTPCode int `yaml:"blocked_http_code"` //returned to the bouncer + BouncerPassedHTTPCode int `yaml:"passed_http_code"` //returned to the bouncer + UserBlockedHTTPCode int `yaml:"user_blocked_http_code"` //returned to the user + UserPassedHTTPCode int `yaml:"user_passed_http_code"` //returned to the user + + OnLoad []Hook `yaml:"on_load"` + PreEval []Hook `yaml:"pre_eval"` + PostEval []Hook `yaml:"post_eval"` + OnMatch []Hook `yaml:"on_match"` + VariablesTracking []string `yaml:"variables_tracking"` + InbandOptions AppsecSubEngineOpts `yaml:"inband_options"` + OutOfBandOptions AppsecSubEngineOpts `yaml:"outofband_options"` LogLevel *log.Level 
`yaml:"log_level"` Logger *log.Entry `yaml:"-"` } func (w *AppsecRuntimeConfig) ClearResponse() { - w.Logger.Debugf("#-> %p", w) w.Response = AppsecTempResponse{} - w.Logger.Debugf("-> %p", w.Config) w.Response.Action = w.Config.DefaultPassAction - w.Response.HTTPResponseCode = w.Config.PassedHTTPCode + w.Response.BouncerHTTPResponseCode = w.Config.BouncerPassedHTTPCode + w.Response.UserHTTPResponseCode = w.Config.UserPassedHTTPCode w.Response.SendEvent = true w.Response.SendAlert = true } @@ -191,24 +201,35 @@ func (wc *AppsecConfig) GetDataDir() string { func (wc *AppsecConfig) Build() (*AppsecRuntimeConfig, error) { ret := &AppsecRuntimeConfig{Logger: wc.Logger.WithField("component", "appsec_runtime_config")} - //set the defaults - switch wc.DefaultRemediation { - case "": - wc.DefaultRemediation = "ban" - case "ban", "captcha", "log": - //those are the officially supported remediation(s) - default: - wc.Logger.Warningf("default '%s' remediation of %s is none of [ban,captcha,log] ensure bouncer compatbility!", wc.DefaultRemediation, wc.Name) + + if wc.BouncerBlockedHTTPCode == 0 { + wc.BouncerBlockedHTTPCode = http.StatusForbidden + } + if wc.BouncerPassedHTTPCode == 0 { + wc.BouncerPassedHTTPCode = http.StatusOK } - if wc.BlockedHTTPCode == 0 { - wc.BlockedHTTPCode = 403 + + if wc.UserBlockedHTTPCode == 0 { + wc.UserBlockedHTTPCode = http.StatusForbidden } - if wc.PassedHTTPCode == 0 { - wc.PassedHTTPCode = 200 + if wc.UserPassedHTTPCode == 0 { + wc.UserPassedHTTPCode = http.StatusOK } if wc.DefaultPassAction == "" { - wc.DefaultPassAction = "allow" + wc.DefaultPassAction = AllowRemediation } + if wc.DefaultRemediation == "" { + wc.DefaultRemediation = BanRemediation + } + + //set the defaults + switch wc.DefaultRemediation { + case BanRemediation, CaptchaRemediation, AllowRemediation: + //those are the officially supported remediation(s) + default: + wc.Logger.Warningf("default '%s' remediation of %s is none of [%s,%s,%s] ensure bouncer compatbility!", 
wc.DefaultRemediation, wc.Name, BanRemediation, CaptchaRemediation, AllowRemediation) + } + ret.Name = wc.Name ret.Config = wc ret.DefaultRemediation = wc.DefaultRemediation @@ -553,27 +574,13 @@ func (w *AppsecRuntimeConfig) SetActionByName(name string, action string) error func (w *AppsecRuntimeConfig) SetAction(action string) error { //log.Infof("setting to %s", action) w.Logger.Debugf("setting action to %s", action) - switch action { - case "allow": - w.Response.Action = action - w.Response.HTTPResponseCode = w.Config.PassedHTTPCode - //@tko how should we handle this ? it seems bouncer only understand bans, but it might be misleading ? - case "deny", "ban", "block": - w.Response.Action = "ban" - case "log": - w.Response.Action = action - w.Response.HTTPResponseCode = w.Config.PassedHTTPCode - case "captcha": - w.Response.Action = action - default: - w.Response.Action = action - } + w.Response.Action = action return nil } func (w *AppsecRuntimeConfig) SetHTTPCode(code int) error { w.Logger.Debugf("setting http code to %d", code) - w.Response.HTTPResponseCode = code + w.Response.UserHTTPResponseCode = code return nil } @@ -582,24 +589,23 @@ type BodyResponse struct { HTTPStatus int `json:"http_status"` } -func (w *AppsecRuntimeConfig) GenerateResponse(response AppsecTempResponse, logger *log.Entry) BodyResponse { - resp := BodyResponse{} - //if there is no interrupt, we should allow with default code - if !response.InBandInterrupt { - resp.Action = w.Config.DefaultPassAction - resp.HTTPStatus = w.Config.PassedHTTPCode - return resp - } - resp.Action = response.Action - if resp.Action == "" { - resp.Action = w.Config.DefaultRemediation - } - logger.Debugf("action is %s", resp.Action) +func (w *AppsecRuntimeConfig) GenerateResponse(response AppsecTempResponse, logger *log.Entry) (int, BodyResponse) { + var bouncerStatusCode int - resp.HTTPStatus = response.HTTPResponseCode - if resp.HTTPStatus == 0 { - resp.HTTPStatus = w.Config.BlockedHTTPCode + resp := 
BodyResponse{Action: response.Action} + if response.Action == AllowRemediation { + resp.HTTPStatus = w.Config.UserPassedHTTPCode + bouncerStatusCode = w.Config.BouncerPassedHTTPCode + } else { //ban, captcha and anything else + resp.HTTPStatus = response.UserHTTPResponseCode + if resp.HTTPStatus == 0 { + resp.HTTPStatus = w.Config.UserBlockedHTTPCode + } + bouncerStatusCode = response.BouncerHTTPResponseCode + if bouncerStatusCode == 0 { + bouncerStatusCode = w.Config.BouncerBlockedHTTPCode + } } - logger.Debugf("http status is %d", resp.HTTPStatus) - return resp + + return bouncerStatusCode, resp } From 58a1d7164f99bacbbe27c0d32b14bf63e27b4274 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 9 Feb 2024 17:39:50 +0100 Subject: [PATCH 018/318] refact "cscli lapi" (#2825) --- cmd/crowdsec-cli/lapi.go | 271 ++++++++++++++++++++++++--------------- cmd/crowdsec-cli/main.go | 2 +- 2 files changed, 167 insertions(+), 106 deletions(-) diff --git a/cmd/crowdsec-cli/lapi.go b/cmd/crowdsec-cli/lapi.go index ce59ac370cd..0bb4a31b72a 100644 --- a/cmd/crowdsec-cli/lapi.go +++ b/cmd/crowdsec-cli/lapi.go @@ -6,6 +6,7 @@ import ( "fmt" "net/url" "os" + "slices" "sort" "strings" @@ -13,7 +14,6 @@ import ( log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "gopkg.in/yaml.v2" - "slices" "github.com/crowdsecurity/go-cs-lib/version" @@ -29,15 +29,27 @@ import ( const LAPIURLPrefix = "v1" -func runLapiStatus(cmd *cobra.Command, args []string) error { - password := strfmt.Password(csConfig.API.Client.Credentials.Password) - apiurl, err := url.Parse(csConfig.API.Client.Credentials.URL) - login := csConfig.API.Client.Credentials.Login +type cliLapi struct { + cfg configGetter +} + +func NewCLILapi(cfg configGetter) *cliLapi { + return &cliLapi{ + cfg: cfg, + } +} + +func (cli *cliLapi) status() error { + cfg := cli.cfg() + password := strfmt.Password(cfg.API.Client.Credentials.Password) + login := cfg.API.Client.Credentials.Login + + apiurl, 
err := url.Parse(cfg.API.Client.Credentials.URL) if err != nil { return fmt.Errorf("parsing api url: %w", err) } - hub, err := require.Hub(csConfig, nil, nil) + hub, err := require.Hub(cfg, nil, nil) if err != nil { return err } @@ -54,13 +66,14 @@ func runLapiStatus(cmd *cobra.Command, args []string) error { if err != nil { return fmt.Errorf("init default client: %w", err) } + t := models.WatcherAuthRequest{ MachineID: &login, Password: &password, Scenarios: scenarios, } - log.Infof("Loaded credentials from %s", csConfig.API.Client.CredentialsFilePath) + log.Infof("Loaded credentials from %s", cfg.API.Client.CredentialsFilePath) log.Infof("Trying to authenticate with username %s on %s", login, apiurl) _, _, err = Client.Auth.AuthenticateWatcher(context.Background(), t) @@ -69,26 +82,15 @@ func runLapiStatus(cmd *cobra.Command, args []string) error { } log.Infof("You can successfully interact with Local API (LAPI)") + return nil } -func runLapiRegister(cmd *cobra.Command, args []string) error { - flags := cmd.Flags() +func (cli *cliLapi) register(apiURL string, outputFile string, machine string) error { + var err error - apiURL, err := flags.GetString("url") - if err != nil { - return err - } - - outputFile, err := flags.GetString("file") - if err != nil { - return err - } - - lapiUser, err := flags.GetString("machine") - if err != nil { - return err - } + lapiUser := machine + cfg := cli.cfg() if lapiUser == "" { lapiUser, err = generateID("") @@ -96,12 +98,15 @@ func runLapiRegister(cmd *cobra.Command, args []string) error { return fmt.Errorf("unable to generate machine id: %w", err) } } + password := strfmt.Password(generatePassword(passwordLength)) + if apiURL == "" { - if csConfig.API.Client == nil || csConfig.API.Client.Credentials == nil || csConfig.API.Client.Credentials.URL == "" { + if cfg.API.Client == nil || cfg.API.Client.Credentials == nil || cfg.API.Client.Credentials.URL == "" { return fmt.Errorf("no Local API URL. 
Please provide it in your configuration or with the -u parameter") } - apiURL = csConfig.API.Client.Credentials.URL + + apiURL = cfg.API.Client.Credentials.URL } /*URL needs to end with /, but user doesn't care*/ if !strings.HasSuffix(apiURL, "/") { @@ -111,10 +116,12 @@ func runLapiRegister(cmd *cobra.Command, args []string) error { if !strings.HasPrefix(apiURL, "http://") && !strings.HasPrefix(apiURL, "https://") { apiURL = "http://" + apiURL } + apiurl, err := url.Parse(apiURL) if err != nil { return fmt.Errorf("parsing api url: %w", err) } + _, err = apiclient.RegisterClient(&apiclient.Config{ MachineID: lapiUser, Password: password, @@ -130,138 +137,142 @@ func runLapiRegister(cmd *cobra.Command, args []string) error { log.Printf("Successfully registered to Local API (LAPI)") var dumpFile string + if outputFile != "" { dumpFile = outputFile - } else if csConfig.API.Client.CredentialsFilePath != "" { - dumpFile = csConfig.API.Client.CredentialsFilePath + } else if cfg.API.Client.CredentialsFilePath != "" { + dumpFile = cfg.API.Client.CredentialsFilePath } else { dumpFile = "" } + apiCfg := csconfig.ApiCredentialsCfg{ Login: lapiUser, Password: password.String(), URL: apiURL, } + apiConfigDump, err := yaml.Marshal(apiCfg) if err != nil { return fmt.Errorf("unable to marshal api credentials: %w", err) } + if dumpFile != "" { err = os.WriteFile(dumpFile, apiConfigDump, 0o600) if err != nil { return fmt.Errorf("write api credentials to '%s' failed: %w", dumpFile, err) } + log.Printf("Local API credentials written to '%s'", dumpFile) } else { fmt.Printf("%s\n", string(apiConfigDump)) } + log.Warning(ReloadMessage()) return nil } -func NewLapiStatusCmd() *cobra.Command { +func (cli *cliLapi) newStatusCmd() *cobra.Command { cmdLapiStatus := &cobra.Command{ Use: "status", Short: "Check authentication to Local API (LAPI)", Args: cobra.MinimumNArgs(0), DisableAutoGenTag: true, - RunE: runLapiStatus, + RunE: func(cmd *cobra.Command, args []string) error { + return 
cli.status() + }, } return cmdLapiStatus } -func NewLapiRegisterCmd() *cobra.Command { - cmdLapiRegister := &cobra.Command{ +func (cli *cliLapi) newRegisterCmd() *cobra.Command { + var ( + apiURL string + outputFile string + machine string + ) + + cmd := &cobra.Command{ Use: "register", Short: "Register a machine to Local API (LAPI)", Long: `Register your machine to the Local API (LAPI). Keep in mind the machine needs to be validated by an administrator on LAPI side to be effective.`, Args: cobra.MinimumNArgs(0), DisableAutoGenTag: true, - RunE: runLapiRegister, + RunE: func(_ *cobra.Command, _ []string) error { + return cli.register(apiURL, outputFile, machine) + }, } - flags := cmdLapiRegister.Flags() - flags.StringP("url", "u", "", "URL of the API (ie. http://127.0.0.1)") - flags.StringP("file", "f", "", "output file destination") - flags.String("machine", "", "Name of the machine to register with") + flags := cmd.Flags() + flags.StringVarP(&apiURL, "url", "u", "", "URL of the API (ie. 
http://127.0.0.1)") + flags.StringVarP(&outputFile, "file", "f", "", "output file destination") + flags.StringVar(&machine, "machine", "", "Name of the machine to register with") - return cmdLapiRegister + return cmd } -func NewLapiCmd() *cobra.Command { - cmdLapi := &cobra.Command{ +func (cli *cliLapi) NewCommand() *cobra.Command { + cmd := &cobra.Command{ Use: "lapi [action]", Short: "Manage interaction with Local API (LAPI)", Args: cobra.MinimumNArgs(1), DisableAutoGenTag: true, - PersistentPreRunE: func(cmd *cobra.Command, args []string) error { - if err := csConfig.LoadAPIClient(); err != nil { + PersistentPreRunE: func(_ *cobra.Command, _ []string) error { + if err := cli.cfg().LoadAPIClient(); err != nil { return fmt.Errorf("loading api client: %w", err) } return nil }, } - cmdLapi.AddCommand(NewLapiRegisterCmd()) - cmdLapi.AddCommand(NewLapiStatusCmd()) - cmdLapi.AddCommand(NewLapiContextCmd()) + cmd.AddCommand(cli.newRegisterCmd()) + cmd.AddCommand(cli.newStatusCmd()) + cmd.AddCommand(cli.newContextCmd()) - return cmdLapi + return cmd } -func AddContext(key string, values []string) error { +func (cli *cliLapi) addContext(key string, values []string) error { + cfg := cli.cfg() + if err := alertcontext.ValidateContextExpr(key, values); err != nil { - return fmt.Errorf("invalid context configuration :%s", err) + return fmt.Errorf("invalid context configuration: %w", err) } - if _, ok := csConfig.Crowdsec.ContextToSend[key]; !ok { - csConfig.Crowdsec.ContextToSend[key] = make([]string, 0) + + if _, ok := cfg.Crowdsec.ContextToSend[key]; !ok { + cfg.Crowdsec.ContextToSend[key] = make([]string, 0) log.Infof("key '%s' added", key) } - data := csConfig.Crowdsec.ContextToSend[key] + + data := cfg.Crowdsec.ContextToSend[key] + for _, val := range values { if !slices.Contains(data, val) { log.Infof("value '%s' added to key '%s'", val, key) data = append(data, val) } - csConfig.Crowdsec.ContextToSend[key] = data + + cfg.Crowdsec.ContextToSend[key] = data } - if err := 
csConfig.Crowdsec.DumpContextConfigFile(); err != nil { + + if err := cfg.Crowdsec.DumpContextConfigFile(); err != nil { return err } return nil } -func NewLapiContextCmd() *cobra.Command { - cmdContext := &cobra.Command{ - Use: "context [command]", - Short: "Manage context to send with alerts", - DisableAutoGenTag: true, - PersistentPreRunE: func(cmd *cobra.Command, args []string) error { - if err := csConfig.LoadCrowdsec(); err != nil { - fileNotFoundMessage := fmt.Sprintf("failed to open context file: open %s: no such file or directory", csConfig.Crowdsec.ConsoleContextPath) - if err.Error() != fileNotFoundMessage { - return fmt.Errorf("unable to load CrowdSec agent configuration: %w", err) - } - } - if csConfig.DisableAgent { - return errors.New("agent is disabled and lapi context can only be used on the agent") - } - - return nil - }, - Run: func(cmd *cobra.Command, args []string) { - printHelp(cmd) - }, - } +func (cli *cliLapi) newContextAddCmd() *cobra.Command { + var ( + keyToAdd string + valuesToAdd []string + ) - var keyToAdd string - var valuesToAdd []string - cmdContextAdd := &cobra.Command{ + cmd := &cobra.Command{ Use: "add", Short: "Add context to send with alerts. 
You must specify the output key with the expr value you want", Example: `cscli lapi context add --key source_ip --value evt.Meta.source_ip @@ -269,18 +280,18 @@ cscli lapi context add --key file_source --value evt.Line.Src cscli lapi context add --value evt.Meta.source_ip --value evt.Meta.target_user `, DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args []string) error { - hub, err := require.Hub(csConfig, nil, nil) + RunE: func(_ *cobra.Command, _ []string) error { + hub, err := require.Hub(cli.cfg(), nil, nil) if err != nil { return err } - if err = alertcontext.LoadConsoleContext(csConfig, hub); err != nil { + if err = alertcontext.LoadConsoleContext(cli.cfg(), hub); err != nil { return fmt.Errorf("while loading context: %w", err) } if keyToAdd != "" { - if err := AddContext(keyToAdd, valuesToAdd); err != nil { + if err := cli.addContext(keyToAdd, valuesToAdd); err != nil { return err } return nil @@ -290,7 +301,7 @@ cscli lapi context add --value evt.Meta.source_ip --value evt.Meta.target_user keySlice := strings.Split(v, ".") key := keySlice[len(keySlice)-1] value := []string{v} - if err := AddContext(key, value); err != nil { + if err := cli.addContext(key, value); err != nil { return err } } @@ -298,31 +309,37 @@ cscli lapi context add --value evt.Meta.source_ip --value evt.Meta.target_user return nil }, } - cmdContextAdd.Flags().StringVarP(&keyToAdd, "key", "k", "", "The key of the different values to send") - cmdContextAdd.Flags().StringSliceVar(&valuesToAdd, "value", []string{}, "The expr fields to associate with the key") - cmdContextAdd.MarkFlagRequired("value") - cmdContext.AddCommand(cmdContextAdd) - cmdContextStatus := &cobra.Command{ + flags := cmd.Flags() + flags.StringVarP(&keyToAdd, "key", "k", "", "The key of the different values to send") + flags.StringSliceVar(&valuesToAdd, "value", []string{}, "The expr fields to associate with the key") + cmd.MarkFlagRequired("value") + + return cmd +} + +func (cli *cliLapi) newContextStatusCmd() 
*cobra.Command { + cmd := &cobra.Command{ Use: "status", Short: "List context to send with alerts", DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args []string) error { - hub, err := require.Hub(csConfig, nil, nil) + RunE: func(_ *cobra.Command, _ []string) error { + cfg := cli.cfg() + hub, err := require.Hub(cfg, nil, nil) if err != nil { return err } - if err = alertcontext.LoadConsoleContext(csConfig, hub); err != nil { + if err = alertcontext.LoadConsoleContext(cfg, hub); err != nil { return fmt.Errorf("while loading context: %w", err) } - if len(csConfig.Crowdsec.ContextToSend) == 0 { + if len(cfg.Crowdsec.ContextToSend) == 0 { fmt.Println("No context found on this agent. You can use 'cscli lapi context add' to add context to your alerts.") return nil } - dump, err := yaml.Marshal(csConfig.Crowdsec.ContextToSend) + dump, err := yaml.Marshal(cfg.Crowdsec.ContextToSend) if err != nil { return fmt.Errorf("unable to show context status: %w", err) } @@ -332,10 +349,14 @@ cscli lapi context add --value evt.Meta.source_ip --value evt.Meta.target_user return nil }, } - cmdContext.AddCommand(cmdContextStatus) + return cmd +} + +func (cli *cliLapi) newContextDetectCmd() *cobra.Command { var detectAll bool - cmdContextDetect := &cobra.Command{ + + cmd := &cobra.Command{ Use: "detect", Short: "Detect available fields from the installed parsers", Example: `cscli lapi context detect --all @@ -343,6 +364,7 @@ cscli lapi context detect crowdsecurity/sshd-logs `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { + cfg := cli.cfg() if !detectAll && len(args) == 0 { log.Infof("Please provide parsers to detect or --all flag.") printHelp(cmd) @@ -355,13 +377,13 @@ cscli lapi context detect crowdsecurity/sshd-logs return fmt.Errorf("failed to init expr helpers: %w", err) } - hub, err := require.Hub(csConfig, nil, nil) + hub, err := require.Hub(cfg, nil, nil) if err != nil { return err } csParsers := parser.NewParsers(hub) - if csParsers, err 
= parser.LoadParsers(csConfig, csParsers); err != nil { + if csParsers, err = parser.LoadParsers(cfg, csParsers); err != nil { return fmt.Errorf("unable to load parsers: %w", err) } @@ -418,47 +440,85 @@ cscli lapi context detect crowdsecurity/sshd-logs return nil }, } - cmdContextDetect.Flags().BoolVarP(&detectAll, "all", "a", false, "Detect evt field for all installed parser") - cmdContext.AddCommand(cmdContextDetect) + cmd.Flags().BoolVarP(&detectAll, "all", "a", false, "Detect evt field for all installed parser") + + return cmd +} - cmdContextDelete := &cobra.Command{ +func (cli *cliLapi) newContextDeleteCmd() *cobra.Command { + cmd := &cobra.Command{ Use: "delete", DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { - filePath := csConfig.Crowdsec.ConsoleContextPath + filePath := cli.cfg().Crowdsec.ConsoleContextPath if filePath == "" { filePath = "the context file" } - fmt.Printf("Command \"delete\" is deprecated, please manually edit %s.", filePath) + fmt.Printf("Command 'delete' is deprecated, please manually edit %s.", filePath) + + return nil + }, + } + + return cmd +} + +func (cli *cliLapi) newContextCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "context [command]", + Short: "Manage context to send with alerts", + DisableAutoGenTag: true, + PersistentPreRunE: func(_ *cobra.Command, _ []string) error { + cfg := cli.cfg() + if err := cfg.LoadCrowdsec(); err != nil { + fileNotFoundMessage := fmt.Sprintf("failed to open context file: open %s: no such file or directory", cfg.Crowdsec.ConsoleContextPath) + if err.Error() != fileNotFoundMessage { + return fmt.Errorf("unable to load CrowdSec agent configuration: %w", err) + } + } + if cfg.DisableAgent { + return errors.New("agent is disabled and lapi context can only be used on the agent") + } + return nil }, + Run: func(cmd *cobra.Command, _ []string) { + printHelp(cmd) + }, } - cmdContext.AddCommand(cmdContextDelete) - return cmdContext + cmd.AddCommand(cli.newContextAddCmd()) + 
cmd.AddCommand(cli.newContextStatusCmd()) + cmd.AddCommand(cli.newContextDetectCmd()) + cmd.AddCommand(cli.newContextDeleteCmd()) + + return cmd } -func detectStaticField(GrokStatics []parser.ExtraField) []string { +func detectStaticField(grokStatics []parser.ExtraField) []string { ret := make([]string, 0) - for _, static := range GrokStatics { + for _, static := range grokStatics { if static.Parsed != "" { fieldName := fmt.Sprintf("evt.Parsed.%s", static.Parsed) if !slices.Contains(ret, fieldName) { ret = append(ret, fieldName) } } + if static.Meta != "" { fieldName := fmt.Sprintf("evt.Meta.%s", static.Meta) if !slices.Contains(ret, fieldName) { ret = append(ret, fieldName) } } + if static.TargetByName != "" { fieldName := static.TargetByName if !strings.HasPrefix(fieldName, "evt.") { fieldName = "evt." + fieldName } + if !slices.Contains(ret, fieldName) { ret = append(ret, fieldName) } @@ -526,6 +586,7 @@ func detectSubNode(node parser.Node, parserCTX parser.UnixParserCtx) []string { } } } + if subnode.Grok.RegexpName != "" { grokCompiled, err := parserCTX.Grok.Get(subnode.Grok.RegexpName) if err == nil { diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index 62b85e63047..b0855fb047e 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -241,7 +241,7 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewCLIBouncers(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIMachines(cli.cfg).NewCommand()) cmd.AddCommand(NewCLICapi().NewCommand()) - cmd.AddCommand(NewLapiCmd()) + cmd.AddCommand(NewCLILapi(cli.cfg).NewCommand()) cmd.AddCommand(NewCompletionCmd()) cmd.AddCommand(NewConsoleCmd()) cmd.AddCommand(NewCLIExplain().NewCommand()) From 2853410576456471b3f0efef223c4f7bb04600ab Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 9 Feb 2024 17:51:29 +0100 Subject: [PATCH 019/318] refact "cscli alerts" (#2827) --- cmd/crowdsec-cli/alerts.go | 181 
++++++++++++++++++++++--------------- cmd/crowdsec-cli/main.go | 2 +- 2 files changed, 109 insertions(+), 74 deletions(-) diff --git a/cmd/crowdsec-cli/alerts.go b/cmd/crowdsec-cli/alerts.go index 4ab71be5bbf..ce304bcc777 100644 --- a/cmd/crowdsec-cli/alerts.go +++ b/cmd/crowdsec-cli/alerts.go @@ -29,39 +29,46 @@ import ( func DecisionsFromAlert(alert *models.Alert) string { ret := "" - var decMap = make(map[string]int) + decMap := make(map[string]int) + for _, decision := range alert.Decisions { k := *decision.Type if *decision.Simulated { k = fmt.Sprintf("(simul)%s", k) } + v := decMap[k] decMap[k] = v + 1 } + for k, v := range decMap { if len(ret) > 0 { ret += " " } + ret += fmt.Sprintf("%s:%d", k, v) } + return ret } -func alertsToTable(alerts *models.GetAlertsResponse, printMachine bool) error { - switch csConfig.Cscli.Output { +func (cli *cliAlerts) alertsToTable(alerts *models.GetAlertsResponse, printMachine bool) error { + switch cli.cfg().Cscli.Output { case "raw": csvwriter := csv.NewWriter(os.Stdout) header := []string{"id", "scope", "value", "reason", "country", "as", "decisions", "created_at"} + if printMachine { header = append(header, "machine") } - err := csvwriter.Write(header) - if err != nil { + + if err := csvwriter.Write(header); err != nil { return err } + for _, alertItem := range *alerts { row := []string{ - fmt.Sprintf("%d", alertItem.ID), + strconv.FormatInt(alertItem.ID, 10), *alertItem.Source.Scope, *alertItem.Source.Value, *alertItem.Scenario, @@ -73,11 +80,12 @@ func alertsToTable(alerts *models.GetAlertsResponse, printMachine bool) error { if printMachine { row = append(row, alertItem.MachineID) } - err := csvwriter.Write(row) - if err != nil { + + if err := csvwriter.Write(row); err != nil { return err } } + csvwriter.Flush() case "json": if *alerts == nil { @@ -86,6 +94,7 @@ func alertsToTable(alerts *models.GetAlertsResponse, printMachine bool) error { fmt.Println("[]") return nil } + x, _ := json.MarshalIndent(alerts, "", " ") 
fmt.Print(string(x)) case "human": @@ -93,8 +102,10 @@ func alertsToTable(alerts *models.GetAlertsResponse, printMachine bool) error { fmt.Println("No active alerts") return nil } + alertsTable(color.Output, alerts, printMachine) } + return nil } @@ -116,13 +127,13 @@ var alertTemplate = ` ` -func displayOneAlert(alert *models.Alert, withDetail bool) error { +func (cli *cliAlerts) displayOneAlert(alert *models.Alert, withDetail bool) error { tmpl, err := template.New("alert").Parse(alertTemplate) if err != nil { return err } - err = tmpl.Execute(os.Stdout, alert) - if err != nil { + + if err = tmpl.Execute(os.Stdout, alert); err != nil { return err } @@ -133,14 +144,17 @@ func displayOneAlert(alert *models.Alert, withDetail bool) error { sort.Slice(alert.Meta, func(i, j int) bool { return alert.Meta[i].Key < alert.Meta[j].Key }) + table := newTable(color.Output) table.SetRowLines(false) table.SetHeaders("Key", "Value") + for _, meta := range alert.Meta { var valSlice []string if err := json.Unmarshal([]byte(meta.Value), &valSlice); err != nil { - return fmt.Errorf("unknown context value type '%s' : %s", meta.Value, err) + return fmt.Errorf("unknown context value type '%s': %w", meta.Value, err) } + for _, value := range valSlice { table.AddRow( meta.Key, @@ -148,11 +162,13 @@ func displayOneAlert(alert *models.Alert, withDetail bool) error { ) } } + table.Render() } if withDetail { fmt.Printf("\n - Events :\n") + for _, event := range alert.Events { alertEventTable(color.Output, event) } @@ -163,10 +179,13 @@ func displayOneAlert(alert *models.Alert, withDetail bool) error { type cliAlerts struct{ client *apiclient.ApiClient + cfg configGetter } -func NewCLIAlerts() *cliAlerts { - return &cliAlerts{} +func NewCLIAlerts(getconfig configGetter) *cliAlerts { + return &cliAlerts{ + cfg: getconfig, + } } func (cli *cliAlerts) NewCommand() *cobra.Command { @@ -176,18 +195,18 @@ func (cli *cliAlerts) NewCommand() *cobra.Command { Args: cobra.MinimumNArgs(1), 
DisableAutoGenTag: true, Aliases: []string{"alert"}, - PersistentPreRunE: func(cmd *cobra.Command, args []string) error { - var err error - if err := csConfig.LoadAPIClient(); err != nil { + PersistentPreRunE: func(_ *cobra.Command, _ []string) error { + cfg := cli.cfg() + if err := cfg.LoadAPIClient(); err != nil { return fmt.Errorf("loading api client: %w", err) } - apiURL, err := url.Parse(csConfig.API.Client.Credentials.URL) + apiURL, err := url.Parse(cfg.API.Client.Credentials.URL) if err != nil { return fmt.Errorf("parsing api url %s: %w", apiURL, err) } cli.client, err = apiclient.NewClient(&apiclient.Config{ - MachineID: csConfig.API.Client.Credentials.Login, - Password: strfmt.Password(csConfig.API.Client.Credentials.Password), + MachineID: cfg.API.Client.Credentials.Login, + Password: strfmt.Password(cfg.API.Client.Credentials.Password), UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), URL: apiURL, VersionPrefix: "v1", @@ -196,6 +215,7 @@ func (cli *cliAlerts) NewCommand() *cobra.Command { if err != nil { return fmt.Errorf("new api client: %w", err) } + return nil }, } @@ -221,8 +241,10 @@ func (cli *cliAlerts) NewListCmd() *cobra.Command { IncludeCAPI: new(bool), OriginEquals: new(string), } + limit := new(int) contained := new(bool) + var printMachine bool cmd := &cobra.Command{ @@ -234,9 +256,7 @@ cscli alerts list --range 1.2.3.0/24 cscli alerts list -s crowdsecurity/ssh-bf cscli alerts list --type ban`, DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args []string) error { - var err error - + RunE: func(cmd *cobra.Command, _ []string) error { if err := manageCliDecisionAlerts(alertListFilter.IPEquals, alertListFilter.RangeEquals, alertListFilter.ScopeEquals, alertListFilter.ValueEquals); err != nil { printHelp(cmd) @@ -304,40 +324,43 @@ cscli alerts list --type ban`, alerts, _, err := cli.client.Alerts.List(context.Background(), alertListFilter) if err != nil { - return fmt.Errorf("unable to list alerts: %v", err) + return 
fmt.Errorf("unable to list alerts: %w", err) } - err = alertsToTable(alerts, printMachine) - if err != nil { - return fmt.Errorf("unable to list alerts: %v", err) + if err = cli.alertsToTable(alerts, printMachine); err != nil { + return fmt.Errorf("unable to list alerts: %w", err) } return nil }, } - cmd.Flags().SortFlags = false - cmd.Flags().BoolVarP(alertListFilter.IncludeCAPI, "all", "a", false, "Include decisions from Central API") - cmd.Flags().StringVar(alertListFilter.Until, "until", "", "restrict to alerts older than until (ie. 4h, 30d)") - cmd.Flags().StringVar(alertListFilter.Since, "since", "", "restrict to alerts newer than since (ie. 4h, 30d)") - cmd.Flags().StringVarP(alertListFilter.IPEquals, "ip", "i", "", "restrict to alerts from this source ip (shorthand for --scope ip --value )") - cmd.Flags().StringVarP(alertListFilter.ScenarioEquals, "scenario", "s", "", "the scenario (ie. crowdsecurity/ssh-bf)") - cmd.Flags().StringVarP(alertListFilter.RangeEquals, "range", "r", "", "restrict to alerts from this range (shorthand for --scope range --value )") - cmd.Flags().StringVar(alertListFilter.TypeEquals, "type", "", "restrict to alerts with given decision type (ie. ban, captcha)") - cmd.Flags().StringVar(alertListFilter.ScopeEquals, "scope", "", "restrict to alerts of this scope (ie. 
ip,range)") - cmd.Flags().StringVarP(alertListFilter.ValueEquals, "value", "v", "", "the value to match for in the specified scope") - cmd.Flags().StringVar(alertListFilter.OriginEquals, "origin", "", fmt.Sprintf("the value to match for the specified origin (%s ...)", strings.Join(types.GetOrigins(), ","))) - cmd.Flags().BoolVar(contained, "contained", false, "query decisions contained by range") - cmd.Flags().BoolVarP(&printMachine, "machine", "m", false, "print machines that sent alerts") - cmd.Flags().IntVarP(limit, "limit", "l", 50, "limit size of alerts list table (0 to view all alerts)") + + flags := cmd.Flags() + flags.SortFlags = false + flags.BoolVarP(alertListFilter.IncludeCAPI, "all", "a", false, "Include decisions from Central API") + flags.StringVar(alertListFilter.Until, "until", "", "restrict to alerts older than until (ie. 4h, 30d)") + flags.StringVar(alertListFilter.Since, "since", "", "restrict to alerts newer than since (ie. 4h, 30d)") + flags.StringVarP(alertListFilter.IPEquals, "ip", "i", "", "restrict to alerts from this source ip (shorthand for --scope ip --value )") + flags.StringVarP(alertListFilter.ScenarioEquals, "scenario", "s", "", "the scenario (ie. crowdsecurity/ssh-bf)") + flags.StringVarP(alertListFilter.RangeEquals, "range", "r", "", "restrict to alerts from this range (shorthand for --scope range --value )") + flags.StringVar(alertListFilter.TypeEquals, "type", "", "restrict to alerts with given decision type (ie. ban, captcha)") + flags.StringVar(alertListFilter.ScopeEquals, "scope", "", "restrict to alerts of this scope (ie. 
ip,range)") + flags.StringVarP(alertListFilter.ValueEquals, "value", "v", "", "the value to match for in the specified scope") + flags.StringVar(alertListFilter.OriginEquals, "origin", "", fmt.Sprintf("the value to match for the specified origin (%s ...)", strings.Join(types.GetOrigins(), ","))) + flags.BoolVar(contained, "contained", false, "query decisions contained by range") + flags.BoolVarP(&printMachine, "machine", "m", false, "print machines that sent alerts") + flags.IntVarP(limit, "limit", "l", 50, "limit size of alerts list table (0 to view all alerts)") return cmd } func (cli *cliAlerts) NewDeleteCmd() *cobra.Command { - var ActiveDecision *bool - var AlertDeleteAll bool - var delAlertByID string - contained := new(bool) + var ( + ActiveDecision *bool + AlertDeleteAll bool + delAlertByID string + ) + var alertDeleteFilter = apiclient.AlertsDeleteOpts{ ScopeEquals: new(string), ValueEquals: new(string), @@ -345,6 +368,9 @@ func (cli *cliAlerts) NewDeleteCmd() *cobra.Command { IPEquals: new(string), RangeEquals: new(string), } + + contained := new(bool) + cmd := &cobra.Command{ Use: "delete [filters] [--all]", Short: `Delete alerts @@ -355,7 +381,7 @@ cscli alerts delete -s crowdsecurity/ssh-bf"`, DisableAutoGenTag: true, Aliases: []string{"remove"}, Args: cobra.ExactArgs(0), - PreRunE: func(cmd *cobra.Command, args []string) error { + PreRunE: func(cmd *cobra.Command, _ []string) error { if AlertDeleteAll { return nil } @@ -368,11 +394,11 @@ cscli alerts delete -s crowdsecurity/ssh-bf"`, return nil }, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(cmd *cobra.Command, _ []string) error { var err error if !AlertDeleteAll { - if err := manageCliDecisionAlerts(alertDeleteFilter.IPEquals, alertDeleteFilter.RangeEquals, + if err = manageCliDecisionAlerts(alertDeleteFilter.IPEquals, alertDeleteFilter.RangeEquals, alertDeleteFilter.ScopeEquals, alertDeleteFilter.ValueEquals); err != nil { printHelp(cmd) return err @@ -410,12 +436,12 @@ cscli 
alerts delete -s crowdsecurity/ssh-bf"`, if delAlertByID == "" { alerts, _, err = cli.client.Alerts.Delete(context.Background(), alertDeleteFilter) if err != nil { - return fmt.Errorf("unable to delete alerts : %v", err) + return fmt.Errorf("unable to delete alerts: %w", err) } } else { alerts, _, err = cli.client.Alerts.DeleteOne(context.Background(), delAlertByID) if err != nil { - return fmt.Errorf("unable to delete alert: %v", err) + return fmt.Errorf("unable to delete alert: %w", err) } } log.Infof("%s alert(s) deleted", alerts.NbDeleted) @@ -423,26 +449,31 @@ cscli alerts delete -s crowdsecurity/ssh-bf"`, return nil }, } - cmd.Flags().SortFlags = false - cmd.Flags().StringVar(alertDeleteFilter.ScopeEquals, "scope", "", "the scope (ie. ip,range)") - cmd.Flags().StringVarP(alertDeleteFilter.ValueEquals, "value", "v", "", "the value to match for in the specified scope") - cmd.Flags().StringVarP(alertDeleteFilter.ScenarioEquals, "scenario", "s", "", "the scenario (ie. crowdsecurity/ssh-bf)") - cmd.Flags().StringVarP(alertDeleteFilter.IPEquals, "ip", "i", "", "Source ip (shorthand for --scope ip --value )") - cmd.Flags().StringVarP(alertDeleteFilter.RangeEquals, "range", "r", "", "Range source ip (shorthand for --scope range --value )") - cmd.Flags().StringVar(&delAlertByID, "id", "", "alert ID") - cmd.Flags().BoolVarP(&AlertDeleteAll, "all", "a", false, "delete all alerts") - cmd.Flags().BoolVar(contained, "contained", false, "query decisions contained by range") + + flags := cmd.Flags() + flags.SortFlags = false + flags.StringVar(alertDeleteFilter.ScopeEquals, "scope", "", "the scope (ie. ip,range)") + flags.StringVarP(alertDeleteFilter.ValueEquals, "value", "v", "", "the value to match for in the specified scope") + flags.StringVarP(alertDeleteFilter.ScenarioEquals, "scenario", "s", "", "the scenario (ie. 
crowdsecurity/ssh-bf)") + flags.StringVarP(alertDeleteFilter.IPEquals, "ip", "i", "", "Source ip (shorthand for --scope ip --value )") + flags.StringVarP(alertDeleteFilter.RangeEquals, "range", "r", "", "Range source ip (shorthand for --scope range --value )") + flags.StringVar(&delAlertByID, "id", "", "alert ID") + flags.BoolVarP(&AlertDeleteAll, "all", "a", false, "delete all alerts") + flags.BoolVar(contained, "contained", false, "query decisions contained by range") + return cmd } func (cli *cliAlerts) NewInspectCmd() *cobra.Command { var details bool + cmd := &cobra.Command{ Use: `inspect "alert_id"`, Short: `Show info about an alert`, Example: `cscli alerts inspect 123`, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { + cfg := cli.cfg() if len(args) == 0 { printHelp(cmd) return fmt.Errorf("missing alert_id") @@ -454,31 +485,32 @@ func (cli *cliAlerts) NewInspectCmd() *cobra.Command { } alert, _, err := cli.client.Alerts.GetByID(context.Background(), id) if err != nil { - return fmt.Errorf("can't find alert with id %s: %s", alertID, err) + return fmt.Errorf("can't find alert with id %s: %w", alertID, err) } - switch csConfig.Cscli.Output { + switch cfg.Cscli.Output { case "human": - if err := displayOneAlert(alert, details); err != nil { + if err := cli.displayOneAlert(alert, details); err != nil { continue } case "json": data, err := json.MarshalIndent(alert, "", " ") if err != nil { - return fmt.Errorf("unable to marshal alert with id %s: %s", alertID, err) + return fmt.Errorf("unable to marshal alert with id %s: %w", alertID, err) } fmt.Printf("%s\n", string(data)) case "raw": data, err := yaml.Marshal(alert) if err != nil { - return fmt.Errorf("unable to marshal alert with id %s: %s", alertID, err) + return fmt.Errorf("unable to marshal alert with id %s: %w", alertID, err) } - fmt.Printf("%s\n", string(data)) + fmt.Println(string(data)) } } return nil }, } + cmd.Flags().SortFlags = false cmd.Flags().BoolVarP(&details, 
"details", "d", false, "show alerts with events") @@ -486,27 +518,30 @@ func (cli *cliAlerts) NewInspectCmd() *cobra.Command { } func (cli *cliAlerts) NewFlushCmd() *cobra.Command { - var maxItems int - var maxAge string + var ( + maxItems int + maxAge string + ) + cmd := &cobra.Command{ Use: `flush`, Short: `Flush alerts /!\ This command can be used only on the same machine than the local API`, Example: `cscli alerts flush --max-items 1000 --max-age 7d`, DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args []string) error { - var err error - if err := require.LAPI(csConfig); err != nil { + RunE: func(_ *cobra.Command, _ []string) error { + cfg := cli.cfg() + if err := require.LAPI(cfg); err != nil { return err } - db, err := database.NewClient(csConfig.DbConfig) + db, err := database.NewClient(cfg.DbConfig) if err != nil { - return fmt.Errorf("unable to create new database client: %s", err) + return fmt.Errorf("unable to create new database client: %w", err) } log.Info("Flushing alerts. !! 
This may take a long time !!") err = db.FlushAlerts(maxAge, maxItems) if err != nil { - return fmt.Errorf("unable to flush alerts: %s", err) + return fmt.Errorf("unable to flush alerts: %w", err) } log.Info("Alerts flushed") diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index b0855fb047e..55fcacee39c 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -236,7 +236,7 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewCLIMetrics(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIDashboard(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIDecisions(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLIAlerts().NewCommand()) + cmd.AddCommand(NewCLIAlerts(cli.cfg).NewCommand()) cmd.AddCommand(NewCLISimulation(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIBouncers(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIMachines(cli.cfg).NewCommand()) From 5c83695177cd4044a8cc953978103377b63607f0 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 12 Feb 2024 11:23:17 +0100 Subject: [PATCH 020/318] refact "cscli explain" (#2835) --- cmd/crowdsec-cli/explain.go | 184 +++++++++++++++--------------------- cmd/crowdsec-cli/main.go | 2 +- go.mod | 4 +- go.sum | 14 +-- 4 files changed, 82 insertions(+), 122 deletions(-) diff --git a/cmd/crowdsec-cli/explain.go b/cmd/crowdsec-cli/explain.go index d21c1704930..ce323fd0ce1 100644 --- a/cmd/crowdsec-cli/explain.go +++ b/cmd/crowdsec-cli/explain.go @@ -16,33 +16,53 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/hubtest" ) -func GetLineCountForFile(filepath string) (int, error) { +func getLineCountForFile(filepath string) (int, error) { f, err := os.Open(filepath) if err != nil { return 0, err } defer f.Close() + lc := 0 fs := bufio.NewReader(f) + for { input, err := fs.ReadBytes('\n') if len(input) > 1 { lc++ } + if err != nil && err == io.EOF { break } } + return lc, nil } -type cliExplain struct{} +type cliExplain struct { + cfg 
configGetter + flags struct { + logFile string + dsn string + logLine string + logType string + details bool + skipOk bool + onlySuccessfulParsers bool + noClean bool + crowdsec string + labels string + } +} -func NewCLIExplain() *cliExplain { - return &cliExplain{} +func NewCLIExplain(cfg configGetter) *cliExplain { + return &cliExplain{ + cfg: cfg, + } } -func (cli cliExplain) NewCommand() *cobra.Command { +func (cli *cliExplain) NewCommand() *cobra.Command { cmd := &cobra.Command{ Use: "explain", Short: "Explain log pipeline", @@ -57,118 +77,50 @@ tail -n 5 myfile.log | cscli explain --type nginx -f - `, Args: cobra.ExactArgs(0), DisableAutoGenTag: true, - RunE: cli.run, - PersistentPreRunE: func(cmd *cobra.Command, args []string) error { - flags := cmd.Flags() - - logFile, err := flags.GetString("file") - if err != nil { - return err - } - - dsn, err := flags.GetString("dsn") - if err != nil { - return err - } - - logLine, err := flags.GetString("log") - if err != nil { - return err - } - - logType, err := flags.GetString("type") - if err != nil { - return err - } - - if logLine == "" && logFile == "" && dsn == "" { - printHelp(cmd) - fmt.Println() - return fmt.Errorf("please provide --log, --file or --dsn flag") - } - if logType == "" { - printHelp(cmd) - fmt.Println() - return fmt.Errorf("please provide --type flag") - } + RunE: func(_ *cobra.Command, _ []string) error { + return cli.run() + }, + PersistentPreRunE: func(_ *cobra.Command, _ []string) error { fileInfo, _ := os.Stdin.Stat() - if logFile == "-" && ((fileInfo.Mode() & os.ModeCharDevice) == os.ModeCharDevice) { + if cli.flags.logFile == "-" && ((fileInfo.Mode() & os.ModeCharDevice) == os.ModeCharDevice) { return fmt.Errorf("the option -f - is intended to work with pipes") } + return nil }, } flags := cmd.Flags() - flags.StringP("file", "f", "", "Log file to test") - flags.StringP("dsn", "d", "", "DSN to test") - flags.StringP("log", "l", "", "Log line to test") - flags.StringP("type", "t", "", 
"Type of the acquisition to test") - flags.String("labels", "", "Additional labels to add to the acquisition format (key:value,key2:value2)") - flags.BoolP("verbose", "v", false, "Display individual changes") - flags.Bool("failures", false, "Only show failed lines") - flags.Bool("only-successful-parsers", false, "Only show successful parsers") - flags.String("crowdsec", "crowdsec", "Path to crowdsec") - flags.Bool("no-clean", false, "Don't clean runtime environment after tests") + flags.StringVarP(&cli.flags.logFile, "file", "f", "", "Log file to test") + flags.StringVarP(&cli.flags.dsn, "dsn", "d", "", "DSN to test") + flags.StringVarP(&cli.flags.logLine, "log", "l", "", "Log line to test") + flags.StringVarP(&cli.flags.logType, "type", "t", "", "Type of the acquisition to test") + flags.StringVar(&cli.flags.labels, "labels", "", "Additional labels to add to the acquisition format (key:value,key2:value2)") + flags.BoolVarP(&cli.flags.details, "verbose", "v", false, "Display individual changes") + flags.BoolVar(&cli.flags.skipOk, "failures", false, "Only show failed lines") + flags.BoolVar(&cli.flags.onlySuccessfulParsers, "only-successful-parsers", false, "Only show successful parsers") + flags.StringVar(&cli.flags.crowdsec, "crowdsec", "crowdsec", "Path to crowdsec") + flags.BoolVar(&cli.flags.noClean, "no-clean", false, "Don't clean runtime environment after tests") + + cmd.MarkFlagRequired("type") + cmd.MarkFlagsOneRequired("log", "file", "dsn") return cmd } -func (cli cliExplain) run(cmd *cobra.Command, args []string) error { - flags := cmd.Flags() - - logFile, err := flags.GetString("file") - if err != nil { - return err - } +func (cli *cliExplain) run() error { + logFile := cli.flags.logFile + logLine := cli.flags.logLine + logType := cli.flags.logType + dsn := cli.flags.dsn + labels := cli.flags.labels + crowdsec := cli.flags.crowdsec - dsn, err := flags.GetString("dsn") - if err != nil { - return err - } - - logLine, err := flags.GetString("log") - if err 
!= nil { - return err - } - - logType, err := flags.GetString("type") - if err != nil { - return err - } - - opts := dumps.DumpOpts{} - - opts.Details, err = flags.GetBool("verbose") - if err != nil { - return err - } - - no_clean, err := flags.GetBool("no-clean") - if err != nil { - return err - } - - opts.SkipOk, err = flags.GetBool("failures") - if err != nil { - return err - } - - opts.ShowNotOkParsers, err = flags.GetBool("only-successful-parsers") - opts.ShowNotOkParsers = !opts.ShowNotOkParsers - if err != nil { - return err - } - - crowdsec, err := flags.GetString("crowdsec") - if err != nil { - return err - } - - labels, err := flags.GetString("labels") - if err != nil { - return err + opts := dumps.DumpOpts{ + Details: cli.flags.details, + SkipOk: cli.flags.skipOk, + ShowNotOkParsers: !cli.flags.onlySuccessfulParsers, } var f *os.File @@ -176,21 +128,25 @@ func (cli cliExplain) run(cmd *cobra.Command, args []string) error { // using empty string fallback to /tmp dir, err := os.MkdirTemp("", "cscli_explain") if err != nil { - return fmt.Errorf("couldn't create a temporary directory to store cscli explain result: %s", err) + return fmt.Errorf("couldn't create a temporary directory to store cscli explain result: %w", err) } + defer func() { - if no_clean { + if cli.flags.noClean { return } + if _, err := os.Stat(dir); !os.IsNotExist(err) { if err := os.RemoveAll(dir); err != nil { log.Errorf("unable to delete temporary directory '%s': %s", dir, err) } } }() + // we create a temporary log file if a log line/stdin has been provided if logLine != "" || logFile == "-" { tmpFile := filepath.Join(dir, "cscli_test_tmp.log") + f, err = os.Create(tmpFile) if err != nil { return err @@ -220,6 +176,7 @@ func (cli cliExplain) run(cmd *cobra.Command, args []string) error { log.Warnf("Failed to write %d lines to %s", errCount, tmpFile) } } + f.Close() // this is the file that was going to be read by crowdsec anyway logFile = tmpFile @@ -230,15 +187,20 @@ func (cli 
cliExplain) run(cmd *cobra.Command, args []string) error { if err != nil { return fmt.Errorf("unable to get absolute path of '%s', exiting", logFile) } + dsn = fmt.Sprintf("file://%s", absolutePath) - lineCount, err := GetLineCountForFile(absolutePath) + + lineCount, err := getLineCountForFile(absolutePath) if err != nil { return err } + log.Debugf("file %s has %d lines", absolutePath, lineCount) + if lineCount == 0 { return fmt.Errorf("the log file is empty: %s", absolutePath) } + if lineCount > 100 { log.Warnf("%s contains %d lines. This may take a lot of resources.", absolutePath, lineCount) } @@ -249,15 +211,19 @@ func (cli cliExplain) run(cmd *cobra.Command, args []string) error { } cmdArgs := []string{"-c", ConfigFilePath, "-type", logType, "-dsn", dsn, "-dump-data", dir, "-no-api"} + if labels != "" { log.Debugf("adding labels %s", labels) cmdArgs = append(cmdArgs, "-label", labels) } + crowdsecCmd := exec.Command(crowdsec, cmdArgs...) + output, err := crowdsecCmd.CombinedOutput() if err != nil { fmt.Println(string(output)) - return fmt.Errorf("fail to run crowdsec for test: %v", err) + + return fmt.Errorf("fail to run crowdsec for test: %w", err) } parserDumpFile := filepath.Join(dir, hubtest.ParserResultFileName) @@ -265,12 +231,12 @@ func (cli cliExplain) run(cmd *cobra.Command, args []string) error { parserDump, err := dumps.LoadParserDump(parserDumpFile) if err != nil { - return fmt.Errorf("unable to load parser dump result: %s", err) + return fmt.Errorf("unable to load parser dump result: %w", err) } bucketStateDump, err := dumps.LoadBucketPourDump(bucketStateDumpFile) if err != nil { - return fmt.Errorf("unable to load bucket dump result: %s", err) + return fmt.Errorf("unable to load bucket dump result: %w", err) } dumps.DumpTree(*parserDump, *bucketStateDump, opts) diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index 55fcacee39c..43998623566 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -244,7 +244,7 @@ It 
is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewCLILapi(cli.cfg).NewCommand()) cmd.AddCommand(NewCompletionCmd()) cmd.AddCommand(NewConsoleCmd()) - cmd.AddCommand(NewCLIExplain().NewCommand()) + cmd.AddCommand(NewCLIExplain(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIHubTest().NewCommand()) cmd.AddCommand(NewCLINotifications().NewCommand()) cmd.AddCommand(NewCLISupport().NewCommand()) diff --git a/go.mod b/go.mod index d61c191c14f..e1da18387a5 100644 --- a/go.mod +++ b/go.mod @@ -77,7 +77,7 @@ require ( github.com/shirou/gopsutil/v3 v3.23.5 github.com/sirupsen/logrus v1.9.3 github.com/slack-go/slack v0.12.2 - github.com/spf13/cobra v1.7.0 + github.com/spf13/cobra v1.8.0 github.com/stretchr/testify v1.8.4 github.com/umahmood/haversine v0.0.0-20151105152445-808ab04add26 github.com/wasilibs/go-re2 v1.3.0 @@ -108,7 +108,7 @@ require ( github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect github.com/corazawaf/libinjection-go v0.1.2 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect github.com/creack/pty v1.1.18 // indirect github.com/docker/distribution v2.8.2+incompatible // indirect github.com/docker/go-units v0.5.0 // indirect diff --git a/go.sum b/go.sum index f5f61594ecd..8fa2021316b 100644 --- a/go.sum +++ b/go.sum @@ -91,21 +91,17 @@ github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7 github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= 
+github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM= +github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= -github.com/crowdsecurity/coraza/v3 v3.0.0-20231213144607-41d5358da94f h1:FkOB9aDw0xzDd14pTarGRLsUNAymONq3dc7zhvsXElg= -github.com/crowdsecurity/coraza/v3 v3.0.0-20231213144607-41d5358da94f/go.mod h1:TrU7Li+z2RHNrPy0TKJ6R65V6Yzpan2sTIRryJJyJso= github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607 h1:hyrYw3h8clMcRL2u5ooZ3tmwnmJftmhb9Ws1MKmavvI= github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607/go.mod h1:br36fEqurGYZQGit+iDYsIzW0FF6VufMbDzyyLxEuPA= github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 h1:r97WNVC30Uen+7WnLs4xDScS/Ex988+id2k6mDf8psU= github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26/go.mod h1:zpv7r+7KXwgVUZnUNjyP22zc/D7LKjyoY02weH2RBbk= -github.com/crowdsecurity/go-cs-lib v0.0.5 h1:eVLW+BRj3ZYn0xt5/xmgzfbbB8EBo32gM4+WpQQk2e8= -github.com/crowdsecurity/go-cs-lib v0.0.5/go.mod h1:8FMKNGsh3hMZi2SEv6P15PURhEJnZV431XjzzBSuf0k= github.com/crowdsecurity/go-cs-lib v0.0.6 h1:Ef6MylXe0GaJE9vrfvxEdbHb31+JUP1os+murPz7Pos= github.com/crowdsecurity/go-cs-lib v0.0.6/go.mod h1:8FMKNGsh3hMZi2SEv6P15PURhEJnZV431XjzzBSuf0k= github.com/crowdsecurity/grokky v0.2.1 h1:t4VYnDlAd0RjDM2SlILalbwfCrQxtJSMGdQOR0zwkE4= @@ -640,8 +636,8 @@ github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra 
v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= -github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= -github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= +github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= +github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= @@ -809,8 +805,6 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= -golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= From bdecf38616723dddf30a7c776694cd020f8a6944 Mon Sep 17 00:00:00 2001 From: blotus Date: Mon, 12 Feb 2024 11:33:44 +0100 Subject: [PATCH 021/318] update codeql action to v3 (#2822) --- .github/workflows/codeql-analysis.yml | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 0904769dd60..4b262f13d09 100644 --- 
a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -48,10 +48,15 @@ jobs: with: # required to pick up tags for BUILD_VERSION fetch-depth: 0 + - name: "Set up Go" + uses: actions/setup-go@v5 + with: + go-version: "1.21.6" + cache-dependency-path: "**/go.sum" # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@v2 + uses: github/codeql-action/init@v3 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -71,14 +76,8 @@ jobs: # and modify them (or add more) to build your code if your project # uses a compiled language - - name: "Set up Go" - uses: actions/setup-go@v5 - with: - go-version: "1.21.6" - cache-dependency-path: "**/go.sum" - - run: | make clean build BUILD_RE2_WASM=1 - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v2 + uses: github/codeql-action/analyze@v3 From eada3739e6849cf6da085dfa4862dcbfad4deb10 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 12 Feb 2024 11:40:59 +0100 Subject: [PATCH 022/318] refact "cscli notifications" (#2833) --- cmd/crowdsec-cli/main.go | 2 +- cmd/crowdsec-cli/notifications.go | 155 ++++++++++++++++++------------ 2 files changed, 95 insertions(+), 62 deletions(-) diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index 43998623566..63b7211b39b 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -246,7 +246,7 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewConsoleCmd()) cmd.AddCommand(NewCLIExplain(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIHubTest().NewCommand()) - cmd.AddCommand(NewCLINotifications().NewCommand()) + cmd.AddCommand(NewCLINotifications(cli.cfg).NewCommand()) cmd.AddCommand(NewCLISupport().NewCommand()) cmd.AddCommand(NewCLIPapi(cli.cfg).NewCommand()) cmd.AddCommand(NewCLICollection().NewCommand()) diff --git 
a/cmd/crowdsec-cli/notifications.go b/cmd/crowdsec-cli/notifications.go index da436420d12..f12333a3942 100644 --- a/cmd/crowdsec-cli/notifications.go +++ b/cmd/crowdsec-cli/notifications.go @@ -23,14 +23,13 @@ import ( "github.com/crowdsecurity/go-cs-lib/ptr" "github.com/crowdsecurity/go-cs-lib/version" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/apiclient" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/csplugin" "github.com/crowdsecurity/crowdsec/pkg/csprofiles" - "github.com/crowdsecurity/crowdsec/pkg/types" - - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/models" + "github.com/crowdsecurity/crowdsec/pkg/types" ) type NotificationsCfg struct { @@ -39,13 +38,17 @@ type NotificationsCfg struct { ids []uint } -type cliNotifications struct{} +type cliNotifications struct { + cfg configGetter +} -func NewCLINotifications() *cliNotifications { - return &cliNotifications{} +func NewCLINotifications(cfg configGetter) *cliNotifications { + return &cliNotifications{ + cfg: cfg, + } } -func (cli cliNotifications) NewCommand() *cobra.Command { +func (cli *cliNotifications) NewCommand() *cobra.Command { cmd := &cobra.Command{ Use: "notifications [action]", Short: "Helper for notification plugin configuration", @@ -53,14 +56,15 @@ func (cli cliNotifications) NewCommand() *cobra.Command { Args: cobra.MinimumNArgs(1), Aliases: []string{"notifications", "notification"}, DisableAutoGenTag: true, - PersistentPreRunE: func(cmd *cobra.Command, args []string) error { - if err := require.LAPI(csConfig); err != nil { + PersistentPreRunE: func(_ *cobra.Command, _ []string) error { + cfg := cli.cfg() + if err := require.LAPI(cfg); err != nil { return err } - if err := csConfig.LoadAPIClient(); err != nil { + if err := cfg.LoadAPIClient(); err != nil { return fmt.Errorf("loading api client: %w", err) } - if err := 
require.Notifications(csConfig); err != nil { + if err := require.Notifications(cfg); err != nil { return err } @@ -76,67 +80,79 @@ func (cli cliNotifications) NewCommand() *cobra.Command { return cmd } -func getPluginConfigs() (map[string]csplugin.PluginConfig, error) { +func (cli *cliNotifications) getPluginConfigs() (map[string]csplugin.PluginConfig, error) { + cfg := cli.cfg() pcfgs := map[string]csplugin.PluginConfig{} wf := func(path string, info fs.FileInfo, err error) error { if info == nil { return fmt.Errorf("error while traversing directory %s: %w", path, err) } - name := filepath.Join(csConfig.ConfigPaths.NotificationDir, info.Name()) //Avoid calling info.Name() twice + + name := filepath.Join(cfg.ConfigPaths.NotificationDir, info.Name()) //Avoid calling info.Name() twice if (strings.HasSuffix(name, "yaml") || strings.HasSuffix(name, "yml")) && !(info.IsDir()) { ts, err := csplugin.ParsePluginConfigFile(name) if err != nil { return fmt.Errorf("loading notifification plugin configuration with %s: %w", name, err) } + for _, t := range ts { csplugin.SetRequiredFields(&t) pcfgs[t.Name] = t } } + return nil } - if err := filepath.Walk(csConfig.ConfigPaths.NotificationDir, wf); err != nil { + if err := filepath.Walk(cfg.ConfigPaths.NotificationDir, wf); err != nil { return nil, fmt.Errorf("while loading notifification plugin configuration: %w", err) } + return pcfgs, nil } -func getProfilesConfigs() (map[string]NotificationsCfg, error) { +func (cli *cliNotifications) getProfilesConfigs() (map[string]NotificationsCfg, error) { + cfg := cli.cfg() // A bit of a tricky stuf now: reconcile profiles and notification plugins - pcfgs, err := getPluginConfigs() + pcfgs, err := cli.getPluginConfigs() if err != nil { return nil, err } + ncfgs := map[string]NotificationsCfg{} for _, pc := range pcfgs { ncfgs[pc.Name] = NotificationsCfg{ Config: pc, } } - profiles, err := csprofiles.NewProfile(csConfig.API.Server.Profiles) + + profiles, err := 
csprofiles.NewProfile(cfg.API.Server.Profiles) if err != nil { return nil, fmt.Errorf("while extracting profiles from configuration: %w", err) } + for profileID, profile := range profiles { for _, notif := range profile.Cfg.Notifications { pc, ok := pcfgs[notif] if !ok { return nil, fmt.Errorf("notification plugin '%s' does not exist", notif) } + tmp, ok := ncfgs[pc.Name] if !ok { return nil, fmt.Errorf("notification plugin '%s' does not exist", pc.Name) } + tmp.Profiles = append(tmp.Profiles, profile.Cfg) tmp.ids = append(tmp.ids, uint(profileID)) ncfgs[pc.Name] = tmp } } + return ncfgs, nil } -func (cli cliNotifications) NewListCmd() *cobra.Command { +func (cli *cliNotifications) NewListCmd() *cobra.Command { cmd := &cobra.Command{ Use: "list", Short: "list active notifications plugins", @@ -144,21 +160,22 @@ func (cli cliNotifications) NewListCmd() *cobra.Command { Example: `cscli notifications list`, Args: cobra.ExactArgs(0), DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, arg []string) error { - ncfgs, err := getProfilesConfigs() + RunE: func(_ *cobra.Command, _ []string) error { + cfg := cli.cfg() + ncfgs, err := cli.getProfilesConfigs() if err != nil { return fmt.Errorf("can't build profiles configuration: %w", err) } - if csConfig.Cscli.Output == "human" { + if cfg.Cscli.Output == "human" { notificationListTable(color.Output, ncfgs) - } else if csConfig.Cscli.Output == "json" { + } else if cfg.Cscli.Output == "json" { x, err := json.MarshalIndent(ncfgs, "", " ") if err != nil { return fmt.Errorf("failed to marshal notification configuration: %w", err) } fmt.Printf("%s", string(x)) - } else if csConfig.Cscli.Output == "raw" { + } else if cfg.Cscli.Output == "raw" { csvwriter := csv.NewWriter(os.Stdout) err := csvwriter.Write([]string{"Name", "Type", "Profile name"}) if err != nil { @@ -176,6 +193,7 @@ func (cli cliNotifications) NewListCmd() *cobra.Command { } csvwriter.Flush() } + return nil }, } @@ -183,7 +201,7 @@ func (cli cliNotifications) 
NewListCmd() *cobra.Command { return cmd } -func (cli cliNotifications) NewInspectCmd() *cobra.Command { +func (cli *cliNotifications) NewInspectCmd() *cobra.Command { cmd := &cobra.Command{ Use: "inspect", Short: "Inspect active notifications plugin configuration", @@ -191,36 +209,32 @@ func (cli cliNotifications) NewInspectCmd() *cobra.Command { Example: `cscli notifications inspect `, Args: cobra.ExactArgs(1), DisableAutoGenTag: true, - PreRunE: func(cmd *cobra.Command, args []string) error { - if args[0] == "" { - return fmt.Errorf("please provide a plugin name to inspect") - } - return nil - }, - RunE: func(cmd *cobra.Command, args []string) error { - ncfgs, err := getProfilesConfigs() + RunE: func(_ *cobra.Command, args []string) error { + cfg := cli.cfg() + ncfgs, err := cli.getProfilesConfigs() if err != nil { return fmt.Errorf("can't build profiles configuration: %w", err) } - cfg, ok := ncfgs[args[0]] + ncfg, ok := ncfgs[args[0]] if !ok { return fmt.Errorf("plugin '%s' does not exist or is not active", args[0]) } - if csConfig.Cscli.Output == "human" || csConfig.Cscli.Output == "raw" { - fmt.Printf(" - %15s: %15s\n", "Type", cfg.Config.Type) - fmt.Printf(" - %15s: %15s\n", "Name", cfg.Config.Name) - fmt.Printf(" - %15s: %15s\n", "Timeout", cfg.Config.TimeOut) - fmt.Printf(" - %15s: %15s\n", "Format", cfg.Config.Format) - for k, v := range cfg.Config.Config { + if cfg.Cscli.Output == "human" || cfg.Cscli.Output == "raw" { + fmt.Printf(" - %15s: %15s\n", "Type", ncfg.Config.Type) + fmt.Printf(" - %15s: %15s\n", "Name", ncfg.Config.Name) + fmt.Printf(" - %15s: %15s\n", "Timeout", ncfg.Config.TimeOut) + fmt.Printf(" - %15s: %15s\n", "Format", ncfg.Config.Format) + for k, v := range ncfg.Config.Config { fmt.Printf(" - %15s: %15v\n", k, v) } - } else if csConfig.Cscli.Output == "json" { + } else if cfg.Cscli.Output == "json" { x, err := json.MarshalIndent(cfg, "", " ") if err != nil { return fmt.Errorf("failed to marshal notification configuration: %w", err) } 
fmt.Printf("%s", string(x)) } + return nil }, } @@ -228,12 +242,13 @@ func (cli cliNotifications) NewInspectCmd() *cobra.Command { return cmd } -func (cli cliNotifications) NewTestCmd() *cobra.Command { +func (cli *cliNotifications) NewTestCmd() *cobra.Command { var ( pluginBroker csplugin.PluginBroker pluginTomb tomb.Tomb alertOverride string ) + cmd := &cobra.Command{ Use: "test [plugin name]", Short: "send a generic test alert to notification plugin", @@ -241,25 +256,26 @@ func (cli cliNotifications) NewTestCmd() *cobra.Command { Example: `cscli notifications test [plugin_name]`, Args: cobra.ExactArgs(1), DisableAutoGenTag: true, - PreRunE: func(cmd *cobra.Command, args []string) error { - pconfigs, err := getPluginConfigs() + PreRunE: func(_ *cobra.Command, args []string) error { + cfg := cli.cfg() + pconfigs, err := cli.getPluginConfigs() if err != nil { return fmt.Errorf("can't build profiles configuration: %w", err) } - cfg, ok := pconfigs[args[0]] + pcfg, ok := pconfigs[args[0]] if !ok { return fmt.Errorf("plugin name: '%s' does not exist", args[0]) } //Create a single profile with plugin name as notification name - return pluginBroker.Init(csConfig.PluginConfig, []*csconfig.ProfileCfg{ + return pluginBroker.Init(cfg.PluginConfig, []*csconfig.ProfileCfg{ { Notifications: []string{ - cfg.Name, + pcfg.Name, }, }, - }, csConfig.ConfigPaths) + }, cfg.ConfigPaths) }, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(_ *cobra.Command, _ []string) error { pluginTomb.Go(func() error { pluginBroker.Run(&pluginTomb) return nil @@ -298,13 +314,16 @@ func (cli cliNotifications) NewTestCmd() *cobra.Command { if err := yaml.Unmarshal([]byte(alertOverride), alert); err != nil { return fmt.Errorf("failed to unmarshal alert override: %w", err) } + pluginBroker.PluginChannel <- csplugin.ProfileAlert{ ProfileID: uint(0), Alert: alert, } + //time.Sleep(2 * time.Second) // There's no mechanism to ensure notification has been sent 
pluginTomb.Kill(fmt.Errorf("terminating")) pluginTomb.Wait() + return nil }, } @@ -313,9 +332,11 @@ func (cli cliNotifications) NewTestCmd() *cobra.Command { return cmd } -func (cli cliNotifications) NewReinjectCmd() *cobra.Command { - var alertOverride string - var alert *models.Alert +func (cli *cliNotifications) NewReinjectCmd() *cobra.Command { + var ( + alertOverride string + alert *models.Alert + ) cmd := &cobra.Command{ Use: "reinject", @@ -328,25 +349,30 @@ cscli notifications reinject -a '{"remediation": true,"scenario":"not `, Args: cobra.ExactArgs(1), DisableAutoGenTag: true, - PreRunE: func(cmd *cobra.Command, args []string) error { + PreRunE: func(_ *cobra.Command, args []string) error { var err error - alert, err = FetchAlertFromArgString(args[0]) + alert, err = cli.fetchAlertFromArgString(args[0]) if err != nil { return err } + return nil }, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(_ *cobra.Command, _ []string) error { var ( pluginBroker csplugin.PluginBroker pluginTomb tomb.Tomb ) + + cfg := cli.cfg() + if alertOverride != "" { if err := json.Unmarshal([]byte(alertOverride), alert); err != nil { return fmt.Errorf("can't unmarshal data in the alert flag: %w", err) } } - err := pluginBroker.Init(csConfig.PluginConfig, csConfig.API.Server.Profiles, csConfig.ConfigPaths) + + err := pluginBroker.Init(cfg.PluginConfig, cfg.API.Server.Profiles, cfg.ConfigPaths) if err != nil { return fmt.Errorf("can't initialize plugins: %w", err) } @@ -356,7 +382,7 @@ cscli notifications reinject -a '{"remediation": true,"scenario":"not return nil }) - profiles, err := csprofiles.NewProfile(csConfig.API.Server.Profiles) + profiles, err := csprofiles.NewProfile(cfg.API.Server.Profiles) if err != nil { return fmt.Errorf("cannot extract profiles from configuration: %w", err) } @@ -382,9 +408,9 @@ cscli notifications reinject -a '{"remediation": true,"scenario":"not default: time.Sleep(50 * time.Millisecond) log.Info("sleeping\n") - } } + if 
profile.Cfg.OnSuccess == "break" { log.Infof("The profile %s contains a 'on_success: break' so bailing out", profile.Cfg.Name) break @@ -393,6 +419,7 @@ cscli notifications reinject -a '{"remediation": true,"scenario":"not //time.Sleep(2 * time.Second) // There's no mechanism to ensure notification has been sent pluginTomb.Kill(fmt.Errorf("terminating")) pluginTomb.Wait() + return nil }, } @@ -401,18 +428,22 @@ cscli notifications reinject -a '{"remediation": true,"scenario":"not return cmd } -func FetchAlertFromArgString(toParse string) (*models.Alert, error) { +func (cli *cliNotifications) fetchAlertFromArgString(toParse string) (*models.Alert, error) { + cfg := cli.cfg() + id, err := strconv.Atoi(toParse) if err != nil { return nil, fmt.Errorf("bad alert id %s", toParse) } - apiURL, err := url.Parse(csConfig.API.Client.Credentials.URL) + + apiURL, err := url.Parse(cfg.API.Client.Credentials.URL) if err != nil { return nil, fmt.Errorf("error parsing the URL of the API: %w", err) } + client, err := apiclient.NewClient(&apiclient.Config{ - MachineID: csConfig.API.Client.Credentials.Login, - Password: strfmt.Password(csConfig.API.Client.Credentials.Password), + MachineID: cfg.API.Client.Credentials.Login, + Password: strfmt.Password(cfg.API.Client.Credentials.Password), UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), URL: apiURL, VersionPrefix: "v1", @@ -420,9 +451,11 @@ func FetchAlertFromArgString(toParse string) (*models.Alert, error) { if err != nil { return nil, fmt.Errorf("error creating the client for the API: %w", err) } + alert, _, err := client.Alerts.GetByID(context.Background(), id) if err != nil { return nil, fmt.Errorf("can't find alert with id %d: %w", id, err) } + return alert, nil } From a6a4d460d7069a67369906fbe4447eed601b4942 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 12 Feb 2024 11:45:58 +0100 Subject: [PATCH 023/318] refact "cscli console" (#2834) --- cmd/crowdsec-cli/console.go | 253 
++++++++++++++++++------------ cmd/crowdsec-cli/console_table.go | 14 +- cmd/crowdsec-cli/main.go | 2 +- 3 files changed, 160 insertions(+), 109 deletions(-) diff --git a/cmd/crowdsec-cli/console.go b/cmd/crowdsec-cli/console.go index dcd6fb37f62..b1912825c06 100644 --- a/cmd/crowdsec-cli/console.go +++ b/cmd/crowdsec-cli/console.go @@ -25,32 +25,53 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) -func NewConsoleCmd() *cobra.Command { - var cmdConsole = &cobra.Command{ +type cliConsole struct { + cfg configGetter +} + +func NewCLIConsole(cfg configGetter) *cliConsole { + return &cliConsole{ + cfg: cfg, + } +} + +func (cli *cliConsole) NewCommand() *cobra.Command { + var cmd = &cobra.Command{ Use: "console [action]", Short: "Manage interaction with Crowdsec console (https://app.crowdsec.net)", Args: cobra.MinimumNArgs(1), DisableAutoGenTag: true, - PersistentPreRunE: func(cmd *cobra.Command, args []string) error { - if err := require.LAPI(csConfig); err != nil { + PersistentPreRunE: func(_ *cobra.Command, _ []string) error { + cfg := cli.cfg() + if err := require.LAPI(cfg); err != nil { return err } - if err := require.CAPI(csConfig); err != nil { + if err := require.CAPI(cfg); err != nil { return err } - if err := require.CAPIRegistered(csConfig); err != nil { + if err := require.CAPIRegistered(cfg); err != nil { return err } + return nil }, } + cmd.AddCommand(cli.newEnrollCmd()) + cmd.AddCommand(cli.newEnableCmd()) + cmd.AddCommand(cli.newDisableCmd()) + cmd.AddCommand(cli.newStatusCmd()) + + return cmd +} + +func (cli *cliConsole) newEnrollCmd() *cobra.Command { name := "" overwrite := false tags := []string{} opts := []string{} - cmdEnroll := &cobra.Command{ + cmd := &cobra.Command{ Use: "enroll [enroll-key]", Short: "Enroll this instance to https://app.crowdsec.net [requires local API]", Long: ` @@ -66,96 +87,107 @@ After running this command your will need to validate the enrollment in the weba valid options are : %s,all (see 'cscli console status' 
for details)`, strings.Join(csconfig.CONSOLE_CONFIGS, ",")), Args: cobra.ExactArgs(1), DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args []string) error { - password := strfmt.Password(csConfig.API.Server.OnlineClient.Credentials.Password) - apiURL, err := url.Parse(csConfig.API.Server.OnlineClient.Credentials.URL) + RunE: func(_ *cobra.Command, args []string) error { + cfg := cli.cfg() + password := strfmt.Password(cfg.API.Server.OnlineClient.Credentials.Password) + + apiURL, err := url.Parse(cfg.API.Server.OnlineClient.Credentials.URL) if err != nil { - return fmt.Errorf("could not parse CAPI URL: %s", err) + return fmt.Errorf("could not parse CAPI URL: %w", err) } - hub, err := require.Hub(csConfig, nil, nil) + hub, err := require.Hub(cfg, nil, nil) if err != nil { return err } scenarios, err := hub.GetInstalledItemNames(cwhub.SCENARIOS) if err != nil { - return fmt.Errorf("failed to get installed scenarios: %s", err) + return fmt.Errorf("failed to get installed scenarios: %w", err) } if len(scenarios) == 0 { scenarios = make([]string, 0) } - enable_opts := []string{csconfig.SEND_MANUAL_SCENARIOS, csconfig.SEND_TAINTED_SCENARIOS} + enableOpts := []string{csconfig.SEND_MANUAL_SCENARIOS, csconfig.SEND_TAINTED_SCENARIOS} if len(opts) != 0 { for _, opt := range opts { valid := false if opt == "all" { - enable_opts = csconfig.CONSOLE_CONFIGS + enableOpts = csconfig.CONSOLE_CONFIGS break } - for _, available_opt := range csconfig.CONSOLE_CONFIGS { - if opt == available_opt { + for _, availableOpt := range csconfig.CONSOLE_CONFIGS { + if opt == availableOpt { valid = true enable := true - for _, enabled_opt := range enable_opts { - if opt == enabled_opt { + for _, enabledOpt := range enableOpts { + if opt == enabledOpt { enable = false continue } } if enable { - enable_opts = append(enable_opts, opt) + enableOpts = append(enableOpts, opt) } + break } } if !valid { return fmt.Errorf("option %s doesn't exist", opt) - } } } c, _ := 
apiclient.NewClient(&apiclient.Config{ - MachineID: csConfig.API.Server.OnlineClient.Credentials.Login, + MachineID: cli.cfg().API.Server.OnlineClient.Credentials.Login, Password: password, Scenarios: scenarios, UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), URL: apiURL, VersionPrefix: "v3", }) + resp, err := c.Auth.EnrollWatcher(context.Background(), args[0], name, tags, overwrite) if err != nil { - return fmt.Errorf("could not enroll instance: %s", err) + return fmt.Errorf("could not enroll instance: %w", err) } + if resp.Response.StatusCode == 200 && !overwrite { log.Warning("Instance already enrolled. You can use '--overwrite' to force enroll") return nil } - if err := SetConsoleOpts(enable_opts, true); err != nil { + if err := cli.setConsoleOpts(enableOpts, true); err != nil { return err } - for _, opt := range enable_opts { + for _, opt := range enableOpts { log.Infof("Enabled %s : %s", opt, csconfig.CONSOLE_CONFIGS_HELP[opt]) } + log.Info("Watcher successfully enrolled. 
Visit https://app.crowdsec.net to accept it.") log.Info("Please restart crowdsec after accepting the enrollment.") + return nil }, } - cmdEnroll.Flags().StringVarP(&name, "name", "n", "", "Name to display in the console") - cmdEnroll.Flags().BoolVarP(&overwrite, "overwrite", "", false, "Force enroll the instance") - cmdEnroll.Flags().StringSliceVarP(&tags, "tags", "t", tags, "Tags to display in the console") - cmdEnroll.Flags().StringSliceVarP(&opts, "enable", "e", opts, "Enable console options") - cmdConsole.AddCommand(cmdEnroll) - var enableAll, disableAll bool + flags := cmd.Flags() + flags.StringVarP(&name, "name", "n", "", "Name to display in the console") + flags.BoolVarP(&overwrite, "overwrite", "", false, "Force enroll the instance") + flags.StringSliceVarP(&tags, "tags", "t", tags, "Tags to display in the console") + flags.StringSliceVarP(&opts, "enable", "e", opts, "Enable console options") + + return cmd +} + +func (cli *cliConsole) newEnableCmd() *cobra.Command { + var enableAll bool - cmdEnable := &cobra.Command{ + cmd := &cobra.Command{ Use: "enable [option]", Short: "Enable a console option", Example: "sudo cscli console enable tainted", @@ -163,9 +195,9 @@ After running this command your will need to validate the enrollment in the weba Enable given information push to the central API. Allows to empower the console`, ValidArgs: csconfig.CONSOLE_CONFIGS, DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(_ *cobra.Command, args []string) error { if enableAll { - if err := SetConsoleOpts(csconfig.CONSOLE_CONFIGS, true); err != nil { + if err := cli.setConsoleOpts(csconfig.CONSOLE_CONFIGS, true); err != nil { return err } log.Infof("All features have been enabled successfully") @@ -173,19 +205,26 @@ Enable given information push to the central API. 
Allows to empower the console` if len(args) == 0 { return fmt.Errorf("you must specify at least one feature to enable") } - if err := SetConsoleOpts(args, true); err != nil { + if err := cli.setConsoleOpts(args, true); err != nil { return err } log.Infof("%v have been enabled", args) } + log.Infof(ReloadMessage()) + return nil }, } - cmdEnable.Flags().BoolVarP(&enableAll, "all", "a", false, "Enable all console options") - cmdConsole.AddCommand(cmdEnable) + cmd.Flags().BoolVarP(&enableAll, "all", "a", false, "Enable all console options") + + return cmd +} - cmdDisable := &cobra.Command{ +func (cli *cliConsole) newDisableCmd() *cobra.Command { + var disableAll bool + + cmd := &cobra.Command{ Use: "disable [option]", Short: "Disable a console option", Example: "sudo cscli console disable tainted", @@ -193,47 +232,52 @@ Enable given information push to the central API. Allows to empower the console` Disable given information push to the central API.`, ValidArgs: csconfig.CONSOLE_CONFIGS, DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(_ *cobra.Command, args []string) error { if disableAll { - if err := SetConsoleOpts(csconfig.CONSOLE_CONFIGS, false); err != nil { + if err := cli.setConsoleOpts(csconfig.CONSOLE_CONFIGS, false); err != nil { return err } log.Infof("All features have been disabled") } else { - if err := SetConsoleOpts(args, false); err != nil { + if err := cli.setConsoleOpts(args, false); err != nil { return err } log.Infof("%v have been disabled", args) } log.Infof(ReloadMessage()) + return nil }, } - cmdDisable.Flags().BoolVarP(&disableAll, "all", "a", false, "Disable all console options") - cmdConsole.AddCommand(cmdDisable) + cmd.Flags().BoolVarP(&disableAll, "all", "a", false, "Disable all console options") + + return cmd +} - cmdConsoleStatus := &cobra.Command{ +func (cli *cliConsole) newStatusCmd() *cobra.Command { + cmd := &cobra.Command{ Use: "status", Short: "Shows status of the console options", 
Example: `sudo cscli console status`, DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args []string) error { - switch csConfig.Cscli.Output { + RunE: func(_ *cobra.Command, _ []string) error { + cfg := cli.cfg() + consoleCfg := cfg.API.Server.ConsoleConfig + switch cfg.Cscli.Output { case "human": - cmdConsoleStatusTable(color.Output, *csConfig) + cmdConsoleStatusTable(color.Output, *consoleCfg) case "json": - c := csConfig.API.Server.ConsoleConfig out := map[string](*bool){ - csconfig.SEND_MANUAL_SCENARIOS: c.ShareManualDecisions, - csconfig.SEND_CUSTOM_SCENARIOS: c.ShareCustomScenarios, - csconfig.SEND_TAINTED_SCENARIOS: c.ShareTaintedScenarios, - csconfig.SEND_CONTEXT: c.ShareContext, - csconfig.CONSOLE_MANAGEMENT: c.ConsoleManagement, + csconfig.SEND_MANUAL_SCENARIOS: consoleCfg.ShareManualDecisions, + csconfig.SEND_CUSTOM_SCENARIOS: consoleCfg.ShareCustomScenarios, + csconfig.SEND_TAINTED_SCENARIOS: consoleCfg.ShareTaintedScenarios, + csconfig.SEND_CONTEXT: consoleCfg.ShareContext, + csconfig.CONSOLE_MANAGEMENT: consoleCfg.ConsoleManagement, } data, err := json.MarshalIndent(out, "", " ") if err != nil { - return fmt.Errorf("failed to marshal configuration: %s", err) + return fmt.Errorf("failed to marshal configuration: %w", err) } fmt.Println(string(data)) case "raw": @@ -244,11 +288,11 @@ Disable given information push to the central API.`, } rows := [][]string{ - {csconfig.SEND_MANUAL_SCENARIOS, fmt.Sprintf("%t", *csConfig.API.Server.ConsoleConfig.ShareManualDecisions)}, - {csconfig.SEND_CUSTOM_SCENARIOS, fmt.Sprintf("%t", *csConfig.API.Server.ConsoleConfig.ShareCustomScenarios)}, - {csconfig.SEND_TAINTED_SCENARIOS, fmt.Sprintf("%t", *csConfig.API.Server.ConsoleConfig.ShareTaintedScenarios)}, - {csconfig.SEND_CONTEXT, fmt.Sprintf("%t", *csConfig.API.Server.ConsoleConfig.ShareContext)}, - {csconfig.CONSOLE_MANAGEMENT, fmt.Sprintf("%t", *csConfig.API.Server.ConsoleConfig.ConsoleManagement)}, + {csconfig.SEND_MANUAL_SCENARIOS, fmt.Sprintf("%t", 
*consoleCfg.ShareManualDecisions)}, + {csconfig.SEND_CUSTOM_SCENARIOS, fmt.Sprintf("%t", *consoleCfg.ShareCustomScenarios)}, + {csconfig.SEND_TAINTED_SCENARIOS, fmt.Sprintf("%t", *consoleCfg.ShareTaintedScenarios)}, + {csconfig.SEND_CONTEXT, fmt.Sprintf("%t", *consoleCfg.ShareContext)}, + {csconfig.CONSOLE_MANAGEMENT, fmt.Sprintf("%t", *consoleCfg.ConsoleManagement)}, } for _, row := range rows { err = csvwriter.Write(row) @@ -258,132 +302,137 @@ Disable given information push to the central API.`, } csvwriter.Flush() } + return nil }, } - cmdConsole.AddCommand(cmdConsoleStatus) - return cmdConsole + return cmd } -func dumpConsoleConfig(c *csconfig.LocalApiServerCfg) error { - out, err := yaml.Marshal(c.ConsoleConfig) +func (cli *cliConsole) dumpConfig() error { + serverCfg := cli.cfg().API.Server + + out, err := yaml.Marshal(serverCfg.ConsoleConfig) if err != nil { - return fmt.Errorf("while marshaling ConsoleConfig (for %s): %w", c.ConsoleConfigPath, err) + return fmt.Errorf("while marshaling ConsoleConfig (for %s): %w", serverCfg.ConsoleConfigPath, err) } - if c.ConsoleConfigPath == "" { - c.ConsoleConfigPath = csconfig.DefaultConsoleConfigFilePath - log.Debugf("Empty console_path, defaulting to %s", c.ConsoleConfigPath) + if serverCfg.ConsoleConfigPath == "" { + serverCfg.ConsoleConfigPath = csconfig.DefaultConsoleConfigFilePath + log.Debugf("Empty console_path, defaulting to %s", serverCfg.ConsoleConfigPath) } - if err := os.WriteFile(c.ConsoleConfigPath, out, 0o600); err != nil { - return fmt.Errorf("while dumping console config to %s: %w", c.ConsoleConfigPath, err) + if err := os.WriteFile(serverCfg.ConsoleConfigPath, out, 0o600); err != nil { + return fmt.Errorf("while dumping console config to %s: %w", serverCfg.ConsoleConfigPath, err) } return nil } -func SetConsoleOpts(args []string, wanted bool) error { +func (cli *cliConsole) setConsoleOpts(args []string, wanted bool) error { + cfg := cli.cfg() + consoleCfg := cfg.API.Server.ConsoleConfig + for _, arg 
:= range args { switch arg { case csconfig.CONSOLE_MANAGEMENT: /*for each flag check if it's already set before setting it*/ - if csConfig.API.Server.ConsoleConfig.ConsoleManagement != nil { - if *csConfig.API.Server.ConsoleConfig.ConsoleManagement == wanted { + if consoleCfg.ConsoleManagement != nil { + if *consoleCfg.ConsoleManagement == wanted { log.Debugf("%s already set to %t", csconfig.CONSOLE_MANAGEMENT, wanted) } else { log.Infof("%s set to %t", csconfig.CONSOLE_MANAGEMENT, wanted) - *csConfig.API.Server.ConsoleConfig.ConsoleManagement = wanted + *consoleCfg.ConsoleManagement = wanted } } else { log.Infof("%s set to %t", csconfig.CONSOLE_MANAGEMENT, wanted) - csConfig.API.Server.ConsoleConfig.ConsoleManagement = ptr.Of(wanted) + consoleCfg.ConsoleManagement = ptr.Of(wanted) } - if csConfig.API.Server.OnlineClient.Credentials != nil { + if cfg.API.Server.OnlineClient.Credentials != nil { changed := false - if wanted && csConfig.API.Server.OnlineClient.Credentials.PapiURL == "" { + if wanted && cfg.API.Server.OnlineClient.Credentials.PapiURL == "" { changed = true - csConfig.API.Server.OnlineClient.Credentials.PapiURL = types.PAPIBaseURL - } else if !wanted && csConfig.API.Server.OnlineClient.Credentials.PapiURL != "" { + cfg.API.Server.OnlineClient.Credentials.PapiURL = types.PAPIBaseURL + } else if !wanted && cfg.API.Server.OnlineClient.Credentials.PapiURL != "" { changed = true - csConfig.API.Server.OnlineClient.Credentials.PapiURL = "" + cfg.API.Server.OnlineClient.Credentials.PapiURL = "" } if changed { - fileContent, err := yaml.Marshal(csConfig.API.Server.OnlineClient.Credentials) + fileContent, err := yaml.Marshal(cfg.API.Server.OnlineClient.Credentials) if err != nil { - return fmt.Errorf("cannot marshal credentials: %s", err) + return fmt.Errorf("cannot marshal credentials: %w", err) } - log.Infof("Updating credentials file: %s", csConfig.API.Server.OnlineClient.CredentialsFilePath) + log.Infof("Updating credentials file: %s", 
cfg.API.Server.OnlineClient.CredentialsFilePath) - err = os.WriteFile(csConfig.API.Server.OnlineClient.CredentialsFilePath, fileContent, 0o600) + err = os.WriteFile(cfg.API.Server.OnlineClient.CredentialsFilePath, fileContent, 0o600) if err != nil { - return fmt.Errorf("cannot write credentials file: %s", err) + return fmt.Errorf("cannot write credentials file: %w", err) } } } case csconfig.SEND_CUSTOM_SCENARIOS: /*for each flag check if it's already set before setting it*/ - if csConfig.API.Server.ConsoleConfig.ShareCustomScenarios != nil { - if *csConfig.API.Server.ConsoleConfig.ShareCustomScenarios == wanted { + if consoleCfg.ShareCustomScenarios != nil { + if *consoleCfg.ShareCustomScenarios == wanted { log.Debugf("%s already set to %t", csconfig.SEND_CUSTOM_SCENARIOS, wanted) } else { log.Infof("%s set to %t", csconfig.SEND_CUSTOM_SCENARIOS, wanted) - *csConfig.API.Server.ConsoleConfig.ShareCustomScenarios = wanted + *consoleCfg.ShareCustomScenarios = wanted } } else { log.Infof("%s set to %t", csconfig.SEND_CUSTOM_SCENARIOS, wanted) - csConfig.API.Server.ConsoleConfig.ShareCustomScenarios = ptr.Of(wanted) + consoleCfg.ShareCustomScenarios = ptr.Of(wanted) } case csconfig.SEND_TAINTED_SCENARIOS: /*for each flag check if it's already set before setting it*/ - if csConfig.API.Server.ConsoleConfig.ShareTaintedScenarios != nil { - if *csConfig.API.Server.ConsoleConfig.ShareTaintedScenarios == wanted { + if consoleCfg.ShareTaintedScenarios != nil { + if *consoleCfg.ShareTaintedScenarios == wanted { log.Debugf("%s already set to %t", csconfig.SEND_TAINTED_SCENARIOS, wanted) } else { log.Infof("%s set to %t", csconfig.SEND_TAINTED_SCENARIOS, wanted) - *csConfig.API.Server.ConsoleConfig.ShareTaintedScenarios = wanted + *consoleCfg.ShareTaintedScenarios = wanted } } else { log.Infof("%s set to %t", csconfig.SEND_TAINTED_SCENARIOS, wanted) - csConfig.API.Server.ConsoleConfig.ShareTaintedScenarios = ptr.Of(wanted) + consoleCfg.ShareTaintedScenarios = ptr.Of(wanted) } 
case csconfig.SEND_MANUAL_SCENARIOS: /*for each flag check if it's already set before setting it*/ - if csConfig.API.Server.ConsoleConfig.ShareManualDecisions != nil { - if *csConfig.API.Server.ConsoleConfig.ShareManualDecisions == wanted { + if consoleCfg.ShareManualDecisions != nil { + if *consoleCfg.ShareManualDecisions == wanted { log.Debugf("%s already set to %t", csconfig.SEND_MANUAL_SCENARIOS, wanted) } else { log.Infof("%s set to %t", csconfig.SEND_MANUAL_SCENARIOS, wanted) - *csConfig.API.Server.ConsoleConfig.ShareManualDecisions = wanted + *consoleCfg.ShareManualDecisions = wanted } } else { log.Infof("%s set to %t", csconfig.SEND_MANUAL_SCENARIOS, wanted) - csConfig.API.Server.ConsoleConfig.ShareManualDecisions = ptr.Of(wanted) + consoleCfg.ShareManualDecisions = ptr.Of(wanted) } case csconfig.SEND_CONTEXT: /*for each flag check if it's already set before setting it*/ - if csConfig.API.Server.ConsoleConfig.ShareContext != nil { - if *csConfig.API.Server.ConsoleConfig.ShareContext == wanted { + if consoleCfg.ShareContext != nil { + if *consoleCfg.ShareContext == wanted { log.Debugf("%s already set to %t", csconfig.SEND_CONTEXT, wanted) } else { log.Infof("%s set to %t", csconfig.SEND_CONTEXT, wanted) - *csConfig.API.Server.ConsoleConfig.ShareContext = wanted + *consoleCfg.ShareContext = wanted } } else { log.Infof("%s set to %t", csconfig.SEND_CONTEXT, wanted) - csConfig.API.Server.ConsoleConfig.ShareContext = ptr.Of(wanted) + consoleCfg.ShareContext = ptr.Of(wanted) } default: return fmt.Errorf("unknown flag %s", arg) } } - if err := dumpConsoleConfig(csConfig.API.Server); err != nil { - return fmt.Errorf("failed writing console config: %s", err) + if err := cli.dumpConfig(); err != nil { + return fmt.Errorf("failed writing console config: %w", err) } return nil diff --git a/cmd/crowdsec-cli/console_table.go b/cmd/crowdsec-cli/console_table.go index 2a221e36f07..e71ea8113fb 100644 --- a/cmd/crowdsec-cli/console_table.go +++ 
b/cmd/crowdsec-cli/console_table.go @@ -9,7 +9,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/csconfig" ) -func cmdConsoleStatusTable(out io.Writer, csConfig csconfig.Config) { +func cmdConsoleStatusTable(out io.Writer, consoleCfg csconfig.ConsoleConfig) { t := newTable(out) t.SetRowLines(false) @@ -18,28 +18,30 @@ func cmdConsoleStatusTable(out io.Writer, csConfig csconfig.Config) { for _, option := range csconfig.CONSOLE_CONFIGS { activated := string(emoji.CrossMark) + switch option { case csconfig.SEND_CUSTOM_SCENARIOS: - if *csConfig.API.Server.ConsoleConfig.ShareCustomScenarios { + if *consoleCfg.ShareCustomScenarios { activated = string(emoji.CheckMarkButton) } case csconfig.SEND_MANUAL_SCENARIOS: - if *csConfig.API.Server.ConsoleConfig.ShareManualDecisions { + if *consoleCfg.ShareManualDecisions { activated = string(emoji.CheckMarkButton) } case csconfig.SEND_TAINTED_SCENARIOS: - if *csConfig.API.Server.ConsoleConfig.ShareTaintedScenarios { + if *consoleCfg.ShareTaintedScenarios { activated = string(emoji.CheckMarkButton) } case csconfig.SEND_CONTEXT: - if *csConfig.API.Server.ConsoleConfig.ShareContext { + if *consoleCfg.ShareContext { activated = string(emoji.CheckMarkButton) } case csconfig.CONSOLE_MANAGEMENT: - if *csConfig.API.Server.ConsoleConfig.ConsoleManagement { + if *consoleCfg.ConsoleManagement { activated = string(emoji.CheckMarkButton) } } + t.AddRow(option, activated, csconfig.CONSOLE_CONFIGS_HELP[option]) } diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index 63b7211b39b..27ac17d554f 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -243,7 +243,7 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewCLICapi().NewCommand()) cmd.AddCommand(NewCLILapi(cli.cfg).NewCommand()) cmd.AddCommand(NewCompletionCmd()) - cmd.AddCommand(NewConsoleCmd()) + cmd.AddCommand(NewCLIConsole(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIExplain(cli.cfg).NewCommand()) 
cmd.AddCommand(NewCLIHubTest().NewCommand()) cmd.AddCommand(NewCLINotifications(cli.cfg).NewCommand()) From 4561eb787be6e27693195807ba61181018aa6755 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 12 Feb 2024 20:15:16 +0100 Subject: [PATCH 024/318] bats: color formatter in CI (#2838) --- .github/workflows/bats-hub.yml | 5 +- .github/workflows/bats-mysql.yml | 5 +- .github/workflows/bats-postgres.yml | 5 +- .github/workflows/bats-sqlite-coverage.yml | 3 +- test/lib/color-formatter | 355 +++++++++++++++++++++ 5 files changed, 359 insertions(+), 14 deletions(-) create mode 100755 test/lib/color-formatter diff --git a/.github/workflows/bats-hub.yml b/.github/workflows/bats-hub.yml index fe45210ae96..7764da84812 100644 --- a/.github/workflows/bats-hub.yml +++ b/.github/workflows/bats-hub.yml @@ -8,9 +8,6 @@ on: GIST_BADGES_ID: required: true -env: - PREFIX_TEST_NAMES_WITH_FILE: true - jobs: build: strategy: @@ -50,7 +47,7 @@ jobs: - name: "Run hub tests" run: | ./test/bin/generate-hub-tests - ./test/run-tests test/dyn-bats/${{ matrix.test-file }} + ./test/run-tests ./test/dyn-bats/${{ matrix.test-file }} --formatter $(pwd)/test/lib/color-formatter - name: "Collect hub coverage" run: ./test/bin/collect-hub-coverage >> $GITHUB_ENV diff --git a/.github/workflows/bats-mysql.yml b/.github/workflows/bats-mysql.yml index 902c25ba329..243da6eb25d 100644 --- a/.github/workflows/bats-mysql.yml +++ b/.github/workflows/bats-mysql.yml @@ -7,9 +7,6 @@ on: required: true type: string -env: - PREFIX_TEST_NAMES_WITH_FILE: true - jobs: build: name: "Functional tests" @@ -58,7 +55,7 @@ jobs: MYSQL_USER: root - name: "Run tests" - run: make bats-test + run: ./test/run-tests ./test/bats --formatter $(pwd)/test/lib/color-formatter env: DB_BACKEND: mysql MYSQL_HOST: 127.0.0.1 diff --git a/.github/workflows/bats-postgres.yml b/.github/workflows/bats-postgres.yml index e15f1e410c1..07d3cd8d2f1 100644 --- a/.github/workflows/bats-postgres.yml +++ 
b/.github/workflows/bats-postgres.yml @@ -3,9 +3,6 @@ name: (sub) Bats / Postgres on: workflow_call: -env: - PREFIX_TEST_NAMES_WITH_FILE: true - jobs: build: name: "Functional tests" @@ -67,7 +64,7 @@ PGUSER: postgres - name: "Run tests (DB_BACKEND: pgx)" - run: make bats-test + run: ./test/run-tests ./test/bats --formatter $(pwd)/test/lib/color-formatter env: DB_BACKEND: pgx PGHOST: 127.0.0.1 diff --git a/.github/workflows/bats-sqlite-coverage.yml b/.github/workflows/bats-sqlite-coverage.yml index 36194555e1d..46a5dd8bc86 100644 --- a/.github/workflows/bats-sqlite-coverage.yml +++ b/.github/workflows/bats-sqlite-coverage.yml @@ -4,7 +4,6 @@ on: workflow_call: env: - PREFIX_TEST_NAMES_WITH_FILE: true TEST_COVERAGE: true jobs: @@ -42,7 +41,7 @@ make clean bats-build bats-fixture BUILD_STATIC=1 - name: "Run tests" - run: make bats-test + run: ./test/run-tests ./test/bats --formatter $(pwd)/test/lib/color-formatter - name: "Collect coverage data" run: | diff --git a/test/lib/color-formatter b/test/lib/color-formatter new file mode 100755 index 00000000000..aee8d750698 --- /dev/null +++ b/test/lib/color-formatter @@ -0,0 +1,355 @@ +#!/usr/bin/env bash + +# +# Taken from pretty formatter, minus the cursor movements. +# Used in github workflows CI where color is allowed. +# + +set -e + +# shellcheck source=lib/bats-core/formatter.bash +source "$BATS_ROOT/lib/bats-core/formatter.bash" + +BASE_PATH=.
+BATS_ENABLE_TIMING= + +while [[ "$#" -ne 0 ]]; do + case "$1" in + -T) + BATS_ENABLE_TIMING="-T" + ;; + --base-path) + shift + normalize_base_path BASE_PATH "$1" + ;; + esac + shift +done + +update_count_column_width() { + count_column_width=$((${#count} * 2 + 2)) + if [[ -n "$BATS_ENABLE_TIMING" ]]; then + # additional space for ' in %s sec' + count_column_width=$((count_column_width + ${#SECONDS} + 8)) + fi + # also update dependent value + update_count_column_left +} + +update_screen_width() { + screen_width="$(tput cols)" + # also update dependent value + update_count_column_left +} + +update_count_column_left() { + count_column_left=$((screen_width - count_column_width)) +} + +# avoid unset variables +count=0 +screen_width=80 +update_count_column_width +#update_screen_width +test_result= + +#trap update_screen_width WINCH + +begin() { + test_result= # reset to avoid carrying over result state from previous test + line_backoff_count=0 + #go_to_column 0 + #update_count_column_width + #buffer_with_truncation $((count_column_left - 1)) ' %s' "$name" + #clear_to_end_of_line + #go_to_column $count_column_left + #if [[ -n "$BATS_ENABLE_TIMING" ]]; then + # buffer "%${#count}s/${count} in %s sec" "$index" "$SECONDS" + #else + # buffer "%${#count}s/${count}" "$index" + #fi + #go_to_column 1 + buffer "%${#count}s" "$index" +} + +finish_test() { + #move_up $line_backoff_count + #go_to_column 0 + buffer "$@" + if [[ -n "${TIMEOUT-}" ]]; then + set_color 2 + if [[ -n "$BATS_ENABLE_TIMING" ]]; then + buffer ' [%s (timeout: %s)]' "$TIMING" "$TIMEOUT" + else + buffer ' [timeout: %s]' "$TIMEOUT" + fi + else + if [[ -n "$BATS_ENABLE_TIMING" ]]; then + set_color 2 + buffer ' [%s]' "$TIMING" + fi + fi + advance + move_down $((line_backoff_count - 1)) +} + +pass() { + local TIMING="${1:-}" + finish_test ' ✓ %s' "$name" + test_result=pass +} + +skip() { + local reason="$1" TIMING="${2:-}" + if [[ -n "$reason" ]]; then + reason=": $reason" + fi + finish_test ' - %s (skipped%s)' 
"$name" "$reason" + test_result=skip +} + +fail() { + local TIMING="${1:-}" + set_color 1 bold + finish_test ' ✗ %s' "$name" + test_result=fail +} + +timeout() { + local TIMING="${1:-}" + set_color 3 bold + TIMEOUT="${2:-}" finish_test ' ✗ %s' "$name" + test_result=timeout +} + +log() { + case ${test_result} in + pass) + clear_color + ;; + fail) + set_color 1 + ;; + timeout) + set_color 3 + ;; + esac + buffer ' %s\n' "$1" + clear_color +} + +summary() { + if [ "$failures" -eq 0 ]; then + set_color 2 bold + else + set_color 1 bold + fi + + buffer '\n%d test' "$count" + if [[ "$count" -ne 1 ]]; then + buffer 's' + fi + + buffer ', %d failure' "$failures" + if [[ "$failures" -ne 1 ]]; then + buffer 's' + fi + + if [[ "$skipped" -gt 0 ]]; then + buffer ', %d skipped' "$skipped" + fi + + if ((timed_out > 0)); then + buffer ', %d timed out' "$timed_out" + fi + + not_run=$((count - passed - failures - skipped - timed_out)) + if [[ "$not_run" -gt 0 ]]; then + buffer ', %d not run' "$not_run" + fi + + if [[ -n "$BATS_ENABLE_TIMING" ]]; then + buffer " in $SECONDS seconds" + fi + + buffer '\n' + clear_color +} + +buffer_with_truncation() { + local width="$1" + shift + local string + + # shellcheck disable=SC2059 + printf -v 'string' -- "$@" + + if [[ "${#string}" -gt "$width" ]]; then + buffer '%s...' 
"${string:0:$((width - 4))}" + else + buffer '%s' "$string" + fi +} + +move_up() { + if [[ $1 -gt 0 ]]; then # avoid moving if we got 0 + buffer '\x1B[%dA' "$1" + fi +} + +move_down() { + if [[ $1 -gt 0 ]]; then # avoid moving if we got 0 + buffer '\x1B[%dB' "$1" + fi +} + +go_to_column() { + local column="$1" + buffer '\x1B[%dG' $((column + 1)) +} + +clear_to_end_of_line() { + buffer '\x1B[K' +} + +advance() { + clear_to_end_of_line + buffer '\n' + clear_color +} + +set_color() { + local color="$1" + local weight=22 + + if [[ "${2:-}" == 'bold' ]]; then + weight=1 + fi + buffer '\x1B[%d;%dm' "$((30 + color))" "$weight" +} + +clear_color() { + buffer '\x1B[0m' +} + +_buffer= + +buffer() { + local content + # shellcheck disable=SC2059 + printf -v content -- "$@" + _buffer+="$content" +} + +prefix_buffer_with() { + local old_buffer="$_buffer" + _buffer='' + "$@" + _buffer="$_buffer$old_buffer" +} + +flush() { + printf '%s' "$_buffer" + _buffer= +} + +finish() { + flush + printf '\n' +} + +trap finish EXIT +trap '' INT + +bats_tap_stream_plan() { + count="$1" + index=0 + passed=0 + failures=0 + skipped=0 + timed_out=0 + name= + update_count_column_width +} + +bats_tap_stream_begin() { + index="$1" + name="$2" + begin + flush +} + +bats_tap_stream_ok() { + index="$1" + name="$2" + ((++passed)) + + pass "${BATS_FORMATTER_TEST_DURATION:-}" +} + +bats_tap_stream_skipped() { + index="$1" + name="$2" + ((++skipped)) + skip "$3" "${BATS_FORMATTER_TEST_DURATION:-}" +} + +bats_tap_stream_not_ok() { + index="$1" + name="$2" + + if [[ ${BATS_FORMATTER_TEST_TIMEOUT-x} != x ]]; then + timeout "${BATS_FORMATTER_TEST_DURATION:-}" "${BATS_FORMATTER_TEST_TIMEOUT}s" + ((++timed_out)) + else + fail "${BATS_FORMATTER_TEST_DURATION:-}" + ((++failures)) + fi + +} + +bats_tap_stream_comment() { # + local scope=$2 + # count the lines we printed after the begin text, + if [[ $line_backoff_count -eq 0 && $scope == begin ]]; then + # if this is the first line after begin, go down one line + 
buffer "\n" + ((++line_backoff_count)) # prefix-increment to avoid "error" due to returning 0 + fi + + ((++line_backoff_count)) + ((line_backoff_count += ${#1} / screen_width)) # account for linebreaks due to length + log "$1" +} + +bats_tap_stream_suite() { + #test_file="$1" + line_backoff_count=0 + index= + # indicate filename for failures + local file_name="${1#"$BASE_PATH"}" + name="File $file_name" + set_color 4 bold + buffer "%s\n" "$file_name" + clear_color +} + +line_backoff_count=0 +bats_tap_stream_unknown() { # + local scope=$2 + # count the lines we printed after the begin text, (or after suite, in case of syntax errors) + if [[ $line_backoff_count -eq 0 && ($scope == begin || $scope == suite) ]]; then + # if this is the first line after begin, go down one line + buffer "\n" + ((++line_backoff_count)) # prefix-increment to avoid "error" due to returning 0 + fi + + ((++line_backoff_count)) + ((line_backoff_count += ${#1} / screen_width)) # account for linebreaks due to length + buffer "%s\n" "$1" + flush +} + +bats_parse_internal_extended_tap + +summary From d34fb7e8a85deaa31697290cf583824911fa6913 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 13 Feb 2024 14:22:19 +0100 Subject: [PATCH 025/318] log processor: share apiclient in output goroutines (#2836) --- .golangci.yml | 10 ++- cmd/crowdsec/api.go | 4 +- cmd/crowdsec/crowdsec.go | 56 ++++++++++++--- cmd/crowdsec/lapiclient.go | 92 +++++++++++++++++++++++++ cmd/crowdsec/metrics.go | 13 ++-- cmd/crowdsec/output.go | 105 +++++------------------------ cmd/crowdsec/run_in_svc.go | 12 ++-- cmd/crowdsec/run_in_svc_windows.go | 7 +- cmd/crowdsec/serve.go | 27 ++++++-- test/bats/01_crowdsec.bats | 3 + test/bats/40_live-ban.bats | 21 ++++-- 11 files changed, 229 insertions(+), 121 deletions(-) create mode 100644 cmd/crowdsec/lapiclient.go diff --git a/.golangci.yml b/.golangci.yml index 3161b2c0aaf..e605ac079d4 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -11,7 
+11,7 @@ run: linters-settings: cyclop: # lower this after refactoring - max-complexity: 70 + max-complexity: 53 gci: sections: @@ -26,7 +26,7 @@ linters-settings: gocyclo: # lower this after refactoring - min-complexity: 70 + min-complexity: 49 funlen: # Checks the number of lines in a function. @@ -46,7 +46,7 @@ linters-settings: maintidx: # raise this after refactoring - under: 9 + under: 11 misspell: locale: US @@ -263,6 +263,10 @@ issues: - perfsprint text: "fmt.Sprintf can be replaced .*" + - linters: + - perfsprint + text: "fmt.Errorf can be replaced with errors.New" + # # Will fix, easy but some neurons required # diff --git a/cmd/crowdsec/api.go b/cmd/crowdsec/api.go index a1e933cba89..4ac5c3ce96f 100644 --- a/cmd/crowdsec/api.go +++ b/cmd/crowdsec/api.go @@ -56,7 +56,8 @@ func initAPIServer(cConfig *csconfig.Config) (*apiserver.APIServer, error) { return apiServer, nil } -func serveAPIServer(apiServer *apiserver.APIServer, apiReady chan bool) { +func serveAPIServer(apiServer *apiserver.APIServer) { + apiReady := make(chan bool, 1) apiTomb.Go(func() error { defer trace.CatchPanic("crowdsec/serveAPIServer") go func() { @@ -80,6 +81,7 @@ func serveAPIServer(apiServer *apiserver.APIServer, apiReady chan bool) { } return nil }) + <-apiReady } func hasPlugins(profiles []*csconfig.ProfileCfg) bool { diff --git a/cmd/crowdsec/crowdsec.go b/cmd/crowdsec/crowdsec.go index 774b9d381cf..d4cd2d3cf74 100644 --- a/cmd/crowdsec/crowdsec.go +++ b/cmd/crowdsec/crowdsec.go @@ -1,6 +1,7 @@ package main import ( + "context" "fmt" "os" "path/filepath" @@ -13,8 +14,8 @@ import ( "github.com/crowdsecurity/go-cs-lib/trace" "github.com/crowdsecurity/crowdsec/pkg/acquisition" - "github.com/crowdsecurity/crowdsec/pkg/appsec" "github.com/crowdsecurity/crowdsec/pkg/alertcontext" + "github.com/crowdsecurity/crowdsec/pkg/appsec" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" leaky "github.com/crowdsecurity/crowdsec/pkg/leakybucket" @@ 
-56,63 +57,86 @@ func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.H //start go-routines for parsing, buckets pour and outputs. parserWg := &sync.WaitGroup{} + parsersTomb.Go(func() error { parserWg.Add(1) + for i := 0; i < cConfig.Crowdsec.ParserRoutinesCount; i++ { parsersTomb.Go(func() error { defer trace.CatchPanic("crowdsec/runParse") + if err := runParse(inputLineChan, inputEventChan, *parsers.Ctx, parsers.Nodes); err != nil { //this error will never happen as parser.Parse is not able to return errors log.Fatalf("starting parse error : %s", err) return err } + return nil }) } parserWg.Done() + return nil }) parserWg.Wait() bucketWg := &sync.WaitGroup{} + bucketsTomb.Go(func() error { bucketWg.Add(1) /*restore previous state as well if present*/ if cConfig.Crowdsec.BucketStateFile != "" { log.Warningf("Restoring buckets state from %s", cConfig.Crowdsec.BucketStateFile) + if err := leaky.LoadBucketsState(cConfig.Crowdsec.BucketStateFile, buckets, holders); err != nil { - return fmt.Errorf("unable to restore buckets : %s", err) + return fmt.Errorf("unable to restore buckets: %w", err) } } for i := 0; i < cConfig.Crowdsec.BucketsRoutinesCount; i++ { bucketsTomb.Go(func() error { defer trace.CatchPanic("crowdsec/runPour") + if err := runPour(inputEventChan, holders, buckets, cConfig); err != nil { log.Fatalf("starting pour error : %s", err) return err } + return nil }) } bucketWg.Done() + return nil }) bucketWg.Wait() + apiClient, err := AuthenticatedLAPIClient(*cConfig.API.Client.Credentials, hub) + if err != nil { + return err + } + + log.Debugf("Starting HeartBeat service") + apiClient.HeartBeat.StartHeartBeat(context.Background(), &outputsTomb) + outputWg := &sync.WaitGroup{} + outputsTomb.Go(func() error { outputWg.Add(1) + for i := 0; i < cConfig.Crowdsec.OutputRoutinesCount; i++ { outputsTomb.Go(func() error { defer trace.CatchPanic("crowdsec/runOutput") - if err := runOutput(inputEventChan, outputEventChan, buckets, 
*parsers.Povfwctx, parsers.Povfwnodes, *cConfig.API.Client.Credentials, hub); err != nil { + + if err := runOutput(inputEventChan, outputEventChan, buckets, *parsers.Povfwctx, parsers.Povfwnodes, apiClient); err != nil { log.Fatalf("starting outputs error : %s", err) return err } + return nil }) } outputWg.Done() + return nil }) outputWg.Wait() @@ -122,16 +146,16 @@ func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.H if cConfig.Prometheus.Level == "aggregated" { aggregated = true } + if err := acquisition.GetMetrics(dataSources, aggregated); err != nil { return fmt.Errorf("while fetching prometheus metrics for datasources: %w", err) } - } + log.Info("Starting processing data") if err := acquisition.StartAcquisition(dataSources, inputLineChan, &acquisTomb); err != nil { - log.Fatalf("starting acquisition error : %s", err) - return err + return fmt.Errorf("starting acquisition error: %w", err) } return nil @@ -140,11 +164,13 @@ func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.H func serveCrowdsec(parsers *parser.Parsers, cConfig *csconfig.Config, hub *cwhub.Hub, agentReady chan bool) { crowdsecTomb.Go(func() error { defer trace.CatchPanic("crowdsec/serveCrowdsec") + go func() { defer trace.CatchPanic("crowdsec/runCrowdsec") // this logs every time, even at config reload log.Debugf("running agent after %s ms", time.Since(crowdsecT0)) agentReady <- true + if err := runCrowdsec(cConfig, parsers, hub); err != nil { log.Fatalf("unable to start crowdsec routines: %s", err) } @@ -156,16 +182,20 @@ func serveCrowdsec(parsers *parser.Parsers, cConfig *csconfig.Config, hub *cwhub */ waitOnTomb() log.Debugf("Shutting down crowdsec routines") + if err := ShutdownCrowdsecRoutines(); err != nil { log.Fatalf("unable to shutdown crowdsec routines: %s", err) } + log.Debugf("everything is dead, return crowdsecTomb") + if dumpStates { dumpParserState() dumpOverflowState() dumpBucketsPour() os.Exit(0) } + return nil }) } @@ 
-175,55 +205,65 @@ func dumpBucketsPour() { if err != nil { log.Fatalf("open: %s", err) } + out, err := yaml.Marshal(leaky.BucketPourCache) if err != nil { log.Fatalf("marshal: %s", err) } + b, err := fd.Write(out) if err != nil { log.Fatalf("write: %s", err) } + log.Tracef("wrote %d bytes", b) + if err := fd.Close(); err != nil { log.Fatalf(" close: %s", err) } } func dumpParserState() { - fd, err := os.OpenFile(filepath.Join(parser.DumpFolder, "parser-dump.yaml"), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0666) if err != nil { log.Fatalf("open: %s", err) } + out, err := yaml.Marshal(parser.StageParseCache) if err != nil { log.Fatalf("marshal: %s", err) } + b, err := fd.Write(out) if err != nil { log.Fatalf("write: %s", err) } + log.Tracef("wrote %d bytes", b) + if err := fd.Close(); err != nil { log.Fatalf(" close: %s", err) } } func dumpOverflowState() { - fd, err := os.OpenFile(filepath.Join(parser.DumpFolder, "bucket-dump.yaml"), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0666) if err != nil { log.Fatalf("open: %s", err) } + out, err := yaml.Marshal(bucketOverflows) if err != nil { log.Fatalf("marshal: %s", err) } + b, err := fd.Write(out) if err != nil { log.Fatalf("write: %s", err) } + log.Tracef("wrote %d bytes", b) + if err := fd.Close(); err != nil { log.Fatalf(" close: %s", err) } diff --git a/cmd/crowdsec/lapiclient.go b/cmd/crowdsec/lapiclient.go new file mode 100644 index 00000000000..fd29aa9d99b --- /dev/null +++ b/cmd/crowdsec/lapiclient.go @@ -0,0 +1,92 @@ +package main + +import ( + "context" + "fmt" + "net/url" + "time" + + "github.com/go-openapi/strfmt" + + "github.com/crowdsecurity/go-cs-lib/version" + + "github.com/crowdsecurity/crowdsec/pkg/apiclient" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/crowdsecurity/crowdsec/pkg/models" +) + +func AuthenticatedLAPIClient(credentials csconfig.ApiCredentialsCfg, hub *cwhub.Hub) (*apiclient.ApiClient, error) { + scenarios, err := 
hub.GetInstalledItemNames(cwhub.SCENARIOS) + if err != nil { + return nil, fmt.Errorf("loading list of installed hub scenarios: %w", err) + } + + appsecRules, err := hub.GetInstalledItemNames(cwhub.APPSEC_RULES) + if err != nil { + return nil, fmt.Errorf("loading list of installed hub appsec rules: %w", err) + } + + installedScenariosAndAppsecRules := make([]string, 0, len(scenarios)+len(appsecRules)) + installedScenariosAndAppsecRules = append(installedScenariosAndAppsecRules, scenarios...) + installedScenariosAndAppsecRules = append(installedScenariosAndAppsecRules, appsecRules...) + + apiURL, err := url.Parse(credentials.URL) + if err != nil { + return nil, fmt.Errorf("parsing api url ('%s'): %w", credentials.URL, err) + } + + papiURL, err := url.Parse(credentials.PapiURL) + if err != nil { + return nil, fmt.Errorf("parsing polling api url ('%s'): %w", credentials.PapiURL, err) + } + + password := strfmt.Password(credentials.Password) + + client, err := apiclient.NewClient(&apiclient.Config{ + MachineID: credentials.Login, + Password: password, + Scenarios: installedScenariosAndAppsecRules, + UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), + URL: apiURL, + PapiURL: papiURL, + VersionPrefix: "v1", + UpdateScenario: func() ([]string, error) { + scenarios, err := hub.GetInstalledItemNames(cwhub.SCENARIOS) + if err != nil { + return nil, err + } + appsecRules, err := hub.GetInstalledItemNames(cwhub.APPSEC_RULES) + if err != nil { + return nil, err + } + ret := make([]string, 0, len(scenarios)+len(appsecRules)) + ret = append(ret, scenarios...) + ret = append(ret, appsecRules...) 
+ + return ret, nil + }, + }) + if err != nil { + return nil, fmt.Errorf("new client api: %w", err) + } + + authResp, _, err := client.Auth.AuthenticateWatcher(context.Background(), models.WatcherAuthRequest{ + MachineID: &credentials.Login, + Password: &password, + Scenarios: installedScenariosAndAppsecRules, + }) + if err != nil { + return nil, fmt.Errorf("authenticate watcher (%s): %w", credentials.Login, err) + } + + var expiration time.Time + if err := expiration.UnmarshalText([]byte(authResp.Expire)); err != nil { + return nil, fmt.Errorf("unable to parse jwt expiration: %w", err) + } + + client.GetClient().Transport.(*apiclient.JWTTransport).Token = authResp.Token + client.GetClient().Transport.(*apiclient.JWTTransport).Expiration = expiration + + return client, nil +} diff --git a/cmd/crowdsec/metrics.go b/cmd/crowdsec/metrics.go index fa2d8d5de32..1199af0fe16 100644 --- a/cmd/crowdsec/metrics.go +++ b/cmd/crowdsec/metrics.go @@ -114,13 +114,17 @@ func computeDynamicMetrics(next http.Handler, dbClient *database.Client) http.Ha } decisionsFilters := make(map[string][]string, 0) + decisions, err := dbClient.QueryDecisionCountByScenario(decisionsFilters) if err != nil { log.Errorf("Error querying decisions for metrics: %v", err) next.ServeHTTP(w, r) + return } + globalActiveDecisions.Reset() + for _, d := range decisions { globalActiveDecisions.With(prometheus.Labels{"reason": d.Scenario, "origin": d.Origin, "action": d.Type}).Set(float64(d.Count)) } @@ -136,6 +140,7 @@ func computeDynamicMetrics(next http.Handler, dbClient *database.Client) http.Ha if err != nil { log.Errorf("Error querying alerts for metrics: %v", err) next.ServeHTTP(w, r) + return } @@ -173,11 +178,12 @@ func registerPrometheus(config *csconfig.PrometheusCfg) { globalActiveDecisions, globalAlerts, parser.NodesWlHitsOk, parser.NodesWlHits, cache.CacheMetrics, exprhelpers.RegexpCacheMetrics, ) - } } -func servePrometheus(config *csconfig.PrometheusCfg, dbClient *database.Client, apiReady chan 
bool, agentReady chan bool) { +func servePrometheus(config *csconfig.PrometheusCfg, dbClient *database.Client, agentReady chan bool) { + <-agentReady + if !config.Enabled { return } @@ -185,9 +191,8 @@ func servePrometheus(config *csconfig.PrometheusCfg, dbClient *database.Client, defer trace.CatchPanic("crowdsec/servePrometheus") http.Handle("/metrics", computeDynamicMetrics(promhttp.Handler(), dbClient)) - <-apiReady - <-agentReady log.Debugf("serving metrics after %s ms", time.Since(crowdsecT0)) + if err := http.ListenAndServe(fmt.Sprintf("%s:%d", config.ListenAddr, config.ListenPort), nil); err != nil { log.Warningf("prometheus: %s", err) } diff --git a/cmd/crowdsec/output.go b/cmd/crowdsec/output.go index ad53ce4c827..c4a2c0b6ac1 100644 --- a/cmd/crowdsec/output.go +++ b/cmd/crowdsec/output.go @@ -3,18 +3,12 @@ package main import ( "context" "fmt" - "net/url" "sync" "time" - "github.com/go-openapi/strfmt" log "github.com/sirupsen/logrus" - "github.com/crowdsecurity/go-cs-lib/version" - "github.com/crowdsecurity/crowdsec/pkg/apiclient" - "github.com/crowdsecurity/crowdsec/pkg/csconfig" - "github.com/crowdsecurity/crowdsec/pkg/cwhub" leaky "github.com/crowdsecurity/crowdsec/pkg/leakybucket" "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/parser" @@ -22,7 +16,6 @@ import ( ) func dedupAlerts(alerts []types.RuntimeAlert) ([]*models.Alert, error) { - var dedupCache []*models.Alert for idx, alert := range alerts { @@ -32,16 +25,21 @@ func dedupAlerts(alerts []types.RuntimeAlert) ([]*models.Alert, error) { dedupCache = append(dedupCache, alert.Alert) continue } + for k, src := range alert.Sources { refsrc := *alert.Alert //copy + log.Tracef("source[%s]", k) + refsrc.Source = &src dedupCache = append(dedupCache, &refsrc) } } + if len(dedupCache) != len(alerts) { log.Tracef("went from %d to %d alerts", len(alerts), len(dedupCache)) } + return dedupCache, nil } @@ -52,93 +50,25 @@ func PushAlerts(alerts []types.RuntimeAlert, client 
*apiclient.ApiClient) error if err != nil { return fmt.Errorf("failed to transform alerts for api: %w", err) } + _, _, err = client.Alerts.Add(ctx, alertsToPush) if err != nil { return fmt.Errorf("failed sending alert to LAPI: %w", err) } + return nil } var bucketOverflows []types.Event -func runOutput(input chan types.Event, overflow chan types.Event, buckets *leaky.Buckets, - postOverflowCTX parser.UnixParserCtx, postOverflowNodes []parser.Node, - apiConfig csconfig.ApiCredentialsCfg, hub *cwhub.Hub) error { +func runOutput(input chan types.Event, overflow chan types.Event, buckets *leaky.Buckets, postOverflowCTX parser.UnixParserCtx, + postOverflowNodes []parser.Node, client *apiclient.ApiClient) error { + var ( + cache []types.RuntimeAlert + cacheMutex sync.Mutex + ) - var err error ticker := time.NewTicker(1 * time.Second) - - var cache []types.RuntimeAlert - var cacheMutex sync.Mutex - - scenarios, err := hub.GetInstalledItemNames(cwhub.SCENARIOS) - if err != nil { - return fmt.Errorf("loading list of installed hub scenarios: %w", err) - } - - appsecRules, err := hub.GetInstalledItemNames(cwhub.APPSEC_RULES) - if err != nil { - return fmt.Errorf("loading list of installed hub appsec rules: %w", err) - } - - installedScenariosAndAppsecRules := make([]string, 0, len(scenarios)+len(appsecRules)) - installedScenariosAndAppsecRules = append(installedScenariosAndAppsecRules, scenarios...) - installedScenariosAndAppsecRules = append(installedScenariosAndAppsecRules, appsecRules...) 
- - apiURL, err := url.Parse(apiConfig.URL) - if err != nil { - return fmt.Errorf("parsing api url ('%s'): %w", apiConfig.URL, err) - } - papiURL, err := url.Parse(apiConfig.PapiURL) - if err != nil { - return fmt.Errorf("parsing polling api url ('%s'): %w", apiConfig.PapiURL, err) - } - password := strfmt.Password(apiConfig.Password) - - Client, err := apiclient.NewClient(&apiclient.Config{ - MachineID: apiConfig.Login, - Password: password, - Scenarios: installedScenariosAndAppsecRules, - UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), - URL: apiURL, - PapiURL: papiURL, - VersionPrefix: "v1", - UpdateScenario: func() ([]string, error) { - scenarios, err := hub.GetInstalledItemNames(cwhub.SCENARIOS) - if err != nil { - return nil, err - } - appsecRules, err := hub.GetInstalledItemNames(cwhub.APPSEC_RULES) - if err != nil { - return nil, err - } - ret := make([]string, 0, len(scenarios)+len(appsecRules)) - ret = append(ret, scenarios...) - ret = append(ret, appsecRules...) - return ret, nil - }, - }) - if err != nil { - return fmt.Errorf("new client api: %w", err) - } - authResp, _, err := Client.Auth.AuthenticateWatcher(context.Background(), models.WatcherAuthRequest{ - MachineID: &apiConfig.Login, - Password: &password, - Scenarios: installedScenariosAndAppsecRules, - }) - if err != nil { - return fmt.Errorf("authenticate watcher (%s): %w", apiConfig.Login, err) - } - - if err := Client.GetClient().Transport.(*apiclient.JWTTransport).Expiration.UnmarshalText([]byte(authResp.Expire)); err != nil { - return fmt.Errorf("unable to parse jwt expiration: %w", err) - } - - Client.GetClient().Transport.(*apiclient.JWTTransport).Token = authResp.Token - - //start the heartbeat service - log.Debugf("Starting HeartBeat service") - Client.HeartBeat.StartHeartBeat(context.Background(), &outputsTomb) LOOP: for { select { @@ -149,7 +79,7 @@ LOOP: newcache := make([]types.RuntimeAlert, 0) cache = newcache cacheMutex.Unlock() - if err := PushAlerts(cachecopy, Client); 
err != nil { + if err := PushAlerts(cachecopy, client); err != nil { log.Errorf("while pushing to api : %s", err) //just push back the events to the queue cacheMutex.Lock() @@ -162,10 +92,11 @@ LOOP: cacheMutex.Lock() cachecopy := cache cacheMutex.Unlock() - if err := PushAlerts(cachecopy, Client); err != nil { + if err := PushAlerts(cachecopy, client); err != nil { log.Errorf("while pushing leftovers to api : %s", err) } } + break LOOP case event := <-overflow: /*if alert is empty and mapKey is present, the overflow is just to cleanup bucket*/ @@ -176,7 +107,7 @@ LOOP: /* process post overflow parser nodes */ event, err := parser.Parse(postOverflowCTX, event, postOverflowNodes) if err != nil { - return fmt.Errorf("postoverflow failed : %s", err) + return fmt.Errorf("postoverflow failed: %w", err) } log.Printf("%s", *event.Overflow.Alert.Message) //if the Alert is nil, it's to signal bucket is ready for GC, don't track this @@ -206,6 +137,6 @@ LOOP: } ticker.Stop() - return nil + return nil } diff --git a/cmd/crowdsec/run_in_svc.go b/cmd/crowdsec/run_in_svc.go index 2020537908d..5a8bc9a6cd3 100644 --- a/cmd/crowdsec/run_in_svc.go +++ b/cmd/crowdsec/run_in_svc.go @@ -33,7 +33,6 @@ func StartRunSvc() error { log.Infof("Crowdsec %s", version.String()) - apiReady := make(chan bool, 1) agentReady := make(chan bool, 1) // Enable profiling early @@ -46,14 +45,19 @@ func StartRunSvc() error { dbClient, err = database.NewClient(cConfig.DbConfig) if err != nil { - return fmt.Errorf("unable to create database client: %s", err) + return fmt.Errorf("unable to create database client: %w", err) } } registerPrometheus(cConfig.Prometheus) - go servePrometheus(cConfig.Prometheus, dbClient, apiReady, agentReady) + go servePrometheus(cConfig.Prometheus, dbClient, agentReady) + } else { + // avoid leaking the channel + go func() { + <-agentReady + }() } - return Serve(cConfig, apiReady, agentReady) + return Serve(cConfig, agentReady) } diff --git a/cmd/crowdsec/run_in_svc_windows.go 
b/cmd/crowdsec/run_in_svc_windows.go index 991f7ae4491..7845e9c58b5 100644 --- a/cmd/crowdsec/run_in_svc_windows.go +++ b/cmd/crowdsec/run_in_svc_windows.go @@ -73,7 +73,6 @@ func WindowsRun() error { log.Infof("Crowdsec %s", version.String()) - apiReady := make(chan bool, 1) agentReady := make(chan bool, 1) // Enable profiling early @@ -85,11 +84,11 @@ func WindowsRun() error { dbClient, err = database.NewClient(cConfig.DbConfig) if err != nil { - return fmt.Errorf("unable to create database client: %s", err) + return fmt.Errorf("unable to create database client: %w", err) } } registerPrometheus(cConfig.Prometheus) - go servePrometheus(cConfig.Prometheus, dbClient, apiReady, agentReady) + go servePrometheus(cConfig.Prometheus, dbClient, agentReady) } - return Serve(cConfig, apiReady, agentReady) + return Serve(cConfig, agentReady) } diff --git a/cmd/crowdsec/serve.go b/cmd/crowdsec/serve.go index a5c8e24cf3f..22f65b927a0 100644 --- a/cmd/crowdsec/serve.go +++ b/cmd/crowdsec/serve.go @@ -42,7 +42,9 @@ func debugHandler(sig os.Signal, cConfig *csconfig.Config) error { if err := leaky.ShutdownAllBuckets(buckets); err != nil { log.Warningf("Failed to shut down routines : %s", err) } + log.Printf("Shutdown is finished, buckets are in %s", tmpFile) + return nil } @@ -66,15 +68,16 @@ func reloadHandler(sig os.Signal) (*csconfig.Config, error) { if !cConfig.DisableAPI { if flags.DisableCAPI { log.Warningf("Communication with CrowdSec Central API disabled from args") + cConfig.API.Server.OnlineClient = nil } + apiServer, err := initAPIServer(cConfig) if err != nil { return nil, fmt.Errorf("unable to init api server: %w", err) } - apiReady := make(chan bool, 1) - serveAPIServer(apiServer, apiReady) + serveAPIServer(apiServer) } if !cConfig.DisableAgent { @@ -110,6 +113,7 @@ func reloadHandler(sig os.Signal) (*csconfig.Config, error) { log.Warningf("Failed to delete temp file (%s) : %s", tmpFile, err) } } + return cConfig, nil } @@ -117,10 +121,12 @@ func 
ShutdownCrowdsecRoutines() error { var reterr error log.Debugf("Shutting down crowdsec sub-routines") + if len(dataSources) > 0 { acquisTomb.Kill(nil) log.Debugf("waiting for acquisition to finish") drainChan(inputLineChan) + if err := acquisTomb.Wait(); err != nil { log.Warningf("Acquisition returned error : %s", err) reterr = err @@ -130,6 +136,7 @@ func ShutdownCrowdsecRoutines() error { log.Debugf("acquisition is finished, wait for parser/bucket/ouputs.") parsersTomb.Kill(nil) drainChan(inputEventChan) + if err := parsersTomb.Wait(); err != nil { log.Warningf("Parsers returned error : %s", err) reterr = err @@ -160,6 +167,7 @@ func ShutdownCrowdsecRoutines() error { log.Warningf("Outputs returned error : %s", err) reterr = err } + log.Debugf("outputs are done") case <-time.After(3 * time.Second): // this can happen if outputs are stuck in a http retry loop @@ -181,6 +189,7 @@ func shutdownAPI() error { } log.Debugf("done") + return nil } @@ -193,6 +202,7 @@ func shutdownCrowdsec() error { } log.Debugf("done") + return nil } @@ -292,10 +302,11 @@ func HandleSignals(cConfig *csconfig.Config) error { if err == nil { log.Warning("Crowdsec service shutting down") } + return err } -func Serve(cConfig *csconfig.Config, apiReady chan bool, agentReady chan bool) error { +func Serve(cConfig *csconfig.Config, agentReady chan bool) error { acquisTomb = tomb.Tomb{} parsersTomb = tomb.Tomb{} bucketsTomb = tomb.Tomb{} @@ -325,6 +336,7 @@ func Serve(cConfig *csconfig.Config, apiReady chan bool, agentReady chan bool) e if cConfig.API.CTI != nil && *cConfig.API.CTI.Enabled { log.Infof("Crowdsec CTI helper enabled") + if err := exprhelpers.InitCrowdsecCTI(cConfig.API.CTI.Key, cConfig.API.CTI.CacheTimeout, cConfig.API.CTI.CacheSize, cConfig.API.CTI.LogLevel); err != nil { return fmt.Errorf("failed to init crowdsec cti: %w", err) } @@ -337,6 +349,7 @@ func Serve(cConfig *csconfig.Config, apiReady chan bool, agentReady chan bool) e if flags.DisableCAPI { log.Warningf("Communication 
with CrowdSec Central API disabled from args") + cConfig.API.Server.OnlineClient = nil } @@ -346,10 +359,8 @@ func Serve(cConfig *csconfig.Config, apiReady chan bool, agentReady chan bool) e } if !flags.TestMode { - serveAPIServer(apiServer, apiReady) + serveAPIServer(apiServer) } - } else { - apiReady <- true } if !cConfig.DisableAgent { @@ -366,6 +377,8 @@ func Serve(cConfig *csconfig.Config, apiReady chan bool, agentReady chan bool) e // if it's just linting, we're done if !flags.TestMode { serveCrowdsec(csParsers, cConfig, hub, agentReady) + } else { + agentReady <- true } } else { agentReady <- true @@ -395,6 +408,7 @@ func Serve(cConfig *csconfig.Config, apiReady chan bool, agentReady chan bool) e for _, ch := range waitChans { <-ch + switch ch { case apiTomb.Dead(): log.Infof("api shutdown") @@ -402,5 +416,6 @@ func Serve(cConfig *csconfig.Config, apiReady chan bool, agentReady chan bool) e log.Infof("crowdsec shutdown") } } + return nil } diff --git a/test/bats/01_crowdsec.bats b/test/bats/01_crowdsec.bats index be06ac9261a..a585930e34c 100644 --- a/test/bats/01_crowdsec.bats +++ b/test/bats/01_crowdsec.bats @@ -75,6 +75,9 @@ teardown() { rune -0 ./instance-crowdsec start-pid PID="$output" + + sleep .5 + assert_file_exists "$log_old" assert_file_contains "$log_old" "Starting processing data" diff --git a/test/bats/40_live-ban.bats b/test/bats/40_live-ban.bats index c6b8ddf1563..a544f67be18 100644 --- a/test/bats/40_live-ban.bats +++ b/test/bats/40_live-ban.bats @@ -41,10 +41,23 @@ teardown() { echo -e "---\nfilename: ${tmpfile}\nlabels:\n type: syslog\n" >>"${ACQUIS_YAML}" ./instance-crowdsec start + + sleep 0.2 + fake_log >>"${tmpfile}" - sleep 2 + + sleep 0.2 + rm -f -- "${tmpfile}" - rune -0 cscli decisions list -o json - rune -0 jq -r '.[].decisions[0].value' <(output) - assert_output '1.1.1.172' + + found=0 + # this may take some time in CI + for _ in $(seq 1 10); do + if cscli decisions list -o json | jq -r '.[].decisions[0].value' | grep -q 
'1.1.1.172'; then + found=1 + break + fi + sleep 0.2 + done + assert_equal 1 "${found}" } From 45571cea08591962b515ed903b0b00488a4f7c13 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 14 Feb 2024 09:47:12 +0100 Subject: [PATCH 026/318] use go 1.21.7 (#2830) --- .github/workflows/bats-hub.yml | 2 +- .github/workflows/bats-mysql.yml | 2 +- .github/workflows/bats-postgres.yml | 2 +- .github/workflows/bats-sqlite-coverage.yml | 2 +- .github/workflows/ci-windows-build-msi.yml | 2 +- .github/workflows/codeql-analysis.yml | 3 ++- .github/workflows/go-tests-windows.yml | 2 +- .github/workflows/go-tests.yml | 2 +- .github/workflows/publish-tarball-release.yml | 2 +- Dockerfile | 2 +- Dockerfile.debian | 2 +- azure-pipelines.yml | 2 +- 12 files changed, 13 insertions(+), 12 deletions(-) diff --git a/.github/workflows/bats-hub.yml b/.github/workflows/bats-hub.yml index 7764da84812..075480485ff 100644 --- a/.github/workflows/bats-hub.yml +++ b/.github/workflows/bats-hub.yml @@ -33,7 +33,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.6" + go-version: "1.21.7" - name: "Install bats dependencies" env: diff --git a/.github/workflows/bats-mysql.yml b/.github/workflows/bats-mysql.yml index 243da6eb25d..5c019933304 100644 --- a/.github/workflows/bats-mysql.yml +++ b/.github/workflows/bats-mysql.yml @@ -36,7 +36,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.6" + go-version: "1.21.7" - name: "Install bats dependencies" env: diff --git a/.github/workflows/bats-postgres.yml b/.github/workflows/bats-postgres.yml index 07d3cd8d2f1..0f3c69ccefa 100644 --- a/.github/workflows/bats-postgres.yml +++ b/.github/workflows/bats-postgres.yml @@ -45,7 +45,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.6" + go-version: "1.21.7" - name: "Install bats dependencies" env: diff --git a/.github/workflows/bats-sqlite-coverage.yml 
b/.github/workflows/bats-sqlite-coverage.yml index 46a5dd8bc86..436eb0f04a4 100644 --- a/.github/workflows/bats-sqlite-coverage.yml +++ b/.github/workflows/bats-sqlite-coverage.yml @@ -28,7 +28,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.6" + go-version: "1.21.7" - name: "Install bats dependencies" env: diff --git a/.github/workflows/ci-windows-build-msi.yml b/.github/workflows/ci-windows-build-msi.yml index 26c981143ad..7c6a6621de4 100644 --- a/.github/workflows/ci-windows-build-msi.yml +++ b/.github/workflows/ci-windows-build-msi.yml @@ -35,7 +35,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.6" + go-version: "1.21.7" - name: Build run: make windows_installer BUILD_RE2_WASM=1 diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 4b262f13d09..bdc16e650f6 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -48,10 +48,11 @@ jobs: with: # required to pick up tags for BUILD_VERSION fetch-depth: 0 + - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.6" + go-version: "1.21.7" cache-dependency-path: "**/go.sum" # Initializes the CodeQL tools for scanning. 
diff --git a/.github/workflows/go-tests-windows.yml b/.github/workflows/go-tests-windows.yml index 63781a7b25e..efe16ed66d9 100644 --- a/.github/workflows/go-tests-windows.yml +++ b/.github/workflows/go-tests-windows.yml @@ -34,7 +34,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.6" + go-version: "1.21.7" - name: Build run: | diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index e8840c07f4e..865b2782a63 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -126,7 +126,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.6" + go-version: "1.21.7" - name: Create localstack streams run: | diff --git a/.github/workflows/publish-tarball-release.yml b/.github/workflows/publish-tarball-release.yml index 202882791e7..d251677fd46 100644 --- a/.github/workflows/publish-tarball-release.yml +++ b/.github/workflows/publish-tarball-release.yml @@ -25,7 +25,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.6" + go-version: "1.21.7" - name: Build the binaries run: | diff --git a/Dockerfile b/Dockerfile index 2369c09dfa6..420c521fa58 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # vim: set ft=dockerfile: -FROM golang:1.21.6-alpine3.18 AS build +FROM golang:1.21.7-alpine3.18 AS build ARG BUILD_VERSION diff --git a/Dockerfile.debian b/Dockerfile.debian index ba0cd20fb43..48753e7acdb 100644 --- a/Dockerfile.debian +++ b/Dockerfile.debian @@ -1,5 +1,5 @@ # vim: set ft=dockerfile: -FROM golang:1.21.6-bookworm AS build +FROM golang:1.21.7-bookworm AS build ARG BUILD_VERSION diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 82caba42bae..791f41f50ba 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -27,7 +27,7 @@ stages: - task: GoTool@0 displayName: "Install Go" inputs: - version: '1.21.6' + version: '1.21.7' - pwsh: | choco install -y make From 2bbf0b4762ad58f5e50858132085ac4586502008 Mon Sep 17 
00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 14 Feb 2024 11:19:13 +0100 Subject: [PATCH 027/318] re-generate ent code (#2844) --- go.sum | 2 + pkg/database/ent/alert.go | 28 +- pkg/database/ent/alert/alert.go | 203 +++ pkg/database/ent/alert/where.go | 1525 +++++---------------- pkg/database/ent/alert_create.go | 221 +-- pkg/database/ent/alert_delete.go | 51 +- pkg/database/ent/alert_query.go | 279 ++-- pkg/database/ent/alert_update.go | 738 ++-------- pkg/database/ent/bouncer.go | 22 +- pkg/database/ent/bouncer/bouncer.go | 65 + pkg/database/ent/bouncer/where.go | 687 ++-------- pkg/database/ent/bouncer_create.go | 129 +- pkg/database/ent/bouncer_delete.go | 51 +- pkg/database/ent/bouncer_query.go | 239 ++-- pkg/database/ent/bouncer_update.go | 286 +--- pkg/database/ent/client.go | 466 ++++++- pkg/database/ent/config.go | 65 - pkg/database/ent/configitem.go | 22 +- pkg/database/ent/configitem/configitem.go | 30 + pkg/database/ent/configitem/where.go | 299 +--- pkg/database/ent/configitem_create.go | 87 +- pkg/database/ent/configitem_delete.go | 51 +- pkg/database/ent/configitem_query.go | 239 ++-- pkg/database/ent/configitem_update.go | 162 +-- pkg/database/ent/context.go | 33 - pkg/database/ent/decision.go | 24 +- pkg/database/ent/decision/decision.go | 105 ++ pkg/database/ent/decision/where.go | 930 +++---------- pkg/database/ent/decision_create.go | 158 +-- pkg/database/ent/decision_delete.go | 51 +- pkg/database/ent/decision_query.go | 249 ++-- pkg/database/ent/decision_update.go | 444 +----- pkg/database/ent/ent.go | 233 +++- pkg/database/ent/event.go | 24 +- pkg/database/ent/event/event.go | 50 + pkg/database/ent/event/where.go | 322 +---- pkg/database/ent/event_create.go | 92 +- pkg/database/ent/event_delete.go | 51 +- pkg/database/ent/event_query.go | 249 ++-- pkg/database/ent/event_update.go | 196 +-- pkg/database/ent/hook/hook.go | 49 +- pkg/database/ent/machine.go | 24 +- pkg/database/ent/machine/machine.go | 92 ++ 
pkg/database/ent/machine/where.go | 766 +++-------- pkg/database/ent/machine_create.go | 140 +- pkg/database/ent/machine_delete.go | 51 +- pkg/database/ent/machine_query.go | 247 ++-- pkg/database/ent/machine_update.go | 352 +---- pkg/database/ent/meta.go | 24 +- pkg/database/ent/meta/meta.go | 50 + pkg/database/ent/meta/where.go | 342 +---- pkg/database/ent/meta_create.go | 92 +- pkg/database/ent/meta_delete.go | 51 +- pkg/database/ent/meta_query.go | 249 ++-- pkg/database/ent/meta_update.go | 196 +-- pkg/database/ent/mutation.go | 112 +- pkg/database/ent/runtime/runtime.go | 4 +- pkg/database/ent/tx.go | 36 +- 58 files changed, 4026 insertions(+), 8009 deletions(-) delete mode 100644 pkg/database/ent/config.go delete mode 100644 pkg/database/ent/context.go diff --git a/go.sum b/go.sum index 8fa2021316b..2daf22cc99c 100644 --- a/go.sum +++ b/go.sum @@ -542,6 +542,8 @@ github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= +github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 h1:rc3tiVYb5z54aKaDfakKn0dDjIyPpTtszkjuMzyt7ec= diff --git a/pkg/database/ent/alert.go b/pkg/database/ent/alert.go index 2649923bf5e..5cb4d1a352c 100644 --- a/pkg/database/ent/alert.go +++ b/pkg/database/ent/alert.go @@ -7,6 +7,7 @@ import ( "strings" "time" + "entgo.io/ent" "entgo.io/ent/dialect/sql" 
"github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" @@ -67,6 +68,7 @@ type Alert struct { // The values are being populated by the AlertQuery when eager-loading is set. Edges AlertEdges `json:"edges"` machine_alerts *int + selectValues sql.SelectValues } // AlertEdges holds the relations/edges for other nodes in the graph. @@ -142,7 +144,7 @@ func (*Alert) scanValues(columns []string) ([]any, error) { case alert.ForeignKeys[0]: // machine_alerts values[i] = new(sql.NullInt64) default: - return nil, fmt.Errorf("unexpected column %q for type Alert", columns[i]) + values[i] = new(sql.UnknownType) } } return values, nil @@ -309,36 +311,44 @@ func (a *Alert) assignValues(columns []string, values []any) error { a.machine_alerts = new(int) *a.machine_alerts = int(value.Int64) } + default: + a.selectValues.Set(columns[i], values[i]) } } return nil } +// Value returns the ent.Value that was dynamically selected and assigned to the Alert. +// This includes values selected through modifiers, order, etc. +func (a *Alert) Value(name string) (ent.Value, error) { + return a.selectValues.Get(name) +} + // QueryOwner queries the "owner" edge of the Alert entity. func (a *Alert) QueryOwner() *MachineQuery { - return (&AlertClient{config: a.config}).QueryOwner(a) + return NewAlertClient(a.config).QueryOwner(a) } // QueryDecisions queries the "decisions" edge of the Alert entity. func (a *Alert) QueryDecisions() *DecisionQuery { - return (&AlertClient{config: a.config}).QueryDecisions(a) + return NewAlertClient(a.config).QueryDecisions(a) } // QueryEvents queries the "events" edge of the Alert entity. func (a *Alert) QueryEvents() *EventQuery { - return (&AlertClient{config: a.config}).QueryEvents(a) + return NewAlertClient(a.config).QueryEvents(a) } // QueryMetas queries the "metas" edge of the Alert entity. 
func (a *Alert) QueryMetas() *MetaQuery { - return (&AlertClient{config: a.config}).QueryMetas(a) + return NewAlertClient(a.config).QueryMetas(a) } // Update returns a builder for updating this Alert. // Note that you need to call Alert.Unwrap() before calling this method if this Alert // was returned from a transaction, and the transaction was committed or rolled back. func (a *Alert) Update() *AlertUpdateOne { - return (&AlertClient{config: a.config}).UpdateOne(a) + return NewAlertClient(a.config).UpdateOne(a) } // Unwrap unwraps the Alert entity that was returned from a transaction after it was closed, @@ -435,9 +445,3 @@ func (a *Alert) String() string { // Alerts is a parsable slice of Alert. type Alerts []*Alert - -func (a Alerts) config(cfg config) { - for _i := range a { - a[_i].config = cfg - } -} diff --git a/pkg/database/ent/alert/alert.go b/pkg/database/ent/alert/alert.go index abee13fb97a..eb9f1d10788 100644 --- a/pkg/database/ent/alert/alert.go +++ b/pkg/database/ent/alert/alert.go @@ -4,6 +4,9 @@ package alert import ( "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" ) const ( @@ -168,3 +171,203 @@ var ( // DefaultSimulated holds the default value on creation for the "simulated" field. DefaultSimulated bool ) + +// OrderOption defines the ordering options for the Alert queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. +func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// ByScenario orders the results by the scenario field. 
+func ByScenario(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldScenario, opts...).ToFunc() +} + +// ByBucketId orders the results by the bucketId field. +func ByBucketId(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldBucketId, opts...).ToFunc() +} + +// ByMessage orders the results by the message field. +func ByMessage(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldMessage, opts...).ToFunc() +} + +// ByEventsCountField orders the results by the eventsCount field. +func ByEventsCountField(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldEventsCount, opts...).ToFunc() +} + +// ByStartedAt orders the results by the startedAt field. +func ByStartedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldStartedAt, opts...).ToFunc() +} + +// ByStoppedAt orders the results by the stoppedAt field. +func ByStoppedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldStoppedAt, opts...).ToFunc() +} + +// BySourceIp orders the results by the sourceIp field. +func BySourceIp(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSourceIp, opts...).ToFunc() +} + +// BySourceRange orders the results by the sourceRange field. +func BySourceRange(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSourceRange, opts...).ToFunc() +} + +// BySourceAsNumber orders the results by the sourceAsNumber field. +func BySourceAsNumber(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSourceAsNumber, opts...).ToFunc() +} + +// BySourceAsName orders the results by the sourceAsName field. +func BySourceAsName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSourceAsName, opts...).ToFunc() +} + +// BySourceCountry orders the results by the sourceCountry field. 
+func BySourceCountry(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSourceCountry, opts...).ToFunc() +} + +// BySourceLatitude orders the results by the sourceLatitude field. +func BySourceLatitude(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSourceLatitude, opts...).ToFunc() +} + +// BySourceLongitude orders the results by the sourceLongitude field. +func BySourceLongitude(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSourceLongitude, opts...).ToFunc() +} + +// BySourceScope orders the results by the sourceScope field. +func BySourceScope(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSourceScope, opts...).ToFunc() +} + +// BySourceValue orders the results by the sourceValue field. +func BySourceValue(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSourceValue, opts...).ToFunc() +} + +// ByCapacity orders the results by the capacity field. +func ByCapacity(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCapacity, opts...).ToFunc() +} + +// ByLeakSpeed orders the results by the leakSpeed field. +func ByLeakSpeed(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldLeakSpeed, opts...).ToFunc() +} + +// ByScenarioVersion orders the results by the scenarioVersion field. +func ByScenarioVersion(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldScenarioVersion, opts...).ToFunc() +} + +// ByScenarioHash orders the results by the scenarioHash field. +func ByScenarioHash(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldScenarioHash, opts...).ToFunc() +} + +// BySimulated orders the results by the simulated field. +func BySimulated(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSimulated, opts...).ToFunc() +} + +// ByUUID orders the results by the uuid field. 
+func ByUUID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUUID, opts...).ToFunc() +} + +// ByOwnerField orders the results by owner field. +func ByOwnerField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newOwnerStep(), sql.OrderByField(field, opts...)) + } +} + +// ByDecisionsCount orders the results by decisions count. +func ByDecisionsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newDecisionsStep(), opts...) + } +} + +// ByDecisions orders the results by decisions terms. +func ByDecisions(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newDecisionsStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByEventsCount orders the results by events count. +func ByEventsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newEventsStep(), opts...) + } +} + +// ByEvents orders the results by events terms. +func ByEvents(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newEventsStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByMetasCount orders the results by metas count. +func ByMetasCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newMetasStep(), opts...) + } +} + +// ByMetas orders the results by metas terms. +func ByMetas(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newMetasStep(), append([]sql.OrderTerm{term}, terms...)...) 
+ } +} +func newOwnerStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(OwnerInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn), + ) +} +func newDecisionsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(DecisionsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, DecisionsTable, DecisionsColumn), + ) +} +func newEventsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(EventsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, EventsTable, EventsColumn), + ) +} +func newMetasStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(MetasInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, MetasTable, MetasColumn), + ) +} diff --git a/pkg/database/ent/alert/where.go b/pkg/database/ent/alert/where.go index ef5b89b615f..516ead50636 100644 --- a/pkg/database/ent/alert/where.go +++ b/pkg/database/ent/alert/where.go @@ -12,2440 +12,1612 @@ import ( // ID filters vertices based on their ID field. func ID(id int) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.Alert(sql.FieldEQ(FieldID, id)) } // IDEQ applies the EQ predicate on the ID field. func IDEQ(id int) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.Alert(sql.FieldEQ(FieldID, id)) } // IDNEQ applies the NEQ predicate on the ID field. func IDNEQ(id int) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldID), id)) - }) + return predicate.Alert(sql.FieldNEQ(FieldID, id)) } // IDIn applies the In predicate on the ID field. 
func IDIn(ids ...int) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - v := make([]any, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.In(s.C(FieldID), v...)) - }) + return predicate.Alert(sql.FieldIn(FieldID, ids...)) } // IDNotIn applies the NotIn predicate on the ID field. func IDNotIn(ids ...int) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - v := make([]any, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.NotIn(s.C(FieldID), v...)) - }) + return predicate.Alert(sql.FieldNotIn(FieldID, ids...)) } // IDGT applies the GT predicate on the ID field. func IDGT(id int) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldID), id)) - }) + return predicate.Alert(sql.FieldGT(FieldID, id)) } // IDGTE applies the GTE predicate on the ID field. func IDGTE(id int) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldID), id)) - }) + return predicate.Alert(sql.FieldGTE(FieldID, id)) } // IDLT applies the LT predicate on the ID field. func IDLT(id int) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldID), id)) - }) + return predicate.Alert(sql.FieldLT(FieldID, id)) } // IDLTE applies the LTE predicate on the ID field. func IDLTE(id int) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldID), id)) - }) + return predicate.Alert(sql.FieldLTE(FieldID, id)) } // CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. func CreatedAt(v time.Time) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldCreatedAt), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldCreatedAt, v)) } // UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. 
func UpdatedAt(v time.Time) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldUpdatedAt), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldUpdatedAt, v)) } // Scenario applies equality check predicate on the "scenario" field. It's identical to ScenarioEQ. func Scenario(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldScenario), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldScenario, v)) } // BucketId applies equality check predicate on the "bucketId" field. It's identical to BucketIdEQ. func BucketId(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldBucketId), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldBucketId, v)) } // Message applies equality check predicate on the "message" field. It's identical to MessageEQ. func Message(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldMessage), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldMessage, v)) } // EventsCount applies equality check predicate on the "eventsCount" field. It's identical to EventsCountEQ. func EventsCount(v int32) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldEventsCount), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldEventsCount, v)) } // StartedAt applies equality check predicate on the "startedAt" field. It's identical to StartedAtEQ. func StartedAt(v time.Time) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldStartedAt), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldStartedAt, v)) } // StoppedAt applies equality check predicate on the "stoppedAt" field. It's identical to StoppedAtEQ. 
func StoppedAt(v time.Time) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldStoppedAt), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldStoppedAt, v)) } // SourceIp applies equality check predicate on the "sourceIp" field. It's identical to SourceIpEQ. func SourceIp(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSourceIp), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldSourceIp, v)) } // SourceRange applies equality check predicate on the "sourceRange" field. It's identical to SourceRangeEQ. func SourceRange(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSourceRange), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldSourceRange, v)) } // SourceAsNumber applies equality check predicate on the "sourceAsNumber" field. It's identical to SourceAsNumberEQ. func SourceAsNumber(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSourceAsNumber), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldSourceAsNumber, v)) } // SourceAsName applies equality check predicate on the "sourceAsName" field. It's identical to SourceAsNameEQ. func SourceAsName(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSourceAsName), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldSourceAsName, v)) } // SourceCountry applies equality check predicate on the "sourceCountry" field. It's identical to SourceCountryEQ. func SourceCountry(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSourceCountry), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldSourceCountry, v)) } // SourceLatitude applies equality check predicate on the "sourceLatitude" field. It's identical to SourceLatitudeEQ. 
func SourceLatitude(v float32) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSourceLatitude), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldSourceLatitude, v)) } // SourceLongitude applies equality check predicate on the "sourceLongitude" field. It's identical to SourceLongitudeEQ. func SourceLongitude(v float32) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSourceLongitude), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldSourceLongitude, v)) } // SourceScope applies equality check predicate on the "sourceScope" field. It's identical to SourceScopeEQ. func SourceScope(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSourceScope), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldSourceScope, v)) } // SourceValue applies equality check predicate on the "sourceValue" field. It's identical to SourceValueEQ. func SourceValue(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSourceValue), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldSourceValue, v)) } // Capacity applies equality check predicate on the "capacity" field. It's identical to CapacityEQ. func Capacity(v int32) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldCapacity), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldCapacity, v)) } // LeakSpeed applies equality check predicate on the "leakSpeed" field. It's identical to LeakSpeedEQ. func LeakSpeed(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldLeakSpeed), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldLeakSpeed, v)) } // ScenarioVersion applies equality check predicate on the "scenarioVersion" field. It's identical to ScenarioVersionEQ. 
func ScenarioVersion(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldScenarioVersion), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldScenarioVersion, v)) } // ScenarioHash applies equality check predicate on the "scenarioHash" field. It's identical to ScenarioHashEQ. func ScenarioHash(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldScenarioHash), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldScenarioHash, v)) } // Simulated applies equality check predicate on the "simulated" field. It's identical to SimulatedEQ. func Simulated(v bool) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSimulated), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldSimulated, v)) } // UUID applies equality check predicate on the "uuid" field. It's identical to UUIDEQ. func UUID(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldUUID), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldUUID, v)) } // CreatedAtEQ applies the EQ predicate on the "created_at" field. func CreatedAtEQ(v time.Time) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldCreatedAt), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldCreatedAt, v)) } // CreatedAtNEQ applies the NEQ predicate on the "created_at" field. func CreatedAtNEQ(v time.Time) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldCreatedAt), v)) - }) + return predicate.Alert(sql.FieldNEQ(FieldCreatedAt, v)) } // CreatedAtIn applies the In predicate on the "created_at" field. 
func CreatedAtIn(vs ...time.Time) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldCreatedAt), v...)) - }) + return predicate.Alert(sql.FieldIn(FieldCreatedAt, vs...)) } // CreatedAtNotIn applies the NotIn predicate on the "created_at" field. func CreatedAtNotIn(vs ...time.Time) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldCreatedAt), v...)) - }) + return predicate.Alert(sql.FieldNotIn(FieldCreatedAt, vs...)) } // CreatedAtGT applies the GT predicate on the "created_at" field. func CreatedAtGT(v time.Time) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldCreatedAt), v)) - }) + return predicate.Alert(sql.FieldGT(FieldCreatedAt, v)) } // CreatedAtGTE applies the GTE predicate on the "created_at" field. func CreatedAtGTE(v time.Time) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldCreatedAt), v)) - }) + return predicate.Alert(sql.FieldGTE(FieldCreatedAt, v)) } // CreatedAtLT applies the LT predicate on the "created_at" field. func CreatedAtLT(v time.Time) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldCreatedAt), v)) - }) + return predicate.Alert(sql.FieldLT(FieldCreatedAt, v)) } // CreatedAtLTE applies the LTE predicate on the "created_at" field. func CreatedAtLTE(v time.Time) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldCreatedAt), v)) - }) + return predicate.Alert(sql.FieldLTE(FieldCreatedAt, v)) } // CreatedAtIsNil applies the IsNil predicate on the "created_at" field. 
func CreatedAtIsNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldCreatedAt))) - }) + return predicate.Alert(sql.FieldIsNull(FieldCreatedAt)) } // CreatedAtNotNil applies the NotNil predicate on the "created_at" field. func CreatedAtNotNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldCreatedAt))) - }) + return predicate.Alert(sql.FieldNotNull(FieldCreatedAt)) } // UpdatedAtEQ applies the EQ predicate on the "updated_at" field. func UpdatedAtEQ(v time.Time) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldUpdatedAt), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldUpdatedAt, v)) } // UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. func UpdatedAtNEQ(v time.Time) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldUpdatedAt), v)) - }) + return predicate.Alert(sql.FieldNEQ(FieldUpdatedAt, v)) } // UpdatedAtIn applies the In predicate on the "updated_at" field. func UpdatedAtIn(vs ...time.Time) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldUpdatedAt), v...)) - }) + return predicate.Alert(sql.FieldIn(FieldUpdatedAt, vs...)) } // UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. func UpdatedAtNotIn(vs ...time.Time) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldUpdatedAt), v...)) - }) + return predicate.Alert(sql.FieldNotIn(FieldUpdatedAt, vs...)) } // UpdatedAtGT applies the GT predicate on the "updated_at" field. 
func UpdatedAtGT(v time.Time) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldUpdatedAt), v)) - }) + return predicate.Alert(sql.FieldGT(FieldUpdatedAt, v)) } // UpdatedAtGTE applies the GTE predicate on the "updated_at" field. func UpdatedAtGTE(v time.Time) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldUpdatedAt), v)) - }) + return predicate.Alert(sql.FieldGTE(FieldUpdatedAt, v)) } // UpdatedAtLT applies the LT predicate on the "updated_at" field. func UpdatedAtLT(v time.Time) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldUpdatedAt), v)) - }) + return predicate.Alert(sql.FieldLT(FieldUpdatedAt, v)) } // UpdatedAtLTE applies the LTE predicate on the "updated_at" field. func UpdatedAtLTE(v time.Time) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldUpdatedAt), v)) - }) + return predicate.Alert(sql.FieldLTE(FieldUpdatedAt, v)) } // UpdatedAtIsNil applies the IsNil predicate on the "updated_at" field. func UpdatedAtIsNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldUpdatedAt))) - }) + return predicate.Alert(sql.FieldIsNull(FieldUpdatedAt)) } // UpdatedAtNotNil applies the NotNil predicate on the "updated_at" field. func UpdatedAtNotNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldUpdatedAt))) - }) + return predicate.Alert(sql.FieldNotNull(FieldUpdatedAt)) } // ScenarioEQ applies the EQ predicate on the "scenario" field. func ScenarioEQ(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldScenario), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldScenario, v)) } // ScenarioNEQ applies the NEQ predicate on the "scenario" field. 
func ScenarioNEQ(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldScenario), v)) - }) + return predicate.Alert(sql.FieldNEQ(FieldScenario, v)) } // ScenarioIn applies the In predicate on the "scenario" field. func ScenarioIn(vs ...string) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldScenario), v...)) - }) + return predicate.Alert(sql.FieldIn(FieldScenario, vs...)) } // ScenarioNotIn applies the NotIn predicate on the "scenario" field. func ScenarioNotIn(vs ...string) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldScenario), v...)) - }) + return predicate.Alert(sql.FieldNotIn(FieldScenario, vs...)) } // ScenarioGT applies the GT predicate on the "scenario" field. func ScenarioGT(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldScenario), v)) - }) + return predicate.Alert(sql.FieldGT(FieldScenario, v)) } // ScenarioGTE applies the GTE predicate on the "scenario" field. func ScenarioGTE(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldScenario), v)) - }) + return predicate.Alert(sql.FieldGTE(FieldScenario, v)) } // ScenarioLT applies the LT predicate on the "scenario" field. func ScenarioLT(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldScenario), v)) - }) + return predicate.Alert(sql.FieldLT(FieldScenario, v)) } // ScenarioLTE applies the LTE predicate on the "scenario" field. 
func ScenarioLTE(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldScenario), v)) - }) + return predicate.Alert(sql.FieldLTE(FieldScenario, v)) } // ScenarioContains applies the Contains predicate on the "scenario" field. func ScenarioContains(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldScenario), v)) - }) + return predicate.Alert(sql.FieldContains(FieldScenario, v)) } // ScenarioHasPrefix applies the HasPrefix predicate on the "scenario" field. func ScenarioHasPrefix(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldScenario), v)) - }) + return predicate.Alert(sql.FieldHasPrefix(FieldScenario, v)) } // ScenarioHasSuffix applies the HasSuffix predicate on the "scenario" field. func ScenarioHasSuffix(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldScenario), v)) - }) + return predicate.Alert(sql.FieldHasSuffix(FieldScenario, v)) } // ScenarioEqualFold applies the EqualFold predicate on the "scenario" field. func ScenarioEqualFold(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldScenario), v)) - }) + return predicate.Alert(sql.FieldEqualFold(FieldScenario, v)) } // ScenarioContainsFold applies the ContainsFold predicate on the "scenario" field. func ScenarioContainsFold(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldScenario), v)) - }) + return predicate.Alert(sql.FieldContainsFold(FieldScenario, v)) } // BucketIdEQ applies the EQ predicate on the "bucketId" field. 
func BucketIdEQ(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldBucketId), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldBucketId, v)) } // BucketIdNEQ applies the NEQ predicate on the "bucketId" field. func BucketIdNEQ(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldBucketId), v)) - }) + return predicate.Alert(sql.FieldNEQ(FieldBucketId, v)) } // BucketIdIn applies the In predicate on the "bucketId" field. func BucketIdIn(vs ...string) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldBucketId), v...)) - }) + return predicate.Alert(sql.FieldIn(FieldBucketId, vs...)) } // BucketIdNotIn applies the NotIn predicate on the "bucketId" field. func BucketIdNotIn(vs ...string) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldBucketId), v...)) - }) + return predicate.Alert(sql.FieldNotIn(FieldBucketId, vs...)) } // BucketIdGT applies the GT predicate on the "bucketId" field. func BucketIdGT(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldBucketId), v)) - }) + return predicate.Alert(sql.FieldGT(FieldBucketId, v)) } // BucketIdGTE applies the GTE predicate on the "bucketId" field. func BucketIdGTE(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldBucketId), v)) - }) + return predicate.Alert(sql.FieldGTE(FieldBucketId, v)) } // BucketIdLT applies the LT predicate on the "bucketId" field. 
func BucketIdLT(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldBucketId), v)) - }) + return predicate.Alert(sql.FieldLT(FieldBucketId, v)) } // BucketIdLTE applies the LTE predicate on the "bucketId" field. func BucketIdLTE(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldBucketId), v)) - }) + return predicate.Alert(sql.FieldLTE(FieldBucketId, v)) } // BucketIdContains applies the Contains predicate on the "bucketId" field. func BucketIdContains(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldBucketId), v)) - }) + return predicate.Alert(sql.FieldContains(FieldBucketId, v)) } // BucketIdHasPrefix applies the HasPrefix predicate on the "bucketId" field. func BucketIdHasPrefix(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldBucketId), v)) - }) + return predicate.Alert(sql.FieldHasPrefix(FieldBucketId, v)) } // BucketIdHasSuffix applies the HasSuffix predicate on the "bucketId" field. func BucketIdHasSuffix(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldBucketId), v)) - }) + return predicate.Alert(sql.FieldHasSuffix(FieldBucketId, v)) } // BucketIdIsNil applies the IsNil predicate on the "bucketId" field. func BucketIdIsNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldBucketId))) - }) + return predicate.Alert(sql.FieldIsNull(FieldBucketId)) } // BucketIdNotNil applies the NotNil predicate on the "bucketId" field. func BucketIdNotNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldBucketId))) - }) + return predicate.Alert(sql.FieldNotNull(FieldBucketId)) } // BucketIdEqualFold applies the EqualFold predicate on the "bucketId" field. 
func BucketIdEqualFold(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldBucketId), v)) - }) + return predicate.Alert(sql.FieldEqualFold(FieldBucketId, v)) } // BucketIdContainsFold applies the ContainsFold predicate on the "bucketId" field. func BucketIdContainsFold(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldBucketId), v)) - }) + return predicate.Alert(sql.FieldContainsFold(FieldBucketId, v)) } // MessageEQ applies the EQ predicate on the "message" field. func MessageEQ(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldMessage), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldMessage, v)) } // MessageNEQ applies the NEQ predicate on the "message" field. func MessageNEQ(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldMessage), v)) - }) + return predicate.Alert(sql.FieldNEQ(FieldMessage, v)) } // MessageIn applies the In predicate on the "message" field. func MessageIn(vs ...string) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldMessage), v...)) - }) + return predicate.Alert(sql.FieldIn(FieldMessage, vs...)) } // MessageNotIn applies the NotIn predicate on the "message" field. func MessageNotIn(vs ...string) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldMessage), v...)) - }) + return predicate.Alert(sql.FieldNotIn(FieldMessage, vs...)) } // MessageGT applies the GT predicate on the "message" field. 
func MessageGT(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldMessage), v)) - }) + return predicate.Alert(sql.FieldGT(FieldMessage, v)) } // MessageGTE applies the GTE predicate on the "message" field. func MessageGTE(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldMessage), v)) - }) + return predicate.Alert(sql.FieldGTE(FieldMessage, v)) } // MessageLT applies the LT predicate on the "message" field. func MessageLT(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldMessage), v)) - }) + return predicate.Alert(sql.FieldLT(FieldMessage, v)) } // MessageLTE applies the LTE predicate on the "message" field. func MessageLTE(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldMessage), v)) - }) + return predicate.Alert(sql.FieldLTE(FieldMessage, v)) } // MessageContains applies the Contains predicate on the "message" field. func MessageContains(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldMessage), v)) - }) + return predicate.Alert(sql.FieldContains(FieldMessage, v)) } // MessageHasPrefix applies the HasPrefix predicate on the "message" field. func MessageHasPrefix(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldMessage), v)) - }) + return predicate.Alert(sql.FieldHasPrefix(FieldMessage, v)) } // MessageHasSuffix applies the HasSuffix predicate on the "message" field. func MessageHasSuffix(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldMessage), v)) - }) + return predicate.Alert(sql.FieldHasSuffix(FieldMessage, v)) } // MessageIsNil applies the IsNil predicate on the "message" field. 
func MessageIsNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldMessage))) - }) + return predicate.Alert(sql.FieldIsNull(FieldMessage)) } // MessageNotNil applies the NotNil predicate on the "message" field. func MessageNotNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldMessage))) - }) + return predicate.Alert(sql.FieldNotNull(FieldMessage)) } // MessageEqualFold applies the EqualFold predicate on the "message" field. func MessageEqualFold(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldMessage), v)) - }) + return predicate.Alert(sql.FieldEqualFold(FieldMessage, v)) } // MessageContainsFold applies the ContainsFold predicate on the "message" field. func MessageContainsFold(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldMessage), v)) - }) + return predicate.Alert(sql.FieldContainsFold(FieldMessage, v)) } // EventsCountEQ applies the EQ predicate on the "eventsCount" field. func EventsCountEQ(v int32) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldEventsCount), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldEventsCount, v)) } // EventsCountNEQ applies the NEQ predicate on the "eventsCount" field. func EventsCountNEQ(v int32) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldEventsCount), v)) - }) + return predicate.Alert(sql.FieldNEQ(FieldEventsCount, v)) } // EventsCountIn applies the In predicate on the "eventsCount" field. 
func EventsCountIn(vs ...int32) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldEventsCount), v...)) - }) + return predicate.Alert(sql.FieldIn(FieldEventsCount, vs...)) } // EventsCountNotIn applies the NotIn predicate on the "eventsCount" field. func EventsCountNotIn(vs ...int32) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldEventsCount), v...)) - }) + return predicate.Alert(sql.FieldNotIn(FieldEventsCount, vs...)) } // EventsCountGT applies the GT predicate on the "eventsCount" field. func EventsCountGT(v int32) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldEventsCount), v)) - }) + return predicate.Alert(sql.FieldGT(FieldEventsCount, v)) } // EventsCountGTE applies the GTE predicate on the "eventsCount" field. func EventsCountGTE(v int32) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldEventsCount), v)) - }) + return predicate.Alert(sql.FieldGTE(FieldEventsCount, v)) } // EventsCountLT applies the LT predicate on the "eventsCount" field. func EventsCountLT(v int32) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldEventsCount), v)) - }) + return predicate.Alert(sql.FieldLT(FieldEventsCount, v)) } // EventsCountLTE applies the LTE predicate on the "eventsCount" field. func EventsCountLTE(v int32) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldEventsCount), v)) - }) + return predicate.Alert(sql.FieldLTE(FieldEventsCount, v)) } // EventsCountIsNil applies the IsNil predicate on the "eventsCount" field. 
func EventsCountIsNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldEventsCount))) - }) + return predicate.Alert(sql.FieldIsNull(FieldEventsCount)) } // EventsCountNotNil applies the NotNil predicate on the "eventsCount" field. func EventsCountNotNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldEventsCount))) - }) + return predicate.Alert(sql.FieldNotNull(FieldEventsCount)) } // StartedAtEQ applies the EQ predicate on the "startedAt" field. func StartedAtEQ(v time.Time) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldStartedAt), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldStartedAt, v)) } // StartedAtNEQ applies the NEQ predicate on the "startedAt" field. func StartedAtNEQ(v time.Time) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldStartedAt), v)) - }) + return predicate.Alert(sql.FieldNEQ(FieldStartedAt, v)) } // StartedAtIn applies the In predicate on the "startedAt" field. func StartedAtIn(vs ...time.Time) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldStartedAt), v...)) - }) + return predicate.Alert(sql.FieldIn(FieldStartedAt, vs...)) } // StartedAtNotIn applies the NotIn predicate on the "startedAt" field. func StartedAtNotIn(vs ...time.Time) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldStartedAt), v...)) - }) + return predicate.Alert(sql.FieldNotIn(FieldStartedAt, vs...)) } // StartedAtGT applies the GT predicate on the "startedAt" field. 
func StartedAtGT(v time.Time) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldStartedAt), v)) - }) + return predicate.Alert(sql.FieldGT(FieldStartedAt, v)) } // StartedAtGTE applies the GTE predicate on the "startedAt" field. func StartedAtGTE(v time.Time) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldStartedAt), v)) - }) + return predicate.Alert(sql.FieldGTE(FieldStartedAt, v)) } // StartedAtLT applies the LT predicate on the "startedAt" field. func StartedAtLT(v time.Time) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldStartedAt), v)) - }) + return predicate.Alert(sql.FieldLT(FieldStartedAt, v)) } // StartedAtLTE applies the LTE predicate on the "startedAt" field. func StartedAtLTE(v time.Time) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldStartedAt), v)) - }) + return predicate.Alert(sql.FieldLTE(FieldStartedAt, v)) } // StartedAtIsNil applies the IsNil predicate on the "startedAt" field. func StartedAtIsNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldStartedAt))) - }) + return predicate.Alert(sql.FieldIsNull(FieldStartedAt)) } // StartedAtNotNil applies the NotNil predicate on the "startedAt" field. func StartedAtNotNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldStartedAt))) - }) + return predicate.Alert(sql.FieldNotNull(FieldStartedAt)) } // StoppedAtEQ applies the EQ predicate on the "stoppedAt" field. func StoppedAtEQ(v time.Time) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldStoppedAt), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldStoppedAt, v)) } // StoppedAtNEQ applies the NEQ predicate on the "stoppedAt" field. 
func StoppedAtNEQ(v time.Time) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldStoppedAt), v)) - }) + return predicate.Alert(sql.FieldNEQ(FieldStoppedAt, v)) } // StoppedAtIn applies the In predicate on the "stoppedAt" field. func StoppedAtIn(vs ...time.Time) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldStoppedAt), v...)) - }) + return predicate.Alert(sql.FieldIn(FieldStoppedAt, vs...)) } // StoppedAtNotIn applies the NotIn predicate on the "stoppedAt" field. func StoppedAtNotIn(vs ...time.Time) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldStoppedAt), v...)) - }) + return predicate.Alert(sql.FieldNotIn(FieldStoppedAt, vs...)) } // StoppedAtGT applies the GT predicate on the "stoppedAt" field. func StoppedAtGT(v time.Time) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldStoppedAt), v)) - }) + return predicate.Alert(sql.FieldGT(FieldStoppedAt, v)) } // StoppedAtGTE applies the GTE predicate on the "stoppedAt" field. func StoppedAtGTE(v time.Time) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldStoppedAt), v)) - }) + return predicate.Alert(sql.FieldGTE(FieldStoppedAt, v)) } // StoppedAtLT applies the LT predicate on the "stoppedAt" field. func StoppedAtLT(v time.Time) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldStoppedAt), v)) - }) + return predicate.Alert(sql.FieldLT(FieldStoppedAt, v)) } // StoppedAtLTE applies the LTE predicate on the "stoppedAt" field. 
func StoppedAtLTE(v time.Time) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldStoppedAt), v)) - }) + return predicate.Alert(sql.FieldLTE(FieldStoppedAt, v)) } // StoppedAtIsNil applies the IsNil predicate on the "stoppedAt" field. func StoppedAtIsNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldStoppedAt))) - }) + return predicate.Alert(sql.FieldIsNull(FieldStoppedAt)) } // StoppedAtNotNil applies the NotNil predicate on the "stoppedAt" field. func StoppedAtNotNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldStoppedAt))) - }) + return predicate.Alert(sql.FieldNotNull(FieldStoppedAt)) } // SourceIpEQ applies the EQ predicate on the "sourceIp" field. func SourceIpEQ(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSourceIp), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldSourceIp, v)) } // SourceIpNEQ applies the NEQ predicate on the "sourceIp" field. func SourceIpNEQ(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldSourceIp), v)) - }) + return predicate.Alert(sql.FieldNEQ(FieldSourceIp, v)) } // SourceIpIn applies the In predicate on the "sourceIp" field. func SourceIpIn(vs ...string) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldSourceIp), v...)) - }) + return predicate.Alert(sql.FieldIn(FieldSourceIp, vs...)) } // SourceIpNotIn applies the NotIn predicate on the "sourceIp" field. 
func SourceIpNotIn(vs ...string) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldSourceIp), v...)) - }) + return predicate.Alert(sql.FieldNotIn(FieldSourceIp, vs...)) } // SourceIpGT applies the GT predicate on the "sourceIp" field. func SourceIpGT(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldSourceIp), v)) - }) + return predicate.Alert(sql.FieldGT(FieldSourceIp, v)) } // SourceIpGTE applies the GTE predicate on the "sourceIp" field. func SourceIpGTE(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldSourceIp), v)) - }) + return predicate.Alert(sql.FieldGTE(FieldSourceIp, v)) } // SourceIpLT applies the LT predicate on the "sourceIp" field. func SourceIpLT(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldSourceIp), v)) - }) + return predicate.Alert(sql.FieldLT(FieldSourceIp, v)) } // SourceIpLTE applies the LTE predicate on the "sourceIp" field. func SourceIpLTE(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldSourceIp), v)) - }) + return predicate.Alert(sql.FieldLTE(FieldSourceIp, v)) } // SourceIpContains applies the Contains predicate on the "sourceIp" field. func SourceIpContains(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldSourceIp), v)) - }) + return predicate.Alert(sql.FieldContains(FieldSourceIp, v)) } // SourceIpHasPrefix applies the HasPrefix predicate on the "sourceIp" field. 
func SourceIpHasPrefix(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldSourceIp), v)) - }) + return predicate.Alert(sql.FieldHasPrefix(FieldSourceIp, v)) } // SourceIpHasSuffix applies the HasSuffix predicate on the "sourceIp" field. func SourceIpHasSuffix(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldSourceIp), v)) - }) + return predicate.Alert(sql.FieldHasSuffix(FieldSourceIp, v)) } // SourceIpIsNil applies the IsNil predicate on the "sourceIp" field. func SourceIpIsNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldSourceIp))) - }) + return predicate.Alert(sql.FieldIsNull(FieldSourceIp)) } // SourceIpNotNil applies the NotNil predicate on the "sourceIp" field. func SourceIpNotNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldSourceIp))) - }) + return predicate.Alert(sql.FieldNotNull(FieldSourceIp)) } // SourceIpEqualFold applies the EqualFold predicate on the "sourceIp" field. func SourceIpEqualFold(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldSourceIp), v)) - }) + return predicate.Alert(sql.FieldEqualFold(FieldSourceIp, v)) } // SourceIpContainsFold applies the ContainsFold predicate on the "sourceIp" field. func SourceIpContainsFold(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldSourceIp), v)) - }) + return predicate.Alert(sql.FieldContainsFold(FieldSourceIp, v)) } // SourceRangeEQ applies the EQ predicate on the "sourceRange" field. 
func SourceRangeEQ(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSourceRange), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldSourceRange, v)) } // SourceRangeNEQ applies the NEQ predicate on the "sourceRange" field. func SourceRangeNEQ(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldSourceRange), v)) - }) + return predicate.Alert(sql.FieldNEQ(FieldSourceRange, v)) } // SourceRangeIn applies the In predicate on the "sourceRange" field. func SourceRangeIn(vs ...string) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldSourceRange), v...)) - }) + return predicate.Alert(sql.FieldIn(FieldSourceRange, vs...)) } // SourceRangeNotIn applies the NotIn predicate on the "sourceRange" field. func SourceRangeNotIn(vs ...string) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldSourceRange), v...)) - }) + return predicate.Alert(sql.FieldNotIn(FieldSourceRange, vs...)) } // SourceRangeGT applies the GT predicate on the "sourceRange" field. func SourceRangeGT(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldSourceRange), v)) - }) + return predicate.Alert(sql.FieldGT(FieldSourceRange, v)) } // SourceRangeGTE applies the GTE predicate on the "sourceRange" field. func SourceRangeGTE(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldSourceRange), v)) - }) + return predicate.Alert(sql.FieldGTE(FieldSourceRange, v)) } // SourceRangeLT applies the LT predicate on the "sourceRange" field. 
func SourceRangeLT(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldSourceRange), v)) - }) + return predicate.Alert(sql.FieldLT(FieldSourceRange, v)) } // SourceRangeLTE applies the LTE predicate on the "sourceRange" field. func SourceRangeLTE(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldSourceRange), v)) - }) + return predicate.Alert(sql.FieldLTE(FieldSourceRange, v)) } // SourceRangeContains applies the Contains predicate on the "sourceRange" field. func SourceRangeContains(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldSourceRange), v)) - }) + return predicate.Alert(sql.FieldContains(FieldSourceRange, v)) } // SourceRangeHasPrefix applies the HasPrefix predicate on the "sourceRange" field. func SourceRangeHasPrefix(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldSourceRange), v)) - }) + return predicate.Alert(sql.FieldHasPrefix(FieldSourceRange, v)) } // SourceRangeHasSuffix applies the HasSuffix predicate on the "sourceRange" field. func SourceRangeHasSuffix(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldSourceRange), v)) - }) + return predicate.Alert(sql.FieldHasSuffix(FieldSourceRange, v)) } // SourceRangeIsNil applies the IsNil predicate on the "sourceRange" field. func SourceRangeIsNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldSourceRange))) - }) + return predicate.Alert(sql.FieldIsNull(FieldSourceRange)) } // SourceRangeNotNil applies the NotNil predicate on the "sourceRange" field. 
func SourceRangeNotNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldSourceRange))) - }) + return predicate.Alert(sql.FieldNotNull(FieldSourceRange)) } // SourceRangeEqualFold applies the EqualFold predicate on the "sourceRange" field. func SourceRangeEqualFold(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldSourceRange), v)) - }) + return predicate.Alert(sql.FieldEqualFold(FieldSourceRange, v)) } // SourceRangeContainsFold applies the ContainsFold predicate on the "sourceRange" field. func SourceRangeContainsFold(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldSourceRange), v)) - }) + return predicate.Alert(sql.FieldContainsFold(FieldSourceRange, v)) } // SourceAsNumberEQ applies the EQ predicate on the "sourceAsNumber" field. func SourceAsNumberEQ(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSourceAsNumber), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldSourceAsNumber, v)) } // SourceAsNumberNEQ applies the NEQ predicate on the "sourceAsNumber" field. func SourceAsNumberNEQ(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldSourceAsNumber), v)) - }) + return predicate.Alert(sql.FieldNEQ(FieldSourceAsNumber, v)) } // SourceAsNumberIn applies the In predicate on the "sourceAsNumber" field. func SourceAsNumberIn(vs ...string) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldSourceAsNumber), v...)) - }) + return predicate.Alert(sql.FieldIn(FieldSourceAsNumber, vs...)) } // SourceAsNumberNotIn applies the NotIn predicate on the "sourceAsNumber" field. 
func SourceAsNumberNotIn(vs ...string) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldSourceAsNumber), v...)) - }) + return predicate.Alert(sql.FieldNotIn(FieldSourceAsNumber, vs...)) } // SourceAsNumberGT applies the GT predicate on the "sourceAsNumber" field. func SourceAsNumberGT(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldSourceAsNumber), v)) - }) + return predicate.Alert(sql.FieldGT(FieldSourceAsNumber, v)) } // SourceAsNumberGTE applies the GTE predicate on the "sourceAsNumber" field. func SourceAsNumberGTE(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldSourceAsNumber), v)) - }) + return predicate.Alert(sql.FieldGTE(FieldSourceAsNumber, v)) } // SourceAsNumberLT applies the LT predicate on the "sourceAsNumber" field. func SourceAsNumberLT(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldSourceAsNumber), v)) - }) + return predicate.Alert(sql.FieldLT(FieldSourceAsNumber, v)) } // SourceAsNumberLTE applies the LTE predicate on the "sourceAsNumber" field. func SourceAsNumberLTE(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldSourceAsNumber), v)) - }) + return predicate.Alert(sql.FieldLTE(FieldSourceAsNumber, v)) } // SourceAsNumberContains applies the Contains predicate on the "sourceAsNumber" field. func SourceAsNumberContains(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldSourceAsNumber), v)) - }) + return predicate.Alert(sql.FieldContains(FieldSourceAsNumber, v)) } // SourceAsNumberHasPrefix applies the HasPrefix predicate on the "sourceAsNumber" field. 
func SourceAsNumberHasPrefix(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldSourceAsNumber), v)) - }) + return predicate.Alert(sql.FieldHasPrefix(FieldSourceAsNumber, v)) } // SourceAsNumberHasSuffix applies the HasSuffix predicate on the "sourceAsNumber" field. func SourceAsNumberHasSuffix(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldSourceAsNumber), v)) - }) + return predicate.Alert(sql.FieldHasSuffix(FieldSourceAsNumber, v)) } // SourceAsNumberIsNil applies the IsNil predicate on the "sourceAsNumber" field. func SourceAsNumberIsNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldSourceAsNumber))) - }) + return predicate.Alert(sql.FieldIsNull(FieldSourceAsNumber)) } // SourceAsNumberNotNil applies the NotNil predicate on the "sourceAsNumber" field. func SourceAsNumberNotNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldSourceAsNumber))) - }) + return predicate.Alert(sql.FieldNotNull(FieldSourceAsNumber)) } // SourceAsNumberEqualFold applies the EqualFold predicate on the "sourceAsNumber" field. func SourceAsNumberEqualFold(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldSourceAsNumber), v)) - }) + return predicate.Alert(sql.FieldEqualFold(FieldSourceAsNumber, v)) } // SourceAsNumberContainsFold applies the ContainsFold predicate on the "sourceAsNumber" field. func SourceAsNumberContainsFold(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldSourceAsNumber), v)) - }) + return predicate.Alert(sql.FieldContainsFold(FieldSourceAsNumber, v)) } // SourceAsNameEQ applies the EQ predicate on the "sourceAsName" field. 
func SourceAsNameEQ(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSourceAsName), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldSourceAsName, v)) } // SourceAsNameNEQ applies the NEQ predicate on the "sourceAsName" field. func SourceAsNameNEQ(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldSourceAsName), v)) - }) + return predicate.Alert(sql.FieldNEQ(FieldSourceAsName, v)) } // SourceAsNameIn applies the In predicate on the "sourceAsName" field. func SourceAsNameIn(vs ...string) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldSourceAsName), v...)) - }) + return predicate.Alert(sql.FieldIn(FieldSourceAsName, vs...)) } // SourceAsNameNotIn applies the NotIn predicate on the "sourceAsName" field. func SourceAsNameNotIn(vs ...string) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldSourceAsName), v...)) - }) + return predicate.Alert(sql.FieldNotIn(FieldSourceAsName, vs...)) } // SourceAsNameGT applies the GT predicate on the "sourceAsName" field. func SourceAsNameGT(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldSourceAsName), v)) - }) + return predicate.Alert(sql.FieldGT(FieldSourceAsName, v)) } // SourceAsNameGTE applies the GTE predicate on the "sourceAsName" field. func SourceAsNameGTE(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldSourceAsName), v)) - }) + return predicate.Alert(sql.FieldGTE(FieldSourceAsName, v)) } // SourceAsNameLT applies the LT predicate on the "sourceAsName" field. 
func SourceAsNameLT(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldSourceAsName), v)) - }) + return predicate.Alert(sql.FieldLT(FieldSourceAsName, v)) } // SourceAsNameLTE applies the LTE predicate on the "sourceAsName" field. func SourceAsNameLTE(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldSourceAsName), v)) - }) + return predicate.Alert(sql.FieldLTE(FieldSourceAsName, v)) } // SourceAsNameContains applies the Contains predicate on the "sourceAsName" field. func SourceAsNameContains(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldSourceAsName), v)) - }) + return predicate.Alert(sql.FieldContains(FieldSourceAsName, v)) } // SourceAsNameHasPrefix applies the HasPrefix predicate on the "sourceAsName" field. func SourceAsNameHasPrefix(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldSourceAsName), v)) - }) + return predicate.Alert(sql.FieldHasPrefix(FieldSourceAsName, v)) } // SourceAsNameHasSuffix applies the HasSuffix predicate on the "sourceAsName" field. func SourceAsNameHasSuffix(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldSourceAsName), v)) - }) + return predicate.Alert(sql.FieldHasSuffix(FieldSourceAsName, v)) } // SourceAsNameIsNil applies the IsNil predicate on the "sourceAsName" field. func SourceAsNameIsNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldSourceAsName))) - }) + return predicate.Alert(sql.FieldIsNull(FieldSourceAsName)) } // SourceAsNameNotNil applies the NotNil predicate on the "sourceAsName" field. 
func SourceAsNameNotNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldSourceAsName))) - }) + return predicate.Alert(sql.FieldNotNull(FieldSourceAsName)) } // SourceAsNameEqualFold applies the EqualFold predicate on the "sourceAsName" field. func SourceAsNameEqualFold(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldSourceAsName), v)) - }) + return predicate.Alert(sql.FieldEqualFold(FieldSourceAsName, v)) } // SourceAsNameContainsFold applies the ContainsFold predicate on the "sourceAsName" field. func SourceAsNameContainsFold(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldSourceAsName), v)) - }) + return predicate.Alert(sql.FieldContainsFold(FieldSourceAsName, v)) } // SourceCountryEQ applies the EQ predicate on the "sourceCountry" field. func SourceCountryEQ(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSourceCountry), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldSourceCountry, v)) } // SourceCountryNEQ applies the NEQ predicate on the "sourceCountry" field. func SourceCountryNEQ(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldSourceCountry), v)) - }) + return predicate.Alert(sql.FieldNEQ(FieldSourceCountry, v)) } // SourceCountryIn applies the In predicate on the "sourceCountry" field. func SourceCountryIn(vs ...string) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldSourceCountry), v...)) - }) + return predicate.Alert(sql.FieldIn(FieldSourceCountry, vs...)) } // SourceCountryNotIn applies the NotIn predicate on the "sourceCountry" field. 
func SourceCountryNotIn(vs ...string) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldSourceCountry), v...)) - }) + return predicate.Alert(sql.FieldNotIn(FieldSourceCountry, vs...)) } // SourceCountryGT applies the GT predicate on the "sourceCountry" field. func SourceCountryGT(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldSourceCountry), v)) - }) + return predicate.Alert(sql.FieldGT(FieldSourceCountry, v)) } // SourceCountryGTE applies the GTE predicate on the "sourceCountry" field. func SourceCountryGTE(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldSourceCountry), v)) - }) + return predicate.Alert(sql.FieldGTE(FieldSourceCountry, v)) } // SourceCountryLT applies the LT predicate on the "sourceCountry" field. func SourceCountryLT(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldSourceCountry), v)) - }) + return predicate.Alert(sql.FieldLT(FieldSourceCountry, v)) } // SourceCountryLTE applies the LTE predicate on the "sourceCountry" field. func SourceCountryLTE(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldSourceCountry), v)) - }) + return predicate.Alert(sql.FieldLTE(FieldSourceCountry, v)) } // SourceCountryContains applies the Contains predicate on the "sourceCountry" field. func SourceCountryContains(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldSourceCountry), v)) - }) + return predicate.Alert(sql.FieldContains(FieldSourceCountry, v)) } // SourceCountryHasPrefix applies the HasPrefix predicate on the "sourceCountry" field. 
func SourceCountryHasPrefix(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldSourceCountry), v)) - }) + return predicate.Alert(sql.FieldHasPrefix(FieldSourceCountry, v)) } // SourceCountryHasSuffix applies the HasSuffix predicate on the "sourceCountry" field. func SourceCountryHasSuffix(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldSourceCountry), v)) - }) + return predicate.Alert(sql.FieldHasSuffix(FieldSourceCountry, v)) } // SourceCountryIsNil applies the IsNil predicate on the "sourceCountry" field. func SourceCountryIsNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldSourceCountry))) - }) + return predicate.Alert(sql.FieldIsNull(FieldSourceCountry)) } // SourceCountryNotNil applies the NotNil predicate on the "sourceCountry" field. func SourceCountryNotNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldSourceCountry))) - }) + return predicate.Alert(sql.FieldNotNull(FieldSourceCountry)) } // SourceCountryEqualFold applies the EqualFold predicate on the "sourceCountry" field. func SourceCountryEqualFold(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldSourceCountry), v)) - }) + return predicate.Alert(sql.FieldEqualFold(FieldSourceCountry, v)) } // SourceCountryContainsFold applies the ContainsFold predicate on the "sourceCountry" field. func SourceCountryContainsFold(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldSourceCountry), v)) - }) + return predicate.Alert(sql.FieldContainsFold(FieldSourceCountry, v)) } // SourceLatitudeEQ applies the EQ predicate on the "sourceLatitude" field. 
func SourceLatitudeEQ(v float32) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSourceLatitude), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldSourceLatitude, v)) } // SourceLatitudeNEQ applies the NEQ predicate on the "sourceLatitude" field. func SourceLatitudeNEQ(v float32) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldSourceLatitude), v)) - }) + return predicate.Alert(sql.FieldNEQ(FieldSourceLatitude, v)) } // SourceLatitudeIn applies the In predicate on the "sourceLatitude" field. func SourceLatitudeIn(vs ...float32) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldSourceLatitude), v...)) - }) + return predicate.Alert(sql.FieldIn(FieldSourceLatitude, vs...)) } // SourceLatitudeNotIn applies the NotIn predicate on the "sourceLatitude" field. func SourceLatitudeNotIn(vs ...float32) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldSourceLatitude), v...)) - }) + return predicate.Alert(sql.FieldNotIn(FieldSourceLatitude, vs...)) } // SourceLatitudeGT applies the GT predicate on the "sourceLatitude" field. func SourceLatitudeGT(v float32) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldSourceLatitude), v)) - }) + return predicate.Alert(sql.FieldGT(FieldSourceLatitude, v)) } // SourceLatitudeGTE applies the GTE predicate on the "sourceLatitude" field. -func SourceLatitudeGTE(v float32) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldSourceLatitude), v)) - }) +func SourceLatitudeGTE(v float32) predicate.Alert { + return predicate.Alert(sql.FieldGTE(FieldSourceLatitude, v)) } // SourceLatitudeLT applies the LT predicate on the "sourceLatitude" field. 
func SourceLatitudeLT(v float32) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldSourceLatitude), v)) - }) + return predicate.Alert(sql.FieldLT(FieldSourceLatitude, v)) } // SourceLatitudeLTE applies the LTE predicate on the "sourceLatitude" field. func SourceLatitudeLTE(v float32) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldSourceLatitude), v)) - }) + return predicate.Alert(sql.FieldLTE(FieldSourceLatitude, v)) } // SourceLatitudeIsNil applies the IsNil predicate on the "sourceLatitude" field. func SourceLatitudeIsNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldSourceLatitude))) - }) + return predicate.Alert(sql.FieldIsNull(FieldSourceLatitude)) } // SourceLatitudeNotNil applies the NotNil predicate on the "sourceLatitude" field. func SourceLatitudeNotNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldSourceLatitude))) - }) + return predicate.Alert(sql.FieldNotNull(FieldSourceLatitude)) } // SourceLongitudeEQ applies the EQ predicate on the "sourceLongitude" field. func SourceLongitudeEQ(v float32) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSourceLongitude), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldSourceLongitude, v)) } // SourceLongitudeNEQ applies the NEQ predicate on the "sourceLongitude" field. func SourceLongitudeNEQ(v float32) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldSourceLongitude), v)) - }) + return predicate.Alert(sql.FieldNEQ(FieldSourceLongitude, v)) } // SourceLongitudeIn applies the In predicate on the "sourceLongitude" field. 
func SourceLongitudeIn(vs ...float32) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldSourceLongitude), v...)) - }) + return predicate.Alert(sql.FieldIn(FieldSourceLongitude, vs...)) } // SourceLongitudeNotIn applies the NotIn predicate on the "sourceLongitude" field. func SourceLongitudeNotIn(vs ...float32) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldSourceLongitude), v...)) - }) + return predicate.Alert(sql.FieldNotIn(FieldSourceLongitude, vs...)) } // SourceLongitudeGT applies the GT predicate on the "sourceLongitude" field. func SourceLongitudeGT(v float32) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldSourceLongitude), v)) - }) + return predicate.Alert(sql.FieldGT(FieldSourceLongitude, v)) } // SourceLongitudeGTE applies the GTE predicate on the "sourceLongitude" field. func SourceLongitudeGTE(v float32) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldSourceLongitude), v)) - }) + return predicate.Alert(sql.FieldGTE(FieldSourceLongitude, v)) } // SourceLongitudeLT applies the LT predicate on the "sourceLongitude" field. func SourceLongitudeLT(v float32) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldSourceLongitude), v)) - }) + return predicate.Alert(sql.FieldLT(FieldSourceLongitude, v)) } // SourceLongitudeLTE applies the LTE predicate on the "sourceLongitude" field. func SourceLongitudeLTE(v float32) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldSourceLongitude), v)) - }) + return predicate.Alert(sql.FieldLTE(FieldSourceLongitude, v)) } // SourceLongitudeIsNil applies the IsNil predicate on the "sourceLongitude" field. 
func SourceLongitudeIsNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldSourceLongitude))) - }) + return predicate.Alert(sql.FieldIsNull(FieldSourceLongitude)) } // SourceLongitudeNotNil applies the NotNil predicate on the "sourceLongitude" field. func SourceLongitudeNotNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldSourceLongitude))) - }) + return predicate.Alert(sql.FieldNotNull(FieldSourceLongitude)) } // SourceScopeEQ applies the EQ predicate on the "sourceScope" field. func SourceScopeEQ(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSourceScope), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldSourceScope, v)) } // SourceScopeNEQ applies the NEQ predicate on the "sourceScope" field. func SourceScopeNEQ(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldSourceScope), v)) - }) + return predicate.Alert(sql.FieldNEQ(FieldSourceScope, v)) } // SourceScopeIn applies the In predicate on the "sourceScope" field. func SourceScopeIn(vs ...string) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldSourceScope), v...)) - }) + return predicate.Alert(sql.FieldIn(FieldSourceScope, vs...)) } // SourceScopeNotIn applies the NotIn predicate on the "sourceScope" field. func SourceScopeNotIn(vs ...string) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldSourceScope), v...)) - }) + return predicate.Alert(sql.FieldNotIn(FieldSourceScope, vs...)) } // SourceScopeGT applies the GT predicate on the "sourceScope" field. 
func SourceScopeGT(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldSourceScope), v)) - }) + return predicate.Alert(sql.FieldGT(FieldSourceScope, v)) } // SourceScopeGTE applies the GTE predicate on the "sourceScope" field. func SourceScopeGTE(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldSourceScope), v)) - }) + return predicate.Alert(sql.FieldGTE(FieldSourceScope, v)) } // SourceScopeLT applies the LT predicate on the "sourceScope" field. func SourceScopeLT(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldSourceScope), v)) - }) + return predicate.Alert(sql.FieldLT(FieldSourceScope, v)) } // SourceScopeLTE applies the LTE predicate on the "sourceScope" field. func SourceScopeLTE(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldSourceScope), v)) - }) + return predicate.Alert(sql.FieldLTE(FieldSourceScope, v)) } // SourceScopeContains applies the Contains predicate on the "sourceScope" field. func SourceScopeContains(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldSourceScope), v)) - }) + return predicate.Alert(sql.FieldContains(FieldSourceScope, v)) } // SourceScopeHasPrefix applies the HasPrefix predicate on the "sourceScope" field. func SourceScopeHasPrefix(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldSourceScope), v)) - }) + return predicate.Alert(sql.FieldHasPrefix(FieldSourceScope, v)) } // SourceScopeHasSuffix applies the HasSuffix predicate on the "sourceScope" field. 
func SourceScopeHasSuffix(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldSourceScope), v)) - }) + return predicate.Alert(sql.FieldHasSuffix(FieldSourceScope, v)) } // SourceScopeIsNil applies the IsNil predicate on the "sourceScope" field. func SourceScopeIsNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldSourceScope))) - }) + return predicate.Alert(sql.FieldIsNull(FieldSourceScope)) } // SourceScopeNotNil applies the NotNil predicate on the "sourceScope" field. func SourceScopeNotNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldSourceScope))) - }) + return predicate.Alert(sql.FieldNotNull(FieldSourceScope)) } // SourceScopeEqualFold applies the EqualFold predicate on the "sourceScope" field. func SourceScopeEqualFold(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldSourceScope), v)) - }) + return predicate.Alert(sql.FieldEqualFold(FieldSourceScope, v)) } // SourceScopeContainsFold applies the ContainsFold predicate on the "sourceScope" field. func SourceScopeContainsFold(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldSourceScope), v)) - }) + return predicate.Alert(sql.FieldContainsFold(FieldSourceScope, v)) } // SourceValueEQ applies the EQ predicate on the "sourceValue" field. func SourceValueEQ(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSourceValue), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldSourceValue, v)) } // SourceValueNEQ applies the NEQ predicate on the "sourceValue" field. 
func SourceValueNEQ(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldSourceValue), v)) - }) + return predicate.Alert(sql.FieldNEQ(FieldSourceValue, v)) } // SourceValueIn applies the In predicate on the "sourceValue" field. func SourceValueIn(vs ...string) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldSourceValue), v...)) - }) + return predicate.Alert(sql.FieldIn(FieldSourceValue, vs...)) } // SourceValueNotIn applies the NotIn predicate on the "sourceValue" field. func SourceValueNotIn(vs ...string) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldSourceValue), v...)) - }) + return predicate.Alert(sql.FieldNotIn(FieldSourceValue, vs...)) } // SourceValueGT applies the GT predicate on the "sourceValue" field. func SourceValueGT(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldSourceValue), v)) - }) + return predicate.Alert(sql.FieldGT(FieldSourceValue, v)) } // SourceValueGTE applies the GTE predicate on the "sourceValue" field. func SourceValueGTE(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldSourceValue), v)) - }) + return predicate.Alert(sql.FieldGTE(FieldSourceValue, v)) } // SourceValueLT applies the LT predicate on the "sourceValue" field. func SourceValueLT(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldSourceValue), v)) - }) + return predicate.Alert(sql.FieldLT(FieldSourceValue, v)) } // SourceValueLTE applies the LTE predicate on the "sourceValue" field. 
func SourceValueLTE(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldSourceValue), v)) - }) + return predicate.Alert(sql.FieldLTE(FieldSourceValue, v)) } // SourceValueContains applies the Contains predicate on the "sourceValue" field. func SourceValueContains(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldSourceValue), v)) - }) + return predicate.Alert(sql.FieldContains(FieldSourceValue, v)) } // SourceValueHasPrefix applies the HasPrefix predicate on the "sourceValue" field. func SourceValueHasPrefix(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldSourceValue), v)) - }) + return predicate.Alert(sql.FieldHasPrefix(FieldSourceValue, v)) } // SourceValueHasSuffix applies the HasSuffix predicate on the "sourceValue" field. func SourceValueHasSuffix(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldSourceValue), v)) - }) + return predicate.Alert(sql.FieldHasSuffix(FieldSourceValue, v)) } // SourceValueIsNil applies the IsNil predicate on the "sourceValue" field. func SourceValueIsNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldSourceValue))) - }) + return predicate.Alert(sql.FieldIsNull(FieldSourceValue)) } // SourceValueNotNil applies the NotNil predicate on the "sourceValue" field. func SourceValueNotNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldSourceValue))) - }) + return predicate.Alert(sql.FieldNotNull(FieldSourceValue)) } // SourceValueEqualFold applies the EqualFold predicate on the "sourceValue" field. 
func SourceValueEqualFold(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldSourceValue), v)) - }) + return predicate.Alert(sql.FieldEqualFold(FieldSourceValue, v)) } // SourceValueContainsFold applies the ContainsFold predicate on the "sourceValue" field. func SourceValueContainsFold(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldSourceValue), v)) - }) + return predicate.Alert(sql.FieldContainsFold(FieldSourceValue, v)) } // CapacityEQ applies the EQ predicate on the "capacity" field. func CapacityEQ(v int32) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldCapacity), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldCapacity, v)) } // CapacityNEQ applies the NEQ predicate on the "capacity" field. func CapacityNEQ(v int32) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldCapacity), v)) - }) + return predicate.Alert(sql.FieldNEQ(FieldCapacity, v)) } // CapacityIn applies the In predicate on the "capacity" field. func CapacityIn(vs ...int32) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldCapacity), v...)) - }) + return predicate.Alert(sql.FieldIn(FieldCapacity, vs...)) } // CapacityNotIn applies the NotIn predicate on the "capacity" field. func CapacityNotIn(vs ...int32) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldCapacity), v...)) - }) + return predicate.Alert(sql.FieldNotIn(FieldCapacity, vs...)) } // CapacityGT applies the GT predicate on the "capacity" field. 
func CapacityGT(v int32) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldCapacity), v)) - }) + return predicate.Alert(sql.FieldGT(FieldCapacity, v)) } // CapacityGTE applies the GTE predicate on the "capacity" field. func CapacityGTE(v int32) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldCapacity), v)) - }) + return predicate.Alert(sql.FieldGTE(FieldCapacity, v)) } // CapacityLT applies the LT predicate on the "capacity" field. func CapacityLT(v int32) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldCapacity), v)) - }) + return predicate.Alert(sql.FieldLT(FieldCapacity, v)) } // CapacityLTE applies the LTE predicate on the "capacity" field. func CapacityLTE(v int32) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldCapacity), v)) - }) + return predicate.Alert(sql.FieldLTE(FieldCapacity, v)) } // CapacityIsNil applies the IsNil predicate on the "capacity" field. func CapacityIsNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldCapacity))) - }) + return predicate.Alert(sql.FieldIsNull(FieldCapacity)) } // CapacityNotNil applies the NotNil predicate on the "capacity" field. func CapacityNotNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldCapacity))) - }) + return predicate.Alert(sql.FieldNotNull(FieldCapacity)) } // LeakSpeedEQ applies the EQ predicate on the "leakSpeed" field. func LeakSpeedEQ(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldLeakSpeed), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldLeakSpeed, v)) } // LeakSpeedNEQ applies the NEQ predicate on the "leakSpeed" field. 
func LeakSpeedNEQ(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldLeakSpeed), v)) - }) + return predicate.Alert(sql.FieldNEQ(FieldLeakSpeed, v)) } // LeakSpeedIn applies the In predicate on the "leakSpeed" field. func LeakSpeedIn(vs ...string) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldLeakSpeed), v...)) - }) + return predicate.Alert(sql.FieldIn(FieldLeakSpeed, vs...)) } // LeakSpeedNotIn applies the NotIn predicate on the "leakSpeed" field. func LeakSpeedNotIn(vs ...string) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldLeakSpeed), v...)) - }) + return predicate.Alert(sql.FieldNotIn(FieldLeakSpeed, vs...)) } // LeakSpeedGT applies the GT predicate on the "leakSpeed" field. func LeakSpeedGT(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldLeakSpeed), v)) - }) + return predicate.Alert(sql.FieldGT(FieldLeakSpeed, v)) } // LeakSpeedGTE applies the GTE predicate on the "leakSpeed" field. func LeakSpeedGTE(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldLeakSpeed), v)) - }) + return predicate.Alert(sql.FieldGTE(FieldLeakSpeed, v)) } // LeakSpeedLT applies the LT predicate on the "leakSpeed" field. func LeakSpeedLT(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldLeakSpeed), v)) - }) + return predicate.Alert(sql.FieldLT(FieldLeakSpeed, v)) } // LeakSpeedLTE applies the LTE predicate on the "leakSpeed" field. 
func LeakSpeedLTE(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldLeakSpeed), v)) - }) + return predicate.Alert(sql.FieldLTE(FieldLeakSpeed, v)) } // LeakSpeedContains applies the Contains predicate on the "leakSpeed" field. func LeakSpeedContains(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldLeakSpeed), v)) - }) + return predicate.Alert(sql.FieldContains(FieldLeakSpeed, v)) } // LeakSpeedHasPrefix applies the HasPrefix predicate on the "leakSpeed" field. func LeakSpeedHasPrefix(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldLeakSpeed), v)) - }) + return predicate.Alert(sql.FieldHasPrefix(FieldLeakSpeed, v)) } // LeakSpeedHasSuffix applies the HasSuffix predicate on the "leakSpeed" field. func LeakSpeedHasSuffix(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldLeakSpeed), v)) - }) + return predicate.Alert(sql.FieldHasSuffix(FieldLeakSpeed, v)) } // LeakSpeedIsNil applies the IsNil predicate on the "leakSpeed" field. func LeakSpeedIsNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldLeakSpeed))) - }) + return predicate.Alert(sql.FieldIsNull(FieldLeakSpeed)) } // LeakSpeedNotNil applies the NotNil predicate on the "leakSpeed" field. func LeakSpeedNotNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldLeakSpeed))) - }) + return predicate.Alert(sql.FieldNotNull(FieldLeakSpeed)) } // LeakSpeedEqualFold applies the EqualFold predicate on the "leakSpeed" field. 
func LeakSpeedEqualFold(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldLeakSpeed), v)) - }) + return predicate.Alert(sql.FieldEqualFold(FieldLeakSpeed, v)) } // LeakSpeedContainsFold applies the ContainsFold predicate on the "leakSpeed" field. func LeakSpeedContainsFold(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldLeakSpeed), v)) - }) + return predicate.Alert(sql.FieldContainsFold(FieldLeakSpeed, v)) } // ScenarioVersionEQ applies the EQ predicate on the "scenarioVersion" field. func ScenarioVersionEQ(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldScenarioVersion), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldScenarioVersion, v)) } // ScenarioVersionNEQ applies the NEQ predicate on the "scenarioVersion" field. func ScenarioVersionNEQ(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldScenarioVersion), v)) - }) + return predicate.Alert(sql.FieldNEQ(FieldScenarioVersion, v)) } // ScenarioVersionIn applies the In predicate on the "scenarioVersion" field. func ScenarioVersionIn(vs ...string) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldScenarioVersion), v...)) - }) + return predicate.Alert(sql.FieldIn(FieldScenarioVersion, vs...)) } // ScenarioVersionNotIn applies the NotIn predicate on the "scenarioVersion" field. func ScenarioVersionNotIn(vs ...string) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldScenarioVersion), v...)) - }) + return predicate.Alert(sql.FieldNotIn(FieldScenarioVersion, vs...)) } // ScenarioVersionGT applies the GT predicate on the "scenarioVersion" field. 
func ScenarioVersionGT(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldScenarioVersion), v)) - }) + return predicate.Alert(sql.FieldGT(FieldScenarioVersion, v)) } // ScenarioVersionGTE applies the GTE predicate on the "scenarioVersion" field. func ScenarioVersionGTE(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldScenarioVersion), v)) - }) + return predicate.Alert(sql.FieldGTE(FieldScenarioVersion, v)) } // ScenarioVersionLT applies the LT predicate on the "scenarioVersion" field. func ScenarioVersionLT(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldScenarioVersion), v)) - }) + return predicate.Alert(sql.FieldLT(FieldScenarioVersion, v)) } // ScenarioVersionLTE applies the LTE predicate on the "scenarioVersion" field. func ScenarioVersionLTE(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldScenarioVersion), v)) - }) + return predicate.Alert(sql.FieldLTE(FieldScenarioVersion, v)) } // ScenarioVersionContains applies the Contains predicate on the "scenarioVersion" field. func ScenarioVersionContains(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldScenarioVersion), v)) - }) + return predicate.Alert(sql.FieldContains(FieldScenarioVersion, v)) } // ScenarioVersionHasPrefix applies the HasPrefix predicate on the "scenarioVersion" field. func ScenarioVersionHasPrefix(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldScenarioVersion), v)) - }) + return predicate.Alert(sql.FieldHasPrefix(FieldScenarioVersion, v)) } // ScenarioVersionHasSuffix applies the HasSuffix predicate on the "scenarioVersion" field. 
func ScenarioVersionHasSuffix(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldScenarioVersion), v)) - }) + return predicate.Alert(sql.FieldHasSuffix(FieldScenarioVersion, v)) } // ScenarioVersionIsNil applies the IsNil predicate on the "scenarioVersion" field. func ScenarioVersionIsNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldScenarioVersion))) - }) + return predicate.Alert(sql.FieldIsNull(FieldScenarioVersion)) } // ScenarioVersionNotNil applies the NotNil predicate on the "scenarioVersion" field. func ScenarioVersionNotNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldScenarioVersion))) - }) + return predicate.Alert(sql.FieldNotNull(FieldScenarioVersion)) } // ScenarioVersionEqualFold applies the EqualFold predicate on the "scenarioVersion" field. func ScenarioVersionEqualFold(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldScenarioVersion), v)) - }) + return predicate.Alert(sql.FieldEqualFold(FieldScenarioVersion, v)) } // ScenarioVersionContainsFold applies the ContainsFold predicate on the "scenarioVersion" field. func ScenarioVersionContainsFold(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldScenarioVersion), v)) - }) + return predicate.Alert(sql.FieldContainsFold(FieldScenarioVersion, v)) } // ScenarioHashEQ applies the EQ predicate on the "scenarioHash" field. func ScenarioHashEQ(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldScenarioHash), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldScenarioHash, v)) } // ScenarioHashNEQ applies the NEQ predicate on the "scenarioHash" field. 
func ScenarioHashNEQ(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldScenarioHash), v)) - }) + return predicate.Alert(sql.FieldNEQ(FieldScenarioHash, v)) } // ScenarioHashIn applies the In predicate on the "scenarioHash" field. func ScenarioHashIn(vs ...string) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldScenarioHash), v...)) - }) + return predicate.Alert(sql.FieldIn(FieldScenarioHash, vs...)) } // ScenarioHashNotIn applies the NotIn predicate on the "scenarioHash" field. func ScenarioHashNotIn(vs ...string) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldScenarioHash), v...)) - }) + return predicate.Alert(sql.FieldNotIn(FieldScenarioHash, vs...)) } // ScenarioHashGT applies the GT predicate on the "scenarioHash" field. func ScenarioHashGT(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldScenarioHash), v)) - }) + return predicate.Alert(sql.FieldGT(FieldScenarioHash, v)) } // ScenarioHashGTE applies the GTE predicate on the "scenarioHash" field. func ScenarioHashGTE(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldScenarioHash), v)) - }) + return predicate.Alert(sql.FieldGTE(FieldScenarioHash, v)) } // ScenarioHashLT applies the LT predicate on the "scenarioHash" field. func ScenarioHashLT(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldScenarioHash), v)) - }) + return predicate.Alert(sql.FieldLT(FieldScenarioHash, v)) } // ScenarioHashLTE applies the LTE predicate on the "scenarioHash" field. 
func ScenarioHashLTE(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldScenarioHash), v)) - }) + return predicate.Alert(sql.FieldLTE(FieldScenarioHash, v)) } // ScenarioHashContains applies the Contains predicate on the "scenarioHash" field. func ScenarioHashContains(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldScenarioHash), v)) - }) + return predicate.Alert(sql.FieldContains(FieldScenarioHash, v)) } // ScenarioHashHasPrefix applies the HasPrefix predicate on the "scenarioHash" field. func ScenarioHashHasPrefix(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldScenarioHash), v)) - }) + return predicate.Alert(sql.FieldHasPrefix(FieldScenarioHash, v)) } // ScenarioHashHasSuffix applies the HasSuffix predicate on the "scenarioHash" field. func ScenarioHashHasSuffix(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldScenarioHash), v)) - }) + return predicate.Alert(sql.FieldHasSuffix(FieldScenarioHash, v)) } // ScenarioHashIsNil applies the IsNil predicate on the "scenarioHash" field. func ScenarioHashIsNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldScenarioHash))) - }) + return predicate.Alert(sql.FieldIsNull(FieldScenarioHash)) } // ScenarioHashNotNil applies the NotNil predicate on the "scenarioHash" field. func ScenarioHashNotNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldScenarioHash))) - }) + return predicate.Alert(sql.FieldNotNull(FieldScenarioHash)) } // ScenarioHashEqualFold applies the EqualFold predicate on the "scenarioHash" field. 
func ScenarioHashEqualFold(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldScenarioHash), v)) - }) + return predicate.Alert(sql.FieldEqualFold(FieldScenarioHash, v)) } // ScenarioHashContainsFold applies the ContainsFold predicate on the "scenarioHash" field. func ScenarioHashContainsFold(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldScenarioHash), v)) - }) + return predicate.Alert(sql.FieldContainsFold(FieldScenarioHash, v)) } // SimulatedEQ applies the EQ predicate on the "simulated" field. func SimulatedEQ(v bool) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSimulated), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldSimulated, v)) } // SimulatedNEQ applies the NEQ predicate on the "simulated" field. func SimulatedNEQ(v bool) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldSimulated), v)) - }) + return predicate.Alert(sql.FieldNEQ(FieldSimulated, v)) } // UUIDEQ applies the EQ predicate on the "uuid" field. func UUIDEQ(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldUUID), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldUUID, v)) } // UUIDNEQ applies the NEQ predicate on the "uuid" field. func UUIDNEQ(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldUUID), v)) - }) + return predicate.Alert(sql.FieldNEQ(FieldUUID, v)) } // UUIDIn applies the In predicate on the "uuid" field. func UUIDIn(vs ...string) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldUUID), v...)) - }) + return predicate.Alert(sql.FieldIn(FieldUUID, vs...)) } // UUIDNotIn applies the NotIn predicate on the "uuid" field. 
func UUIDNotIn(vs ...string) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldUUID), v...)) - }) + return predicate.Alert(sql.FieldNotIn(FieldUUID, vs...)) } // UUIDGT applies the GT predicate on the "uuid" field. func UUIDGT(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldUUID), v)) - }) + return predicate.Alert(sql.FieldGT(FieldUUID, v)) } // UUIDGTE applies the GTE predicate on the "uuid" field. func UUIDGTE(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldUUID), v)) - }) + return predicate.Alert(sql.FieldGTE(FieldUUID, v)) } // UUIDLT applies the LT predicate on the "uuid" field. func UUIDLT(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldUUID), v)) - }) + return predicate.Alert(sql.FieldLT(FieldUUID, v)) } // UUIDLTE applies the LTE predicate on the "uuid" field. func UUIDLTE(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldUUID), v)) - }) + return predicate.Alert(sql.FieldLTE(FieldUUID, v)) } // UUIDContains applies the Contains predicate on the "uuid" field. func UUIDContains(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldUUID), v)) - }) + return predicate.Alert(sql.FieldContains(FieldUUID, v)) } // UUIDHasPrefix applies the HasPrefix predicate on the "uuid" field. func UUIDHasPrefix(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldUUID), v)) - }) + return predicate.Alert(sql.FieldHasPrefix(FieldUUID, v)) } // UUIDHasSuffix applies the HasSuffix predicate on the "uuid" field. 
func UUIDHasSuffix(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldUUID), v)) - }) + return predicate.Alert(sql.FieldHasSuffix(FieldUUID, v)) } // UUIDIsNil applies the IsNil predicate on the "uuid" field. func UUIDIsNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldUUID))) - }) + return predicate.Alert(sql.FieldIsNull(FieldUUID)) } // UUIDNotNil applies the NotNil predicate on the "uuid" field. func UUIDNotNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldUUID))) - }) + return predicate.Alert(sql.FieldNotNull(FieldUUID)) } // UUIDEqualFold applies the EqualFold predicate on the "uuid" field. func UUIDEqualFold(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldUUID), v)) - }) + return predicate.Alert(sql.FieldEqualFold(FieldUUID, v)) } // UUIDContainsFold applies the ContainsFold predicate on the "uuid" field. func UUIDContainsFold(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldUUID), v)) - }) + return predicate.Alert(sql.FieldContainsFold(FieldUUID, v)) } // HasOwner applies the HasEdge predicate on the "owner" edge. @@ -2453,7 +1625,6 @@ func HasOwner() predicate.Alert { return predicate.Alert(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(OwnerTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn), ) sqlgraph.HasNeighbors(s, step) @@ -2463,11 +1634,7 @@ func HasOwner() predicate.Alert { // HasOwnerWith applies the HasEdge predicate on the "owner" edge with a given conditions (other predicates). 
func HasOwnerWith(preds ...predicate.Machine) predicate.Alert { return predicate.Alert(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(OwnerInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn), - ) + step := newOwnerStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -2481,7 +1648,6 @@ func HasDecisions() predicate.Alert { return predicate.Alert(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(DecisionsTable, FieldID), sqlgraph.Edge(sqlgraph.O2M, false, DecisionsTable, DecisionsColumn), ) sqlgraph.HasNeighbors(s, step) @@ -2491,11 +1657,7 @@ func HasDecisions() predicate.Alert { // HasDecisionsWith applies the HasEdge predicate on the "decisions" edge with a given conditions (other predicates). func HasDecisionsWith(preds ...predicate.Decision) predicate.Alert { return predicate.Alert(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(DecisionsInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, false, DecisionsTable, DecisionsColumn), - ) + step := newDecisionsStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -2509,7 +1671,6 @@ func HasEvents() predicate.Alert { return predicate.Alert(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(EventsTable, FieldID), sqlgraph.Edge(sqlgraph.O2M, false, EventsTable, EventsColumn), ) sqlgraph.HasNeighbors(s, step) @@ -2519,11 +1680,7 @@ func HasEvents() predicate.Alert { // HasEventsWith applies the HasEdge predicate on the "events" edge with a given conditions (other predicates). 
func HasEventsWith(preds ...predicate.Event) predicate.Alert { return predicate.Alert(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(EventsInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, false, EventsTable, EventsColumn), - ) + step := newEventsStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -2537,7 +1694,6 @@ func HasMetas() predicate.Alert { return predicate.Alert(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(MetasTable, FieldID), sqlgraph.Edge(sqlgraph.O2M, false, MetasTable, MetasColumn), ) sqlgraph.HasNeighbors(s, step) @@ -2547,11 +1703,7 @@ func HasMetas() predicate.Alert { // HasMetasWith applies the HasEdge predicate on the "metas" edge with a given conditions (other predicates). func HasMetasWith(preds ...predicate.Meta) predicate.Alert { return predicate.Alert(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(MetasInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, false, MetasTable, MetasColumn), - ) + step := newMetasStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -2562,32 +1714,15 @@ func HasMetasWith(preds ...predicate.Meta) predicate.Alert { // And groups predicates with the AND operator between them. func And(predicates ...predicate.Alert) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for _, p := range predicates { - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Alert(sql.AndPredicates(predicates...)) } // Or groups predicates with the OR operator between them. 
func Or(predicates ...predicate.Alert) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for i, p := range predicates { - if i > 0 { - s1.Or() - } - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Alert(sql.OrPredicates(predicates...)) } // Not applies the not operator on the given predicate. func Not(p predicate.Alert) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - p(s.Not()) - }) + return predicate.Alert(sql.NotPredicates(p)) } diff --git a/pkg/database/ent/alert_create.go b/pkg/database/ent/alert_create.go index 42da5b137ba..c7498442c06 100644 --- a/pkg/database/ent/alert_create.go +++ b/pkg/database/ent/alert_create.go @@ -409,50 +409,8 @@ func (ac *AlertCreate) Mutation() *AlertMutation { // Save creates the Alert in the database. func (ac *AlertCreate) Save(ctx context.Context) (*Alert, error) { - var ( - err error - node *Alert - ) ac.defaults() - if len(ac.hooks) == 0 { - if err = ac.check(); err != nil { - return nil, err - } - node, err = ac.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*AlertMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = ac.check(); err != nil { - return nil, err - } - ac.mutation = mutation - if node, err = ac.sqlSave(ctx); err != nil { - return nil, err - } - mutation.id = &node.ID - mutation.done = true - return node, err - }) - for i := len(ac.hooks) - 1; i >= 0; i-- { - if ac.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = ac.hooks[i](mut) - } - v, err := mut.Mutate(ctx, ac.mutation) - if err != nil { - return nil, err - } - nv, ok := v.(*Alert) - if !ok { - return nil, fmt.Errorf("unexpected node type %T returned from AlertMutation", v) - } - node = nv - } - return node, err + return withHooks(ctx, ac.sqlSave, ac.mutation, ac.hooks) } // SaveX calls Save and panics 
if Save returns an error. @@ -525,6 +483,9 @@ func (ac *AlertCreate) check() error { } func (ac *AlertCreate) sqlSave(ctx context.Context) (*Alert, error) { + if err := ac.check(); err != nil { + return nil, err + } _node, _spec := ac.createSpec() if err := sqlgraph.CreateNode(ctx, ac.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { @@ -534,202 +495,106 @@ func (ac *AlertCreate) sqlSave(ctx context.Context) (*Alert, error) { } id := _spec.ID.Value.(int64) _node.ID = int(id) + ac.mutation.id = &_node.ID + ac.mutation.done = true return _node, nil } func (ac *AlertCreate) createSpec() (*Alert, *sqlgraph.CreateSpec) { var ( _node = &Alert{config: ac.config} - _spec = &sqlgraph.CreateSpec{ - Table: alert.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: alert.FieldID, - }, - } + _spec = sqlgraph.NewCreateSpec(alert.Table, sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt)) ) if value, ok := ac.mutation.CreatedAt(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: alert.FieldCreatedAt, - }) + _spec.SetField(alert.FieldCreatedAt, field.TypeTime, value) _node.CreatedAt = &value } if value, ok := ac.mutation.UpdatedAt(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: alert.FieldUpdatedAt, - }) + _spec.SetField(alert.FieldUpdatedAt, field.TypeTime, value) _node.UpdatedAt = &value } if value, ok := ac.mutation.Scenario(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldScenario, - }) + _spec.SetField(alert.FieldScenario, field.TypeString, value) _node.Scenario = value } if value, ok := ac.mutation.BucketId(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldBucketId, - }) + _spec.SetField(alert.FieldBucketId, field.TypeString, value) _node.BucketId = value 
} if value, ok := ac.mutation.Message(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldMessage, - }) + _spec.SetField(alert.FieldMessage, field.TypeString, value) _node.Message = value } if value, ok := ac.mutation.EventsCount(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeInt32, - Value: value, - Column: alert.FieldEventsCount, - }) + _spec.SetField(alert.FieldEventsCount, field.TypeInt32, value) _node.EventsCount = value } if value, ok := ac.mutation.StartedAt(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: alert.FieldStartedAt, - }) + _spec.SetField(alert.FieldStartedAt, field.TypeTime, value) _node.StartedAt = value } if value, ok := ac.mutation.StoppedAt(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: alert.FieldStoppedAt, - }) + _spec.SetField(alert.FieldStoppedAt, field.TypeTime, value) _node.StoppedAt = value } if value, ok := ac.mutation.SourceIp(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldSourceIp, - }) + _spec.SetField(alert.FieldSourceIp, field.TypeString, value) _node.SourceIp = value } if value, ok := ac.mutation.SourceRange(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldSourceRange, - }) + _spec.SetField(alert.FieldSourceRange, field.TypeString, value) _node.SourceRange = value } if value, ok := ac.mutation.SourceAsNumber(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldSourceAsNumber, - }) + _spec.SetField(alert.FieldSourceAsNumber, field.TypeString, value) _node.SourceAsNumber = value } if value, ok := ac.mutation.SourceAsName(); ok { - _spec.Fields = 
append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldSourceAsName, - }) + _spec.SetField(alert.FieldSourceAsName, field.TypeString, value) _node.SourceAsName = value } if value, ok := ac.mutation.SourceCountry(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldSourceCountry, - }) + _spec.SetField(alert.FieldSourceCountry, field.TypeString, value) _node.SourceCountry = value } if value, ok := ac.mutation.SourceLatitude(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeFloat32, - Value: value, - Column: alert.FieldSourceLatitude, - }) + _spec.SetField(alert.FieldSourceLatitude, field.TypeFloat32, value) _node.SourceLatitude = value } if value, ok := ac.mutation.SourceLongitude(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeFloat32, - Value: value, - Column: alert.FieldSourceLongitude, - }) + _spec.SetField(alert.FieldSourceLongitude, field.TypeFloat32, value) _node.SourceLongitude = value } if value, ok := ac.mutation.SourceScope(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldSourceScope, - }) + _spec.SetField(alert.FieldSourceScope, field.TypeString, value) _node.SourceScope = value } if value, ok := ac.mutation.SourceValue(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldSourceValue, - }) + _spec.SetField(alert.FieldSourceValue, field.TypeString, value) _node.SourceValue = value } if value, ok := ac.mutation.Capacity(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeInt32, - Value: value, - Column: alert.FieldCapacity, - }) + _spec.SetField(alert.FieldCapacity, field.TypeInt32, value) _node.Capacity = value } if value, ok := ac.mutation.LeakSpeed(); ok { - _spec.Fields 
= append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldLeakSpeed, - }) + _spec.SetField(alert.FieldLeakSpeed, field.TypeString, value) _node.LeakSpeed = value } if value, ok := ac.mutation.ScenarioVersion(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldScenarioVersion, - }) + _spec.SetField(alert.FieldScenarioVersion, field.TypeString, value) _node.ScenarioVersion = value } if value, ok := ac.mutation.ScenarioHash(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldScenarioHash, - }) + _spec.SetField(alert.FieldScenarioHash, field.TypeString, value) _node.ScenarioHash = value } if value, ok := ac.mutation.Simulated(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: alert.FieldSimulated, - }) + _spec.SetField(alert.FieldSimulated, field.TypeBool, value) _node.Simulated = value } if value, ok := ac.mutation.UUID(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldUUID, - }) + _spec.SetField(alert.FieldUUID, field.TypeString, value) _node.UUID = value } if nodes := ac.mutation.OwnerIDs(); len(nodes) > 0 { @@ -740,10 +605,7 @@ func (ac *AlertCreate) createSpec() (*Alert, *sqlgraph.CreateSpec) { Columns: []string{alert.OwnerColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: machine.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(machine.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -760,10 +622,7 @@ func (ac *AlertCreate) createSpec() (*Alert, *sqlgraph.CreateSpec) { Columns: []string{alert.DecisionsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: decision.FieldID, - }, + IDSpec: 
sqlgraph.NewFieldSpec(decision.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -779,10 +638,7 @@ func (ac *AlertCreate) createSpec() (*Alert, *sqlgraph.CreateSpec) { Columns: []string{alert.EventsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: event.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(event.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -798,10 +654,7 @@ func (ac *AlertCreate) createSpec() (*Alert, *sqlgraph.CreateSpec) { Columns: []string{alert.MetasColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: meta.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(meta.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -815,11 +668,15 @@ func (ac *AlertCreate) createSpec() (*Alert, *sqlgraph.CreateSpec) { // AlertCreateBulk is the builder for creating many Alert entities in bulk. type AlertCreateBulk struct { config + err error builders []*AlertCreate } // Save creates the Alert entities in the database. 
func (acb *AlertCreateBulk) Save(ctx context.Context) ([]*Alert, error) { + if acb.err != nil { + return nil, acb.err + } specs := make([]*sqlgraph.CreateSpec, len(acb.builders)) nodes := make([]*Alert, len(acb.builders)) mutators := make([]Mutator, len(acb.builders)) @@ -836,8 +693,8 @@ func (acb *AlertCreateBulk) Save(ctx context.Context) ([]*Alert, error) { return nil, err } builder.mutation = mutation - nodes[i], specs[i] = builder.createSpec() var err error + nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { _, err = mutators[i+1].Mutate(root, acb.builders[i+1].mutation) } else { diff --git a/pkg/database/ent/alert_delete.go b/pkg/database/ent/alert_delete.go index 014bcc2e0c6..15b3a4c822a 100644 --- a/pkg/database/ent/alert_delete.go +++ b/pkg/database/ent/alert_delete.go @@ -4,7 +4,6 @@ package ent import ( "context" - "fmt" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" @@ -28,34 +27,7 @@ func (ad *AlertDelete) Where(ps ...predicate.Alert) *AlertDelete { // Exec executes the deletion query and returns how many vertices were deleted. func (ad *AlertDelete) Exec(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(ad.hooks) == 0 { - affected, err = ad.sqlExec(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*AlertMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - ad.mutation = mutation - affected, err = ad.sqlExec(ctx) - mutation.done = true - return affected, err - }) - for i := len(ad.hooks) - 1; i >= 0; i-- { - if ad.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = ad.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, ad.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, ad.sqlExec, ad.mutation, ad.hooks) } // ExecX is like Exec, but panics if an error occurs. 
@@ -68,15 +40,7 @@ func (ad *AlertDelete) ExecX(ctx context.Context) int { } func (ad *AlertDelete) sqlExec(ctx context.Context) (int, error) { - _spec := &sqlgraph.DeleteSpec{ - Node: &sqlgraph.NodeSpec{ - Table: alert.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: alert.FieldID, - }, - }, - } + _spec := sqlgraph.NewDeleteSpec(alert.Table, sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt)) if ps := ad.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -88,6 +52,7 @@ func (ad *AlertDelete) sqlExec(ctx context.Context) (int, error) { if err != nil && sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } + ad.mutation.done = true return affected, err } @@ -96,6 +61,12 @@ type AlertDeleteOne struct { ad *AlertDelete } +// Where appends a list predicates to the AlertDelete builder. +func (ado *AlertDeleteOne) Where(ps ...predicate.Alert) *AlertDeleteOne { + ado.ad.mutation.Where(ps...) + return ado +} + // Exec executes the deletion query. func (ado *AlertDeleteOne) Exec(ctx context.Context) error { n, err := ado.ad.Exec(ctx) @@ -111,5 +82,7 @@ func (ado *AlertDeleteOne) Exec(ctx context.Context) error { // ExecX is like Exec, but panics if an error occurs. func (ado *AlertDeleteOne) ExecX(ctx context.Context) { - ado.ad.ExecX(ctx) + if err := ado.Exec(ctx); err != nil { + panic(err) + } } diff --git a/pkg/database/ent/alert_query.go b/pkg/database/ent/alert_query.go index 68789196d24..7eddb6ce024 100644 --- a/pkg/database/ent/alert_query.go +++ b/pkg/database/ent/alert_query.go @@ -22,11 +22,9 @@ import ( // AlertQuery is the builder for querying Alert entities. 
type AlertQuery struct { config - limit *int - offset *int - unique *bool - order []OrderFunc - fields []string + ctx *QueryContext + order []alert.OrderOption + inters []Interceptor predicates []predicate.Alert withOwner *MachineQuery withDecisions *DecisionQuery @@ -44,34 +42,34 @@ func (aq *AlertQuery) Where(ps ...predicate.Alert) *AlertQuery { return aq } -// Limit adds a limit step to the query. +// Limit the number of records to be returned by this query. func (aq *AlertQuery) Limit(limit int) *AlertQuery { - aq.limit = &limit + aq.ctx.Limit = &limit return aq } -// Offset adds an offset step to the query. +// Offset to start from. func (aq *AlertQuery) Offset(offset int) *AlertQuery { - aq.offset = &offset + aq.ctx.Offset = &offset return aq } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. func (aq *AlertQuery) Unique(unique bool) *AlertQuery { - aq.unique = &unique + aq.ctx.Unique = &unique return aq } -// Order adds an order step to the query. -func (aq *AlertQuery) Order(o ...OrderFunc) *AlertQuery { +// Order specifies how the records should be ordered. +func (aq *AlertQuery) Order(o ...alert.OrderOption) *AlertQuery { aq.order = append(aq.order, o...) return aq } // QueryOwner chains the current query on the "owner" edge. func (aq *AlertQuery) QueryOwner() *MachineQuery { - query := &MachineQuery{config: aq.config} + query := (&MachineClient{config: aq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := aq.prepareQuery(ctx); err != nil { return nil, err @@ -93,7 +91,7 @@ func (aq *AlertQuery) QueryOwner() *MachineQuery { // QueryDecisions chains the current query on the "decisions" edge. 
func (aq *AlertQuery) QueryDecisions() *DecisionQuery { - query := &DecisionQuery{config: aq.config} + query := (&DecisionClient{config: aq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := aq.prepareQuery(ctx); err != nil { return nil, err @@ -115,7 +113,7 @@ func (aq *AlertQuery) QueryDecisions() *DecisionQuery { // QueryEvents chains the current query on the "events" edge. func (aq *AlertQuery) QueryEvents() *EventQuery { - query := &EventQuery{config: aq.config} + query := (&EventClient{config: aq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := aq.prepareQuery(ctx); err != nil { return nil, err @@ -137,7 +135,7 @@ func (aq *AlertQuery) QueryEvents() *EventQuery { // QueryMetas chains the current query on the "metas" edge. func (aq *AlertQuery) QueryMetas() *MetaQuery { - query := &MetaQuery{config: aq.config} + query := (&MetaClient{config: aq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := aq.prepareQuery(ctx); err != nil { return nil, err @@ -160,7 +158,7 @@ func (aq *AlertQuery) QueryMetas() *MetaQuery { // First returns the first Alert entity from the query. // Returns a *NotFoundError when no Alert was found. func (aq *AlertQuery) First(ctx context.Context) (*Alert, error) { - nodes, err := aq.Limit(1).All(ctx) + nodes, err := aq.Limit(1).All(setContextOp(ctx, aq.ctx, "First")) if err != nil { return nil, err } @@ -183,7 +181,7 @@ func (aq *AlertQuery) FirstX(ctx context.Context) *Alert { // Returns a *NotFoundError when no Alert ID was found. 
func (aq *AlertQuery) FirstID(ctx context.Context) (id int, err error) { var ids []int - if ids, err = aq.Limit(1).IDs(ctx); err != nil { + if ids, err = aq.Limit(1).IDs(setContextOp(ctx, aq.ctx, "FirstID")); err != nil { return } if len(ids) == 0 { @@ -206,7 +204,7 @@ func (aq *AlertQuery) FirstIDX(ctx context.Context) int { // Returns a *NotSingularError when more than one Alert entity is found. // Returns a *NotFoundError when no Alert entities are found. func (aq *AlertQuery) Only(ctx context.Context) (*Alert, error) { - nodes, err := aq.Limit(2).All(ctx) + nodes, err := aq.Limit(2).All(setContextOp(ctx, aq.ctx, "Only")) if err != nil { return nil, err } @@ -234,7 +232,7 @@ func (aq *AlertQuery) OnlyX(ctx context.Context) *Alert { // Returns a *NotFoundError when no entities are found. func (aq *AlertQuery) OnlyID(ctx context.Context) (id int, err error) { var ids []int - if ids, err = aq.Limit(2).IDs(ctx); err != nil { + if ids, err = aq.Limit(2).IDs(setContextOp(ctx, aq.ctx, "OnlyID")); err != nil { return } switch len(ids) { @@ -259,10 +257,12 @@ func (aq *AlertQuery) OnlyIDX(ctx context.Context) int { // All executes the query and returns a list of Alerts. func (aq *AlertQuery) All(ctx context.Context) ([]*Alert, error) { + ctx = setContextOp(ctx, aq.ctx, "All") if err := aq.prepareQuery(ctx); err != nil { return nil, err } - return aq.sqlAll(ctx) + qr := querierAll[[]*Alert, *AlertQuery]() + return withInterceptors[[]*Alert](ctx, aq, qr, aq.inters) } // AllX is like All, but panics if an error occurs. @@ -275,9 +275,12 @@ func (aq *AlertQuery) AllX(ctx context.Context) []*Alert { } // IDs executes the query and returns a list of Alert IDs. 
-func (aq *AlertQuery) IDs(ctx context.Context) ([]int, error) { - var ids []int - if err := aq.Select(alert.FieldID).Scan(ctx, &ids); err != nil { +func (aq *AlertQuery) IDs(ctx context.Context) (ids []int, err error) { + if aq.ctx.Unique == nil && aq.path != nil { + aq.Unique(true) + } + ctx = setContextOp(ctx, aq.ctx, "IDs") + if err = aq.Select(alert.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil @@ -294,10 +297,11 @@ func (aq *AlertQuery) IDsX(ctx context.Context) []int { // Count returns the count of the given query. func (aq *AlertQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, aq.ctx, "Count") if err := aq.prepareQuery(ctx); err != nil { return 0, err } - return aq.sqlCount(ctx) + return withInterceptors[int](ctx, aq, querierCount[*AlertQuery](), aq.inters) } // CountX is like Count, but panics if an error occurs. @@ -311,10 +315,15 @@ func (aq *AlertQuery) CountX(ctx context.Context) int { // Exist returns true if the query has elements in the graph. func (aq *AlertQuery) Exist(ctx context.Context) (bool, error) { - if err := aq.prepareQuery(ctx); err != nil { - return false, err + ctx = setContextOp(ctx, aq.ctx, "Exist") + switch _, err := aq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil } - return aq.sqlExist(ctx) } // ExistX is like Exist, but panics if an error occurs. 
@@ -334,25 +343,24 @@ func (aq *AlertQuery) Clone() *AlertQuery { } return &AlertQuery{ config: aq.config, - limit: aq.limit, - offset: aq.offset, - order: append([]OrderFunc{}, aq.order...), + ctx: aq.ctx.Clone(), + order: append([]alert.OrderOption{}, aq.order...), + inters: append([]Interceptor{}, aq.inters...), predicates: append([]predicate.Alert{}, aq.predicates...), withOwner: aq.withOwner.Clone(), withDecisions: aq.withDecisions.Clone(), withEvents: aq.withEvents.Clone(), withMetas: aq.withMetas.Clone(), // clone intermediate query. - sql: aq.sql.Clone(), - path: aq.path, - unique: aq.unique, + sql: aq.sql.Clone(), + path: aq.path, } } // WithOwner tells the query-builder to eager-load the nodes that are connected to // the "owner" edge. The optional arguments are used to configure the query builder of the edge. func (aq *AlertQuery) WithOwner(opts ...func(*MachineQuery)) *AlertQuery { - query := &MachineQuery{config: aq.config} + query := (&MachineClient{config: aq.config}).Query() for _, opt := range opts { opt(query) } @@ -363,7 +371,7 @@ func (aq *AlertQuery) WithOwner(opts ...func(*MachineQuery)) *AlertQuery { // WithDecisions tells the query-builder to eager-load the nodes that are connected to // the "decisions" edge. The optional arguments are used to configure the query builder of the edge. func (aq *AlertQuery) WithDecisions(opts ...func(*DecisionQuery)) *AlertQuery { - query := &DecisionQuery{config: aq.config} + query := (&DecisionClient{config: aq.config}).Query() for _, opt := range opts { opt(query) } @@ -374,7 +382,7 @@ func (aq *AlertQuery) WithDecisions(opts ...func(*DecisionQuery)) *AlertQuery { // WithEvents tells the query-builder to eager-load the nodes that are connected to // the "events" edge. The optional arguments are used to configure the query builder of the edge. 
func (aq *AlertQuery) WithEvents(opts ...func(*EventQuery)) *AlertQuery { - query := &EventQuery{config: aq.config} + query := (&EventClient{config: aq.config}).Query() for _, opt := range opts { opt(query) } @@ -385,7 +393,7 @@ func (aq *AlertQuery) WithEvents(opts ...func(*EventQuery)) *AlertQuery { // WithMetas tells the query-builder to eager-load the nodes that are connected to // the "metas" edge. The optional arguments are used to configure the query builder of the edge. func (aq *AlertQuery) WithMetas(opts ...func(*MetaQuery)) *AlertQuery { - query := &MetaQuery{config: aq.config} + query := (&MetaClient{config: aq.config}).Query() for _, opt := range opts { opt(query) } @@ -408,16 +416,11 @@ func (aq *AlertQuery) WithMetas(opts ...func(*MetaQuery)) *AlertQuery { // Aggregate(ent.Count()). // Scan(ctx, &v) func (aq *AlertQuery) GroupBy(field string, fields ...string) *AlertGroupBy { - grbuild := &AlertGroupBy{config: aq.config} - grbuild.fields = append([]string{field}, fields...) - grbuild.path = func(ctx context.Context) (prev *sql.Selector, err error) { - if err := aq.prepareQuery(ctx); err != nil { - return nil, err - } - return aq.sqlQuery(ctx), nil - } + aq.ctx.Fields = append([]string{field}, fields...) + grbuild := &AlertGroupBy{build: aq} + grbuild.flds = &aq.ctx.Fields grbuild.label = alert.Label - grbuild.flds, grbuild.scan = &grbuild.fields, grbuild.Scan + grbuild.scan = grbuild.Scan return grbuild } @@ -434,15 +437,30 @@ func (aq *AlertQuery) GroupBy(field string, fields ...string) *AlertGroupBy { // Select(alert.FieldCreatedAt). // Scan(ctx, &v) func (aq *AlertQuery) Select(fields ...string) *AlertSelect { - aq.fields = append(aq.fields, fields...) - selbuild := &AlertSelect{AlertQuery: aq} - selbuild.label = alert.Label - selbuild.flds, selbuild.scan = &aq.fields, selbuild.Scan - return selbuild + aq.ctx.Fields = append(aq.ctx.Fields, fields...) 
+ sbuild := &AlertSelect{AlertQuery: aq} + sbuild.label = alert.Label + sbuild.flds, sbuild.scan = &aq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a AlertSelect configured with the given aggregations. +func (aq *AlertQuery) Aggregate(fns ...AggregateFunc) *AlertSelect { + return aq.Select().Aggregate(fns...) } func (aq *AlertQuery) prepareQuery(ctx context.Context) error { - for _, f := range aq.fields { + for _, inter := range aq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, aq); err != nil { + return err + } + } + } + for _, f := range aq.ctx.Fields { if !alert.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } @@ -536,6 +554,9 @@ func (aq *AlertQuery) loadOwner(ctx context.Context, query *MachineQuery, nodes } nodeids[fk] = append(nodeids[fk], nodes[i]) } + if len(ids) == 0 { + return nil + } query.Where(machine.IDIn(ids...)) neighbors, err := query.All(ctx) if err != nil { @@ -562,8 +583,11 @@ func (aq *AlertQuery) loadDecisions(ctx context.Context, query *DecisionQuery, n init(nodes[i]) } } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(decision.FieldAlertDecisions) + } query.Where(predicate.Decision(func(s *sql.Selector) { - s.Where(sql.InValues(alert.DecisionsColumn, fks...)) + s.Where(sql.InValues(s.C(alert.DecisionsColumn), fks...)) })) neighbors, err := query.All(ctx) if err != nil { @@ -573,7 +597,7 @@ func (aq *AlertQuery) loadDecisions(ctx context.Context, query *DecisionQuery, n fk := n.AlertDecisions node, ok := nodeids[fk] if !ok { - return fmt.Errorf(`unexpected foreign-key "alert_decisions" returned %v for node %v`, fk, n.ID) + return fmt.Errorf(`unexpected referenced foreign-key "alert_decisions" returned %v for node %v`, fk, n.ID) } assign(node, n) } @@ -589,8 +613,11 @@ func (aq *AlertQuery) loadEvents(ctx 
context.Context, query *EventQuery, nodes [ init(nodes[i]) } } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(event.FieldAlertEvents) + } query.Where(predicate.Event(func(s *sql.Selector) { - s.Where(sql.InValues(alert.EventsColumn, fks...)) + s.Where(sql.InValues(s.C(alert.EventsColumn), fks...)) })) neighbors, err := query.All(ctx) if err != nil { @@ -600,7 +627,7 @@ func (aq *AlertQuery) loadEvents(ctx context.Context, query *EventQuery, nodes [ fk := n.AlertEvents node, ok := nodeids[fk] if !ok { - return fmt.Errorf(`unexpected foreign-key "alert_events" returned %v for node %v`, fk, n.ID) + return fmt.Errorf(`unexpected referenced foreign-key "alert_events" returned %v for node %v`, fk, n.ID) } assign(node, n) } @@ -616,8 +643,11 @@ func (aq *AlertQuery) loadMetas(ctx context.Context, query *MetaQuery, nodes []* init(nodes[i]) } } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(meta.FieldAlertMetas) + } query.Where(predicate.Meta(func(s *sql.Selector) { - s.Where(sql.InValues(alert.MetasColumn, fks...)) + s.Where(sql.InValues(s.C(alert.MetasColumn), fks...)) })) neighbors, err := query.All(ctx) if err != nil { @@ -627,7 +657,7 @@ func (aq *AlertQuery) loadMetas(ctx context.Context, query *MetaQuery, nodes []* fk := n.AlertMetas node, ok := nodeids[fk] if !ok { - return fmt.Errorf(`unexpected foreign-key "alert_metas" returned %v for node %v`, fk, n.ID) + return fmt.Errorf(`unexpected referenced foreign-key "alert_metas" returned %v for node %v`, fk, n.ID) } assign(node, n) } @@ -636,41 +666,22 @@ func (aq *AlertQuery) loadMetas(ctx context.Context, query *MetaQuery, nodes []* func (aq *AlertQuery) sqlCount(ctx context.Context) (int, error) { _spec := aq.querySpec() - _spec.Node.Columns = aq.fields - if len(aq.fields) > 0 { - _spec.Unique = aq.unique != nil && *aq.unique + _spec.Node.Columns = aq.ctx.Fields + if len(aq.ctx.Fields) > 0 { + _spec.Unique = aq.ctx.Unique != nil && *aq.ctx.Unique } return sqlgraph.CountNodes(ctx, 
aq.driver, _spec) } -func (aq *AlertQuery) sqlExist(ctx context.Context) (bool, error) { - switch _, err := aq.FirstID(ctx); { - case IsNotFound(err): - return false, nil - case err != nil: - return false, fmt.Errorf("ent: check existence: %w", err) - default: - return true, nil - } -} - func (aq *AlertQuery) querySpec() *sqlgraph.QuerySpec { - _spec := &sqlgraph.QuerySpec{ - Node: &sqlgraph.NodeSpec{ - Table: alert.Table, - Columns: alert.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: alert.FieldID, - }, - }, - From: aq.sql, - Unique: true, - } - if unique := aq.unique; unique != nil { + _spec := sqlgraph.NewQuerySpec(alert.Table, alert.Columns, sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt)) + _spec.From = aq.sql + if unique := aq.ctx.Unique; unique != nil { _spec.Unique = *unique + } else if aq.path != nil { + _spec.Unique = true } - if fields := aq.fields; len(fields) > 0 { + if fields := aq.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, alert.FieldID) for i := range fields { @@ -686,10 +697,10 @@ func (aq *AlertQuery) querySpec() *sqlgraph.QuerySpec { } } } - if limit := aq.limit; limit != nil { + if limit := aq.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := aq.offset; offset != nil { + if offset := aq.ctx.Offset; offset != nil { _spec.Offset = *offset } if ps := aq.order; len(ps) > 0 { @@ -705,7 +716,7 @@ func (aq *AlertQuery) querySpec() *sqlgraph.QuerySpec { func (aq *AlertQuery) sqlQuery(ctx context.Context) *sql.Selector { builder := sql.Dialect(aq.driver.Dialect()) t1 := builder.Table(alert.Table) - columns := aq.fields + columns := aq.ctx.Fields if len(columns) == 0 { columns = alert.Columns } @@ -714,7 +725,7 @@ func (aq *AlertQuery) sqlQuery(ctx context.Context) *sql.Selector { selector = aq.sql selector.Select(selector.Columns(columns...)...) 
} - if aq.unique != nil && *aq.unique { + if aq.ctx.Unique != nil && *aq.ctx.Unique { selector.Distinct() } for _, p := range aq.predicates { @@ -723,12 +734,12 @@ func (aq *AlertQuery) sqlQuery(ctx context.Context) *sql.Selector { for _, p := range aq.order { p(selector) } - if offset := aq.offset; offset != nil { + if offset := aq.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := aq.limit; limit != nil { + if limit := aq.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector @@ -736,13 +747,8 @@ func (aq *AlertQuery) sqlQuery(ctx context.Context) *sql.Selector { // AlertGroupBy is the group-by builder for Alert entities. type AlertGroupBy struct { - config selector - fields []string - fns []AggregateFunc - // intermediate query (i.e. traversal path). - sql *sql.Selector - path func(context.Context) (*sql.Selector, error) + build *AlertQuery } // Aggregate adds the given aggregation functions to the group-by query. @@ -751,74 +757,77 @@ func (agb *AlertGroupBy) Aggregate(fns ...AggregateFunc) *AlertGroupBy { return agb } -// Scan applies the group-by query and scans the result into the given value. +// Scan applies the selector query and scans the result into the given value. 
func (agb *AlertGroupBy) Scan(ctx context.Context, v any) error { - query, err := agb.path(ctx) - if err != nil { + ctx = setContextOp(ctx, agb.build.ctx, "GroupBy") + if err := agb.build.prepareQuery(ctx); err != nil { return err } - agb.sql = query - return agb.sqlScan(ctx, v) + return scanWithInterceptors[*AlertQuery, *AlertGroupBy](ctx, agb.build, agb, agb.build.inters, v) } -func (agb *AlertGroupBy) sqlScan(ctx context.Context, v any) error { - for _, f := range agb.fields { - if !alert.ValidColumn(f) { - return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} +func (agb *AlertGroupBy) sqlScan(ctx context.Context, root *AlertQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(agb.fns)) + for _, fn := range agb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*agb.flds)+len(agb.fns)) + for _, f := range *agb.flds { + columns = append(columns, selector.C(f)) } + columns = append(columns, aggregation...) + selector.Select(columns...) } - selector := agb.sqlQuery() + selector.GroupBy(selector.Columns(*agb.flds...)...) if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := agb.driver.Query(ctx, query, args, rows); err != nil { + if err := agb.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() return sql.ScanSlice(rows, v) } -func (agb *AlertGroupBy) sqlQuery() *sql.Selector { - selector := agb.sql.Select() - aggregation := make([]string, 0, len(agb.fns)) - for _, fn := range agb.fns { - aggregation = append(aggregation, fn(selector)) - } - // If no columns were selected in a custom aggregation function, the default - // selection is the fields used for "group-by", and the aggregation functions. 
- if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(agb.fields)+len(agb.fns)) - for _, f := range agb.fields { - columns = append(columns, selector.C(f)) - } - columns = append(columns, aggregation...) - selector.Select(columns...) - } - return selector.GroupBy(selector.Columns(agb.fields...)...) -} - // AlertSelect is the builder for selecting fields of Alert entities. type AlertSelect struct { *AlertQuery selector - // intermediate query (i.e. traversal path). - sql *sql.Selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (as *AlertSelect) Aggregate(fns ...AggregateFunc) *AlertSelect { + as.fns = append(as.fns, fns...) + return as } // Scan applies the selector query and scans the result into the given value. func (as *AlertSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, as.ctx, "Select") if err := as.prepareQuery(ctx); err != nil { return err } - as.sql = as.AlertQuery.sqlQuery(ctx) - return as.sqlScan(ctx, v) + return scanWithInterceptors[*AlertQuery, *AlertSelect](ctx, as.AlertQuery, as, as.inters, v) } -func (as *AlertSelect) sqlScan(ctx context.Context, v any) error { +func (as *AlertSelect) sqlScan(ctx context.Context, root *AlertQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(as.fns)) + for _, fn := range as.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*as.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) 
+ } rows := &sql.Rows{} - query, args := as.sql.Query() + query, args := selector.Query() if err := as.driver.Query(ctx, query, args, rows); err != nil { return err } diff --git a/pkg/database/ent/alert_update.go b/pkg/database/ent/alert_update.go index aaa12ef20a3..0e41ba18109 100644 --- a/pkg/database/ent/alert_update.go +++ b/pkg/database/ent/alert_update.go @@ -624,35 +624,8 @@ func (au *AlertUpdate) RemoveMetas(m ...*Meta) *AlertUpdate { // Save executes the query and returns the number of nodes affected by the update operation. func (au *AlertUpdate) Save(ctx context.Context) (int, error) { - var ( - err error - affected int - ) au.defaults() - if len(au.hooks) == 0 { - affected, err = au.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*AlertMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - au.mutation = mutation - affected, err = au.sqlSave(ctx) - mutation.done = true - return affected, err - }) - for i := len(au.hooks) - 1; i >= 0; i-- { - if au.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = au.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, au.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, au.sqlSave, au.mutation, au.hooks) } // SaveX is like Save, but panics if an error occurs. 
@@ -690,16 +663,7 @@ func (au *AlertUpdate) defaults() { } func (au *AlertUpdate) sqlSave(ctx context.Context) (n int, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: alert.Table, - Columns: alert.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: alert.FieldID, - }, - }, - } + _spec := sqlgraph.NewUpdateSpec(alert.Table, alert.Columns, sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt)) if ps := au.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -708,319 +672,148 @@ func (au *AlertUpdate) sqlSave(ctx context.Context) (n int, err error) { } } if value, ok := au.mutation.CreatedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: alert.FieldCreatedAt, - }) + _spec.SetField(alert.FieldCreatedAt, field.TypeTime, value) } if au.mutation.CreatedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: alert.FieldCreatedAt, - }) + _spec.ClearField(alert.FieldCreatedAt, field.TypeTime) } if value, ok := au.mutation.UpdatedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: alert.FieldUpdatedAt, - }) + _spec.SetField(alert.FieldUpdatedAt, field.TypeTime, value) } if au.mutation.UpdatedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: alert.FieldUpdatedAt, - }) + _spec.ClearField(alert.FieldUpdatedAt, field.TypeTime) } if value, ok := au.mutation.Scenario(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldScenario, - }) + _spec.SetField(alert.FieldScenario, field.TypeString, value) } if value, ok := au.mutation.BucketId(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: 
field.TypeString, - Value: value, - Column: alert.FieldBucketId, - }) + _spec.SetField(alert.FieldBucketId, field.TypeString, value) } if au.mutation.BucketIdCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: alert.FieldBucketId, - }) + _spec.ClearField(alert.FieldBucketId, field.TypeString) } if value, ok := au.mutation.Message(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldMessage, - }) + _spec.SetField(alert.FieldMessage, field.TypeString, value) } if au.mutation.MessageCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: alert.FieldMessage, - }) + _spec.ClearField(alert.FieldMessage, field.TypeString) } if value, ok := au.mutation.EventsCount(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt32, - Value: value, - Column: alert.FieldEventsCount, - }) + _spec.SetField(alert.FieldEventsCount, field.TypeInt32, value) } if value, ok := au.mutation.AddedEventsCount(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt32, - Value: value, - Column: alert.FieldEventsCount, - }) + _spec.AddField(alert.FieldEventsCount, field.TypeInt32, value) } if au.mutation.EventsCountCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeInt32, - Column: alert.FieldEventsCount, - }) + _spec.ClearField(alert.FieldEventsCount, field.TypeInt32) } if value, ok := au.mutation.StartedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: alert.FieldStartedAt, - }) + _spec.SetField(alert.FieldStartedAt, field.TypeTime, value) } if au.mutation.StartedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: 
alert.FieldStartedAt, - }) + _spec.ClearField(alert.FieldStartedAt, field.TypeTime) } if value, ok := au.mutation.StoppedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: alert.FieldStoppedAt, - }) + _spec.SetField(alert.FieldStoppedAt, field.TypeTime, value) } if au.mutation.StoppedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: alert.FieldStoppedAt, - }) + _spec.ClearField(alert.FieldStoppedAt, field.TypeTime) } if value, ok := au.mutation.SourceIp(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldSourceIp, - }) + _spec.SetField(alert.FieldSourceIp, field.TypeString, value) } if au.mutation.SourceIpCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: alert.FieldSourceIp, - }) + _spec.ClearField(alert.FieldSourceIp, field.TypeString) } if value, ok := au.mutation.SourceRange(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldSourceRange, - }) + _spec.SetField(alert.FieldSourceRange, field.TypeString, value) } if au.mutation.SourceRangeCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: alert.FieldSourceRange, - }) + _spec.ClearField(alert.FieldSourceRange, field.TypeString) } if value, ok := au.mutation.SourceAsNumber(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldSourceAsNumber, - }) + _spec.SetField(alert.FieldSourceAsNumber, field.TypeString, value) } if au.mutation.SourceAsNumberCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: alert.FieldSourceAsNumber, 
- }) + _spec.ClearField(alert.FieldSourceAsNumber, field.TypeString) } if value, ok := au.mutation.SourceAsName(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldSourceAsName, - }) + _spec.SetField(alert.FieldSourceAsName, field.TypeString, value) } if au.mutation.SourceAsNameCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: alert.FieldSourceAsName, - }) + _spec.ClearField(alert.FieldSourceAsName, field.TypeString) } if value, ok := au.mutation.SourceCountry(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldSourceCountry, - }) + _spec.SetField(alert.FieldSourceCountry, field.TypeString, value) } if au.mutation.SourceCountryCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: alert.FieldSourceCountry, - }) + _spec.ClearField(alert.FieldSourceCountry, field.TypeString) } if value, ok := au.mutation.SourceLatitude(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeFloat32, - Value: value, - Column: alert.FieldSourceLatitude, - }) + _spec.SetField(alert.FieldSourceLatitude, field.TypeFloat32, value) } if value, ok := au.mutation.AddedSourceLatitude(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeFloat32, - Value: value, - Column: alert.FieldSourceLatitude, - }) + _spec.AddField(alert.FieldSourceLatitude, field.TypeFloat32, value) } if au.mutation.SourceLatitudeCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeFloat32, - Column: alert.FieldSourceLatitude, - }) + _spec.ClearField(alert.FieldSourceLatitude, field.TypeFloat32) } if value, ok := au.mutation.SourceLongitude(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, 
&sqlgraph.FieldSpec{ - Type: field.TypeFloat32, - Value: value, - Column: alert.FieldSourceLongitude, - }) + _spec.SetField(alert.FieldSourceLongitude, field.TypeFloat32, value) } if value, ok := au.mutation.AddedSourceLongitude(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeFloat32, - Value: value, - Column: alert.FieldSourceLongitude, - }) + _spec.AddField(alert.FieldSourceLongitude, field.TypeFloat32, value) } if au.mutation.SourceLongitudeCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeFloat32, - Column: alert.FieldSourceLongitude, - }) + _spec.ClearField(alert.FieldSourceLongitude, field.TypeFloat32) } if value, ok := au.mutation.SourceScope(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldSourceScope, - }) + _spec.SetField(alert.FieldSourceScope, field.TypeString, value) } if au.mutation.SourceScopeCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: alert.FieldSourceScope, - }) + _spec.ClearField(alert.FieldSourceScope, field.TypeString) } if value, ok := au.mutation.SourceValue(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldSourceValue, - }) + _spec.SetField(alert.FieldSourceValue, field.TypeString, value) } if au.mutation.SourceValueCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: alert.FieldSourceValue, - }) + _spec.ClearField(alert.FieldSourceValue, field.TypeString) } if value, ok := au.mutation.Capacity(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt32, - Value: value, - Column: alert.FieldCapacity, - }) + _spec.SetField(alert.FieldCapacity, field.TypeInt32, value) } if value, ok := 
au.mutation.AddedCapacity(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt32, - Value: value, - Column: alert.FieldCapacity, - }) + _spec.AddField(alert.FieldCapacity, field.TypeInt32, value) } if au.mutation.CapacityCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeInt32, - Column: alert.FieldCapacity, - }) + _spec.ClearField(alert.FieldCapacity, field.TypeInt32) } if value, ok := au.mutation.LeakSpeed(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldLeakSpeed, - }) + _spec.SetField(alert.FieldLeakSpeed, field.TypeString, value) } if au.mutation.LeakSpeedCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: alert.FieldLeakSpeed, - }) + _spec.ClearField(alert.FieldLeakSpeed, field.TypeString) } if value, ok := au.mutation.ScenarioVersion(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldScenarioVersion, - }) + _spec.SetField(alert.FieldScenarioVersion, field.TypeString, value) } if au.mutation.ScenarioVersionCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: alert.FieldScenarioVersion, - }) + _spec.ClearField(alert.FieldScenarioVersion, field.TypeString) } if value, ok := au.mutation.ScenarioHash(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldScenarioHash, - }) + _spec.SetField(alert.FieldScenarioHash, field.TypeString, value) } if au.mutation.ScenarioHashCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: alert.FieldScenarioHash, - }) + _spec.ClearField(alert.FieldScenarioHash, field.TypeString) } if value, 
ok := au.mutation.Simulated(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: alert.FieldSimulated, - }) + _spec.SetField(alert.FieldSimulated, field.TypeBool, value) } if value, ok := au.mutation.UUID(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldUUID, - }) + _spec.SetField(alert.FieldUUID, field.TypeString, value) } if au.mutation.UUIDCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: alert.FieldUUID, - }) + _spec.ClearField(alert.FieldUUID, field.TypeString) } if au.mutation.OwnerCleared() { edge := &sqlgraph.EdgeSpec{ @@ -1030,10 +823,7 @@ func (au *AlertUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{alert.OwnerColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: machine.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(machine.FieldID, field.TypeInt), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1046,10 +836,7 @@ func (au *AlertUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{alert.OwnerColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: machine.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(machine.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -1065,10 +852,7 @@ func (au *AlertUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{alert.DecisionsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: decision.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(decision.FieldID, field.TypeInt), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1081,10 +865,7 @@ func (au *AlertUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: 
[]string{alert.DecisionsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: decision.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(decision.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -1100,10 +881,7 @@ func (au *AlertUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{alert.DecisionsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: decision.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(decision.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -1119,10 +897,7 @@ func (au *AlertUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{alert.EventsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: event.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(event.FieldID, field.TypeInt), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1135,10 +910,7 @@ func (au *AlertUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{alert.EventsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: event.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(event.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -1154,10 +926,7 @@ func (au *AlertUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{alert.EventsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: event.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(event.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -1173,10 +942,7 @@ func (au *AlertUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{alert.MetasColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: meta.FieldID, - }, + IDSpec: 
sqlgraph.NewFieldSpec(meta.FieldID, field.TypeInt), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1189,10 +955,7 @@ func (au *AlertUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{alert.MetasColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: meta.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(meta.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -1208,10 +971,7 @@ func (au *AlertUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{alert.MetasColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: meta.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(meta.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -1227,6 +987,7 @@ func (au *AlertUpdate) sqlSave(ctx context.Context) (n int, err error) { } return 0, err } + au.mutation.done = true return n, nil } @@ -1828,6 +1589,12 @@ func (auo *AlertUpdateOne) RemoveMetas(m ...*Meta) *AlertUpdateOne { return auo.RemoveMetaIDs(ids...) } +// Where appends a list predicates to the AlertUpdate builder. +func (auo *AlertUpdateOne) Where(ps ...predicate.Alert) *AlertUpdateOne { + auo.mutation.Where(ps...) + return auo +} + // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. func (auo *AlertUpdateOne) Select(field string, fields ...string) *AlertUpdateOne { @@ -1837,41 +1604,8 @@ func (auo *AlertUpdateOne) Select(field string, fields ...string) *AlertUpdateOn // Save executes the query and returns the updated Alert entity. 
func (auo *AlertUpdateOne) Save(ctx context.Context) (*Alert, error) { - var ( - err error - node *Alert - ) auo.defaults() - if len(auo.hooks) == 0 { - node, err = auo.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*AlertMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - auo.mutation = mutation - node, err = auo.sqlSave(ctx) - mutation.done = true - return node, err - }) - for i := len(auo.hooks) - 1; i >= 0; i-- { - if auo.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = auo.hooks[i](mut) - } - v, err := mut.Mutate(ctx, auo.mutation) - if err != nil { - return nil, err - } - nv, ok := v.(*Alert) - if !ok { - return nil, fmt.Errorf("unexpected node type %T returned from AlertMutation", v) - } - node = nv - } - return node, err + return withHooks(ctx, auo.sqlSave, auo.mutation, auo.hooks) } // SaveX is like Save, but panics if an error occurs. 
@@ -1909,16 +1643,7 @@ func (auo *AlertUpdateOne) defaults() { } func (auo *AlertUpdateOne) sqlSave(ctx context.Context) (_node *Alert, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: alert.Table, - Columns: alert.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: alert.FieldID, - }, - }, - } + _spec := sqlgraph.NewUpdateSpec(alert.Table, alert.Columns, sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt)) id, ok := auo.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Alert.id" for update`)} @@ -1944,319 +1669,148 @@ func (auo *AlertUpdateOne) sqlSave(ctx context.Context) (_node *Alert, err error } } if value, ok := auo.mutation.CreatedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: alert.FieldCreatedAt, - }) + _spec.SetField(alert.FieldCreatedAt, field.TypeTime, value) } if auo.mutation.CreatedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: alert.FieldCreatedAt, - }) + _spec.ClearField(alert.FieldCreatedAt, field.TypeTime) } if value, ok := auo.mutation.UpdatedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: alert.FieldUpdatedAt, - }) + _spec.SetField(alert.FieldUpdatedAt, field.TypeTime, value) } if auo.mutation.UpdatedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: alert.FieldUpdatedAt, - }) + _spec.ClearField(alert.FieldUpdatedAt, field.TypeTime) } if value, ok := auo.mutation.Scenario(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldScenario, - }) + _spec.SetField(alert.FieldScenario, field.TypeString, value) } if value, ok := auo.mutation.BucketId(); ok { - _spec.Fields.Set = 
append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldBucketId, - }) + _spec.SetField(alert.FieldBucketId, field.TypeString, value) } if auo.mutation.BucketIdCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: alert.FieldBucketId, - }) + _spec.ClearField(alert.FieldBucketId, field.TypeString) } if value, ok := auo.mutation.Message(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldMessage, - }) + _spec.SetField(alert.FieldMessage, field.TypeString, value) } if auo.mutation.MessageCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: alert.FieldMessage, - }) + _spec.ClearField(alert.FieldMessage, field.TypeString) } if value, ok := auo.mutation.EventsCount(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt32, - Value: value, - Column: alert.FieldEventsCount, - }) + _spec.SetField(alert.FieldEventsCount, field.TypeInt32, value) } if value, ok := auo.mutation.AddedEventsCount(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt32, - Value: value, - Column: alert.FieldEventsCount, - }) + _spec.AddField(alert.FieldEventsCount, field.TypeInt32, value) } if auo.mutation.EventsCountCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeInt32, - Column: alert.FieldEventsCount, - }) + _spec.ClearField(alert.FieldEventsCount, field.TypeInt32) } if value, ok := auo.mutation.StartedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: alert.FieldStartedAt, - }) + _spec.SetField(alert.FieldStartedAt, field.TypeTime, value) } if auo.mutation.StartedAtCleared() { - _spec.Fields.Clear = 
append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: alert.FieldStartedAt, - }) + _spec.ClearField(alert.FieldStartedAt, field.TypeTime) } if value, ok := auo.mutation.StoppedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: alert.FieldStoppedAt, - }) + _spec.SetField(alert.FieldStoppedAt, field.TypeTime, value) } if auo.mutation.StoppedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: alert.FieldStoppedAt, - }) + _spec.ClearField(alert.FieldStoppedAt, field.TypeTime) } if value, ok := auo.mutation.SourceIp(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldSourceIp, - }) + _spec.SetField(alert.FieldSourceIp, field.TypeString, value) } if auo.mutation.SourceIpCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: alert.FieldSourceIp, - }) + _spec.ClearField(alert.FieldSourceIp, field.TypeString) } if value, ok := auo.mutation.SourceRange(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldSourceRange, - }) + _spec.SetField(alert.FieldSourceRange, field.TypeString, value) } if auo.mutation.SourceRangeCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: alert.FieldSourceRange, - }) + _spec.ClearField(alert.FieldSourceRange, field.TypeString) } if value, ok := auo.mutation.SourceAsNumber(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldSourceAsNumber, - }) + _spec.SetField(alert.FieldSourceAsNumber, field.TypeString, value) } if auo.mutation.SourceAsNumberCleared() { - _spec.Fields.Clear = 
append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: alert.FieldSourceAsNumber, - }) + _spec.ClearField(alert.FieldSourceAsNumber, field.TypeString) } if value, ok := auo.mutation.SourceAsName(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldSourceAsName, - }) + _spec.SetField(alert.FieldSourceAsName, field.TypeString, value) } if auo.mutation.SourceAsNameCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: alert.FieldSourceAsName, - }) + _spec.ClearField(alert.FieldSourceAsName, field.TypeString) } if value, ok := auo.mutation.SourceCountry(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldSourceCountry, - }) + _spec.SetField(alert.FieldSourceCountry, field.TypeString, value) } if auo.mutation.SourceCountryCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: alert.FieldSourceCountry, - }) + _spec.ClearField(alert.FieldSourceCountry, field.TypeString) } if value, ok := auo.mutation.SourceLatitude(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeFloat32, - Value: value, - Column: alert.FieldSourceLatitude, - }) + _spec.SetField(alert.FieldSourceLatitude, field.TypeFloat32, value) } if value, ok := auo.mutation.AddedSourceLatitude(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeFloat32, - Value: value, - Column: alert.FieldSourceLatitude, - }) + _spec.AddField(alert.FieldSourceLatitude, field.TypeFloat32, value) } if auo.mutation.SourceLatitudeCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeFloat32, - Column: alert.FieldSourceLatitude, - }) + _spec.ClearField(alert.FieldSourceLatitude, 
field.TypeFloat32) } if value, ok := auo.mutation.SourceLongitude(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeFloat32, - Value: value, - Column: alert.FieldSourceLongitude, - }) + _spec.SetField(alert.FieldSourceLongitude, field.TypeFloat32, value) } if value, ok := auo.mutation.AddedSourceLongitude(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeFloat32, - Value: value, - Column: alert.FieldSourceLongitude, - }) + _spec.AddField(alert.FieldSourceLongitude, field.TypeFloat32, value) } if auo.mutation.SourceLongitudeCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeFloat32, - Column: alert.FieldSourceLongitude, - }) + _spec.ClearField(alert.FieldSourceLongitude, field.TypeFloat32) } if value, ok := auo.mutation.SourceScope(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldSourceScope, - }) + _spec.SetField(alert.FieldSourceScope, field.TypeString, value) } if auo.mutation.SourceScopeCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: alert.FieldSourceScope, - }) + _spec.ClearField(alert.FieldSourceScope, field.TypeString) } if value, ok := auo.mutation.SourceValue(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldSourceValue, - }) + _spec.SetField(alert.FieldSourceValue, field.TypeString, value) } if auo.mutation.SourceValueCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: alert.FieldSourceValue, - }) + _spec.ClearField(alert.FieldSourceValue, field.TypeString) } if value, ok := auo.mutation.Capacity(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt32, - Value: value, - 
Column: alert.FieldCapacity, - }) + _spec.SetField(alert.FieldCapacity, field.TypeInt32, value) } if value, ok := auo.mutation.AddedCapacity(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt32, - Value: value, - Column: alert.FieldCapacity, - }) + _spec.AddField(alert.FieldCapacity, field.TypeInt32, value) } if auo.mutation.CapacityCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeInt32, - Column: alert.FieldCapacity, - }) + _spec.ClearField(alert.FieldCapacity, field.TypeInt32) } if value, ok := auo.mutation.LeakSpeed(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldLeakSpeed, - }) + _spec.SetField(alert.FieldLeakSpeed, field.TypeString, value) } if auo.mutation.LeakSpeedCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: alert.FieldLeakSpeed, - }) + _spec.ClearField(alert.FieldLeakSpeed, field.TypeString) } if value, ok := auo.mutation.ScenarioVersion(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldScenarioVersion, - }) + _spec.SetField(alert.FieldScenarioVersion, field.TypeString, value) } if auo.mutation.ScenarioVersionCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: alert.FieldScenarioVersion, - }) + _spec.ClearField(alert.FieldScenarioVersion, field.TypeString) } if value, ok := auo.mutation.ScenarioHash(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldScenarioHash, - }) + _spec.SetField(alert.FieldScenarioHash, field.TypeString, value) } if auo.mutation.ScenarioHashCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: 
field.TypeString, - Column: alert.FieldScenarioHash, - }) + _spec.ClearField(alert.FieldScenarioHash, field.TypeString) } if value, ok := auo.mutation.Simulated(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: alert.FieldSimulated, - }) + _spec.SetField(alert.FieldSimulated, field.TypeBool, value) } if value, ok := auo.mutation.UUID(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldUUID, - }) + _spec.SetField(alert.FieldUUID, field.TypeString, value) } if auo.mutation.UUIDCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: alert.FieldUUID, - }) + _spec.ClearField(alert.FieldUUID, field.TypeString) } if auo.mutation.OwnerCleared() { edge := &sqlgraph.EdgeSpec{ @@ -2266,10 +1820,7 @@ func (auo *AlertUpdateOne) sqlSave(ctx context.Context) (_node *Alert, err error Columns: []string{alert.OwnerColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: machine.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(machine.FieldID, field.TypeInt), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -2282,10 +1833,7 @@ func (auo *AlertUpdateOne) sqlSave(ctx context.Context) (_node *Alert, err error Columns: []string{alert.OwnerColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: machine.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(machine.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -2301,10 +1849,7 @@ func (auo *AlertUpdateOne) sqlSave(ctx context.Context) (_node *Alert, err error Columns: []string{alert.DecisionsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: decision.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(decision.FieldID, 
field.TypeInt), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -2317,10 +1862,7 @@ func (auo *AlertUpdateOne) sqlSave(ctx context.Context) (_node *Alert, err error Columns: []string{alert.DecisionsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: decision.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(decision.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -2336,10 +1878,7 @@ func (auo *AlertUpdateOne) sqlSave(ctx context.Context) (_node *Alert, err error Columns: []string{alert.DecisionsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: decision.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(decision.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -2355,10 +1894,7 @@ func (auo *AlertUpdateOne) sqlSave(ctx context.Context) (_node *Alert, err error Columns: []string{alert.EventsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: event.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(event.FieldID, field.TypeInt), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -2371,10 +1907,7 @@ func (auo *AlertUpdateOne) sqlSave(ctx context.Context) (_node *Alert, err error Columns: []string{alert.EventsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: event.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(event.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -2390,10 +1923,7 @@ func (auo *AlertUpdateOne) sqlSave(ctx context.Context) (_node *Alert, err error Columns: []string{alert.EventsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: event.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(event.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -2409,10 +1939,7 @@ func (auo *AlertUpdateOne) sqlSave(ctx 
context.Context) (_node *Alert, err error Columns: []string{alert.MetasColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: meta.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(meta.FieldID, field.TypeInt), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -2425,10 +1952,7 @@ func (auo *AlertUpdateOne) sqlSave(ctx context.Context) (_node *Alert, err error Columns: []string{alert.MetasColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: meta.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(meta.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -2444,10 +1968,7 @@ func (auo *AlertUpdateOne) sqlSave(ctx context.Context) (_node *Alert, err error Columns: []string{alert.MetasColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: meta.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(meta.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -2466,5 +1987,6 @@ func (auo *AlertUpdateOne) sqlSave(ctx context.Context) (_node *Alert, err error } return nil, err } + auo.mutation.done = true return _node, nil } diff --git a/pkg/database/ent/bouncer.go b/pkg/database/ent/bouncer.go index 068fc6c6713..fe189c3817e 100644 --- a/pkg/database/ent/bouncer.go +++ b/pkg/database/ent/bouncer.go @@ -7,6 +7,7 @@ import ( "strings" "time" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/crowdsecurity/crowdsec/pkg/database/ent/bouncer" ) @@ -37,7 +38,8 @@ type Bouncer struct { // LastPull holds the value of the "last_pull" field. LastPull time.Time `json:"last_pull"` // AuthType holds the value of the "auth_type" field. - AuthType string `json:"auth_type"` + AuthType string `json:"auth_type"` + selectValues sql.SelectValues } // scanValues returns the types for scanning values from sql.Rows. 
@@ -54,7 +56,7 @@ func (*Bouncer) scanValues(columns []string) ([]any, error) { case bouncer.FieldCreatedAt, bouncer.FieldUpdatedAt, bouncer.FieldUntil, bouncer.FieldLastPull: values[i] = new(sql.NullTime) default: - return nil, fmt.Errorf("unexpected column %q for type Bouncer", columns[i]) + values[i] = new(sql.UnknownType) } } return values, nil @@ -142,16 +144,24 @@ func (b *Bouncer) assignValues(columns []string, values []any) error { } else if value.Valid { b.AuthType = value.String } + default: + b.selectValues.Set(columns[i], values[i]) } } return nil } +// Value returns the ent.Value that was dynamically selected and assigned to the Bouncer. +// This includes values selected through modifiers, order, etc. +func (b *Bouncer) Value(name string) (ent.Value, error) { + return b.selectValues.Get(name) +} + // Update returns a builder for updating this Bouncer. // Note that you need to call Bouncer.Unwrap() before calling this method if this Bouncer // was returned from a transaction, and the transaction was committed or rolled back. func (b *Bouncer) Update() *BouncerUpdateOne { - return (&BouncerClient{config: b.config}).UpdateOne(b) + return NewBouncerClient(b.config).UpdateOne(b) } // Unwrap unwraps the Bouncer entity that was returned from a transaction after it was closed, @@ -212,9 +222,3 @@ func (b *Bouncer) String() string { // Bouncers is a parsable slice of Bouncer. type Bouncers []*Bouncer - -func (b Bouncers) config(cfg config) { - for _i := range b { - b[_i].config = cfg - } -} diff --git a/pkg/database/ent/bouncer/bouncer.go b/pkg/database/ent/bouncer/bouncer.go index b688594ece4..24d230d3b54 100644 --- a/pkg/database/ent/bouncer/bouncer.go +++ b/pkg/database/ent/bouncer/bouncer.go @@ -4,6 +4,8 @@ package bouncer import ( "time" + + "entgo.io/ent/dialect/sql" ) const ( @@ -81,3 +83,66 @@ var ( // DefaultAuthType holds the default value on creation for the "auth_type" field. 
DefaultAuthType string ) + +// OrderOption defines the ordering options for the Bouncer queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. +func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// ByName orders the results by the name field. +func ByName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldName, opts...).ToFunc() +} + +// ByAPIKey orders the results by the api_key field. +func ByAPIKey(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldAPIKey, opts...).ToFunc() +} + +// ByRevoked orders the results by the revoked field. +func ByRevoked(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldRevoked, opts...).ToFunc() +} + +// ByIPAddress orders the results by the ip_address field. +func ByIPAddress(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldIPAddress, opts...).ToFunc() +} + +// ByType orders the results by the type field. +func ByType(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldType, opts...).ToFunc() +} + +// ByVersion orders the results by the version field. +func ByVersion(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldVersion, opts...).ToFunc() +} + +// ByUntil orders the results by the until field. +func ByUntil(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUntil, opts...).ToFunc() +} + +// ByLastPull orders the results by the last_pull field. 
+func ByLastPull(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldLastPull, opts...).ToFunc() +} + +// ByAuthType orders the results by the auth_type field. +func ByAuthType(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldAuthType, opts...).ToFunc() +} diff --git a/pkg/database/ent/bouncer/where.go b/pkg/database/ent/bouncer/where.go index 03a543f6d4f..5bf721dbf51 100644 --- a/pkg/database/ent/bouncer/where.go +++ b/pkg/database/ent/bouncer/where.go @@ -11,1128 +11,735 @@ import ( // ID filters vertices based on their ID field. func ID(id int) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.Bouncer(sql.FieldEQ(FieldID, id)) } // IDEQ applies the EQ predicate on the ID field. func IDEQ(id int) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.Bouncer(sql.FieldEQ(FieldID, id)) } // IDNEQ applies the NEQ predicate on the ID field. func IDNEQ(id int) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldID), id)) - }) + return predicate.Bouncer(sql.FieldNEQ(FieldID, id)) } // IDIn applies the In predicate on the ID field. func IDIn(ids ...int) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - v := make([]any, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.In(s.C(FieldID), v...)) - }) + return predicate.Bouncer(sql.FieldIn(FieldID, ids...)) } // IDNotIn applies the NotIn predicate on the ID field. func IDNotIn(ids ...int) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - v := make([]any, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.NotIn(s.C(FieldID), v...)) - }) + return predicate.Bouncer(sql.FieldNotIn(FieldID, ids...)) } // IDGT applies the GT predicate on the ID field. 
func IDGT(id int) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldID), id)) - }) + return predicate.Bouncer(sql.FieldGT(FieldID, id)) } // IDGTE applies the GTE predicate on the ID field. func IDGTE(id int) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldID), id)) - }) + return predicate.Bouncer(sql.FieldGTE(FieldID, id)) } // IDLT applies the LT predicate on the ID field. func IDLT(id int) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldID), id)) - }) + return predicate.Bouncer(sql.FieldLT(FieldID, id)) } // IDLTE applies the LTE predicate on the ID field. func IDLTE(id int) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldID), id)) - }) + return predicate.Bouncer(sql.FieldLTE(FieldID, id)) } // CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. func CreatedAt(v time.Time) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldCreatedAt), v)) - }) + return predicate.Bouncer(sql.FieldEQ(FieldCreatedAt, v)) } // UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. func UpdatedAt(v time.Time) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldUpdatedAt), v)) - }) + return predicate.Bouncer(sql.FieldEQ(FieldUpdatedAt, v)) } // Name applies equality check predicate on the "name" field. It's identical to NameEQ. func Name(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldName), v)) - }) + return predicate.Bouncer(sql.FieldEQ(FieldName, v)) } // APIKey applies equality check predicate on the "api_key" field. It's identical to APIKeyEQ. 
func APIKey(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldAPIKey), v)) - }) + return predicate.Bouncer(sql.FieldEQ(FieldAPIKey, v)) } // Revoked applies equality check predicate on the "revoked" field. It's identical to RevokedEQ. func Revoked(v bool) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldRevoked), v)) - }) + return predicate.Bouncer(sql.FieldEQ(FieldRevoked, v)) } // IPAddress applies equality check predicate on the "ip_address" field. It's identical to IPAddressEQ. func IPAddress(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldIPAddress), v)) - }) + return predicate.Bouncer(sql.FieldEQ(FieldIPAddress, v)) } // Type applies equality check predicate on the "type" field. It's identical to TypeEQ. func Type(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldType), v)) - }) + return predicate.Bouncer(sql.FieldEQ(FieldType, v)) } // Version applies equality check predicate on the "version" field. It's identical to VersionEQ. func Version(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldVersion), v)) - }) + return predicate.Bouncer(sql.FieldEQ(FieldVersion, v)) } // Until applies equality check predicate on the "until" field. It's identical to UntilEQ. func Until(v time.Time) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldUntil), v)) - }) + return predicate.Bouncer(sql.FieldEQ(FieldUntil, v)) } // LastPull applies equality check predicate on the "last_pull" field. It's identical to LastPullEQ. 
func LastPull(v time.Time) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldLastPull), v)) - }) + return predicate.Bouncer(sql.FieldEQ(FieldLastPull, v)) } // AuthType applies equality check predicate on the "auth_type" field. It's identical to AuthTypeEQ. func AuthType(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldAuthType), v)) - }) + return predicate.Bouncer(sql.FieldEQ(FieldAuthType, v)) } // CreatedAtEQ applies the EQ predicate on the "created_at" field. func CreatedAtEQ(v time.Time) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldCreatedAt), v)) - }) + return predicate.Bouncer(sql.FieldEQ(FieldCreatedAt, v)) } // CreatedAtNEQ applies the NEQ predicate on the "created_at" field. func CreatedAtNEQ(v time.Time) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldCreatedAt), v)) - }) + return predicate.Bouncer(sql.FieldNEQ(FieldCreatedAt, v)) } // CreatedAtIn applies the In predicate on the "created_at" field. func CreatedAtIn(vs ...time.Time) predicate.Bouncer { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldCreatedAt), v...)) - }) + return predicate.Bouncer(sql.FieldIn(FieldCreatedAt, vs...)) } // CreatedAtNotIn applies the NotIn predicate on the "created_at" field. func CreatedAtNotIn(vs ...time.Time) predicate.Bouncer { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldCreatedAt), v...)) - }) + return predicate.Bouncer(sql.FieldNotIn(FieldCreatedAt, vs...)) } // CreatedAtGT applies the GT predicate on the "created_at" field. 
func CreatedAtGT(v time.Time) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldCreatedAt), v)) - }) + return predicate.Bouncer(sql.FieldGT(FieldCreatedAt, v)) } // CreatedAtGTE applies the GTE predicate on the "created_at" field. func CreatedAtGTE(v time.Time) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldCreatedAt), v)) - }) + return predicate.Bouncer(sql.FieldGTE(FieldCreatedAt, v)) } // CreatedAtLT applies the LT predicate on the "created_at" field. func CreatedAtLT(v time.Time) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldCreatedAt), v)) - }) + return predicate.Bouncer(sql.FieldLT(FieldCreatedAt, v)) } // CreatedAtLTE applies the LTE predicate on the "created_at" field. func CreatedAtLTE(v time.Time) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldCreatedAt), v)) - }) + return predicate.Bouncer(sql.FieldLTE(FieldCreatedAt, v)) } // CreatedAtIsNil applies the IsNil predicate on the "created_at" field. func CreatedAtIsNil() predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldCreatedAt))) - }) + return predicate.Bouncer(sql.FieldIsNull(FieldCreatedAt)) } // CreatedAtNotNil applies the NotNil predicate on the "created_at" field. func CreatedAtNotNil() predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldCreatedAt))) - }) + return predicate.Bouncer(sql.FieldNotNull(FieldCreatedAt)) } // UpdatedAtEQ applies the EQ predicate on the "updated_at" field. func UpdatedAtEQ(v time.Time) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldUpdatedAt), v)) - }) + return predicate.Bouncer(sql.FieldEQ(FieldUpdatedAt, v)) } // UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. 
func UpdatedAtNEQ(v time.Time) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldUpdatedAt), v)) - }) + return predicate.Bouncer(sql.FieldNEQ(FieldUpdatedAt, v)) } // UpdatedAtIn applies the In predicate on the "updated_at" field. func UpdatedAtIn(vs ...time.Time) predicate.Bouncer { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldUpdatedAt), v...)) - }) + return predicate.Bouncer(sql.FieldIn(FieldUpdatedAt, vs...)) } // UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. func UpdatedAtNotIn(vs ...time.Time) predicate.Bouncer { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldUpdatedAt), v...)) - }) + return predicate.Bouncer(sql.FieldNotIn(FieldUpdatedAt, vs...)) } // UpdatedAtGT applies the GT predicate on the "updated_at" field. func UpdatedAtGT(v time.Time) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldUpdatedAt), v)) - }) + return predicate.Bouncer(sql.FieldGT(FieldUpdatedAt, v)) } // UpdatedAtGTE applies the GTE predicate on the "updated_at" field. func UpdatedAtGTE(v time.Time) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldUpdatedAt), v)) - }) + return predicate.Bouncer(sql.FieldGTE(FieldUpdatedAt, v)) } // UpdatedAtLT applies the LT predicate on the "updated_at" field. func UpdatedAtLT(v time.Time) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldUpdatedAt), v)) - }) + return predicate.Bouncer(sql.FieldLT(FieldUpdatedAt, v)) } // UpdatedAtLTE applies the LTE predicate on the "updated_at" field. 
func UpdatedAtLTE(v time.Time) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldUpdatedAt), v)) - }) + return predicate.Bouncer(sql.FieldLTE(FieldUpdatedAt, v)) } // UpdatedAtIsNil applies the IsNil predicate on the "updated_at" field. func UpdatedAtIsNil() predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldUpdatedAt))) - }) + return predicate.Bouncer(sql.FieldIsNull(FieldUpdatedAt)) } // UpdatedAtNotNil applies the NotNil predicate on the "updated_at" field. func UpdatedAtNotNil() predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldUpdatedAt))) - }) + return predicate.Bouncer(sql.FieldNotNull(FieldUpdatedAt)) } // NameEQ applies the EQ predicate on the "name" field. func NameEQ(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldName), v)) - }) + return predicate.Bouncer(sql.FieldEQ(FieldName, v)) } // NameNEQ applies the NEQ predicate on the "name" field. func NameNEQ(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldName), v)) - }) + return predicate.Bouncer(sql.FieldNEQ(FieldName, v)) } // NameIn applies the In predicate on the "name" field. func NameIn(vs ...string) predicate.Bouncer { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldName), v...)) - }) + return predicate.Bouncer(sql.FieldIn(FieldName, vs...)) } // NameNotIn applies the NotIn predicate on the "name" field. func NameNotIn(vs ...string) predicate.Bouncer { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldName), v...)) - }) + return predicate.Bouncer(sql.FieldNotIn(FieldName, vs...)) } // NameGT applies the GT predicate on the "name" field. 
func NameGT(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldName), v)) - }) + return predicate.Bouncer(sql.FieldGT(FieldName, v)) } // NameGTE applies the GTE predicate on the "name" field. func NameGTE(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldName), v)) - }) + return predicate.Bouncer(sql.FieldGTE(FieldName, v)) } // NameLT applies the LT predicate on the "name" field. func NameLT(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldName), v)) - }) + return predicate.Bouncer(sql.FieldLT(FieldName, v)) } // NameLTE applies the LTE predicate on the "name" field. func NameLTE(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldName), v)) - }) + return predicate.Bouncer(sql.FieldLTE(FieldName, v)) } // NameContains applies the Contains predicate on the "name" field. func NameContains(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldName), v)) - }) + return predicate.Bouncer(sql.FieldContains(FieldName, v)) } // NameHasPrefix applies the HasPrefix predicate on the "name" field. func NameHasPrefix(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldName), v)) - }) + return predicate.Bouncer(sql.FieldHasPrefix(FieldName, v)) } // NameHasSuffix applies the HasSuffix predicate on the "name" field. func NameHasSuffix(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldName), v)) - }) + return predicate.Bouncer(sql.FieldHasSuffix(FieldName, v)) } // NameEqualFold applies the EqualFold predicate on the "name" field. 
func NameEqualFold(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldName), v)) - }) + return predicate.Bouncer(sql.FieldEqualFold(FieldName, v)) } // NameContainsFold applies the ContainsFold predicate on the "name" field. func NameContainsFold(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldName), v)) - }) + return predicate.Bouncer(sql.FieldContainsFold(FieldName, v)) } // APIKeyEQ applies the EQ predicate on the "api_key" field. func APIKeyEQ(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldAPIKey), v)) - }) + return predicate.Bouncer(sql.FieldEQ(FieldAPIKey, v)) } // APIKeyNEQ applies the NEQ predicate on the "api_key" field. func APIKeyNEQ(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldAPIKey), v)) - }) + return predicate.Bouncer(sql.FieldNEQ(FieldAPIKey, v)) } // APIKeyIn applies the In predicate on the "api_key" field. func APIKeyIn(vs ...string) predicate.Bouncer { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldAPIKey), v...)) - }) + return predicate.Bouncer(sql.FieldIn(FieldAPIKey, vs...)) } // APIKeyNotIn applies the NotIn predicate on the "api_key" field. func APIKeyNotIn(vs ...string) predicate.Bouncer { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldAPIKey), v...)) - }) + return predicate.Bouncer(sql.FieldNotIn(FieldAPIKey, vs...)) } // APIKeyGT applies the GT predicate on the "api_key" field. 
func APIKeyGT(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldAPIKey), v)) - }) + return predicate.Bouncer(sql.FieldGT(FieldAPIKey, v)) } // APIKeyGTE applies the GTE predicate on the "api_key" field. func APIKeyGTE(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldAPIKey), v)) - }) + return predicate.Bouncer(sql.FieldGTE(FieldAPIKey, v)) } // APIKeyLT applies the LT predicate on the "api_key" field. func APIKeyLT(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldAPIKey), v)) - }) + return predicate.Bouncer(sql.FieldLT(FieldAPIKey, v)) } // APIKeyLTE applies the LTE predicate on the "api_key" field. func APIKeyLTE(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldAPIKey), v)) - }) + return predicate.Bouncer(sql.FieldLTE(FieldAPIKey, v)) } // APIKeyContains applies the Contains predicate on the "api_key" field. func APIKeyContains(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldAPIKey), v)) - }) + return predicate.Bouncer(sql.FieldContains(FieldAPIKey, v)) } // APIKeyHasPrefix applies the HasPrefix predicate on the "api_key" field. func APIKeyHasPrefix(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldAPIKey), v)) - }) + return predicate.Bouncer(sql.FieldHasPrefix(FieldAPIKey, v)) } // APIKeyHasSuffix applies the HasSuffix predicate on the "api_key" field. func APIKeyHasSuffix(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldAPIKey), v)) - }) + return predicate.Bouncer(sql.FieldHasSuffix(FieldAPIKey, v)) } // APIKeyEqualFold applies the EqualFold predicate on the "api_key" field. 
func APIKeyEqualFold(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldAPIKey), v)) - }) + return predicate.Bouncer(sql.FieldEqualFold(FieldAPIKey, v)) } // APIKeyContainsFold applies the ContainsFold predicate on the "api_key" field. func APIKeyContainsFold(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldAPIKey), v)) - }) + return predicate.Bouncer(sql.FieldContainsFold(FieldAPIKey, v)) } // RevokedEQ applies the EQ predicate on the "revoked" field. func RevokedEQ(v bool) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldRevoked), v)) - }) + return predicate.Bouncer(sql.FieldEQ(FieldRevoked, v)) } // RevokedNEQ applies the NEQ predicate on the "revoked" field. func RevokedNEQ(v bool) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldRevoked), v)) - }) + return predicate.Bouncer(sql.FieldNEQ(FieldRevoked, v)) } // IPAddressEQ applies the EQ predicate on the "ip_address" field. func IPAddressEQ(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldIPAddress), v)) - }) + return predicate.Bouncer(sql.FieldEQ(FieldIPAddress, v)) } // IPAddressNEQ applies the NEQ predicate on the "ip_address" field. func IPAddressNEQ(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldIPAddress), v)) - }) + return predicate.Bouncer(sql.FieldNEQ(FieldIPAddress, v)) } // IPAddressIn applies the In predicate on the "ip_address" field. 
func IPAddressIn(vs ...string) predicate.Bouncer { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldIPAddress), v...)) - }) + return predicate.Bouncer(sql.FieldIn(FieldIPAddress, vs...)) } // IPAddressNotIn applies the NotIn predicate on the "ip_address" field. func IPAddressNotIn(vs ...string) predicate.Bouncer { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldIPAddress), v...)) - }) + return predicate.Bouncer(sql.FieldNotIn(FieldIPAddress, vs...)) } // IPAddressGT applies the GT predicate on the "ip_address" field. func IPAddressGT(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldIPAddress), v)) - }) + return predicate.Bouncer(sql.FieldGT(FieldIPAddress, v)) } // IPAddressGTE applies the GTE predicate on the "ip_address" field. func IPAddressGTE(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldIPAddress), v)) - }) + return predicate.Bouncer(sql.FieldGTE(FieldIPAddress, v)) } // IPAddressLT applies the LT predicate on the "ip_address" field. func IPAddressLT(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldIPAddress), v)) - }) + return predicate.Bouncer(sql.FieldLT(FieldIPAddress, v)) } // IPAddressLTE applies the LTE predicate on the "ip_address" field. func IPAddressLTE(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldIPAddress), v)) - }) + return predicate.Bouncer(sql.FieldLTE(FieldIPAddress, v)) } // IPAddressContains applies the Contains predicate on the "ip_address" field. 
func IPAddressContains(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldIPAddress), v)) - }) + return predicate.Bouncer(sql.FieldContains(FieldIPAddress, v)) } // IPAddressHasPrefix applies the HasPrefix predicate on the "ip_address" field. func IPAddressHasPrefix(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldIPAddress), v)) - }) + return predicate.Bouncer(sql.FieldHasPrefix(FieldIPAddress, v)) } // IPAddressHasSuffix applies the HasSuffix predicate on the "ip_address" field. func IPAddressHasSuffix(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldIPAddress), v)) - }) + return predicate.Bouncer(sql.FieldHasSuffix(FieldIPAddress, v)) } // IPAddressIsNil applies the IsNil predicate on the "ip_address" field. func IPAddressIsNil() predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldIPAddress))) - }) + return predicate.Bouncer(sql.FieldIsNull(FieldIPAddress)) } // IPAddressNotNil applies the NotNil predicate on the "ip_address" field. func IPAddressNotNil() predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldIPAddress))) - }) + return predicate.Bouncer(sql.FieldNotNull(FieldIPAddress)) } // IPAddressEqualFold applies the EqualFold predicate on the "ip_address" field. func IPAddressEqualFold(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldIPAddress), v)) - }) + return predicate.Bouncer(sql.FieldEqualFold(FieldIPAddress, v)) } // IPAddressContainsFold applies the ContainsFold predicate on the "ip_address" field. 
func IPAddressContainsFold(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldIPAddress), v)) - }) + return predicate.Bouncer(sql.FieldContainsFold(FieldIPAddress, v)) } // TypeEQ applies the EQ predicate on the "type" field. func TypeEQ(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldType), v)) - }) + return predicate.Bouncer(sql.FieldEQ(FieldType, v)) } // TypeNEQ applies the NEQ predicate on the "type" field. func TypeNEQ(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldType), v)) - }) + return predicate.Bouncer(sql.FieldNEQ(FieldType, v)) } // TypeIn applies the In predicate on the "type" field. func TypeIn(vs ...string) predicate.Bouncer { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldType), v...)) - }) + return predicate.Bouncer(sql.FieldIn(FieldType, vs...)) } // TypeNotIn applies the NotIn predicate on the "type" field. func TypeNotIn(vs ...string) predicate.Bouncer { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldType), v...)) - }) + return predicate.Bouncer(sql.FieldNotIn(FieldType, vs...)) } // TypeGT applies the GT predicate on the "type" field. func TypeGT(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldType), v)) - }) + return predicate.Bouncer(sql.FieldGT(FieldType, v)) } // TypeGTE applies the GTE predicate on the "type" field. func TypeGTE(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldType), v)) - }) + return predicate.Bouncer(sql.FieldGTE(FieldType, v)) } // TypeLT applies the LT predicate on the "type" field. 
func TypeLT(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldType), v)) - }) + return predicate.Bouncer(sql.FieldLT(FieldType, v)) } // TypeLTE applies the LTE predicate on the "type" field. func TypeLTE(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldType), v)) - }) + return predicate.Bouncer(sql.FieldLTE(FieldType, v)) } // TypeContains applies the Contains predicate on the "type" field. func TypeContains(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldType), v)) - }) + return predicate.Bouncer(sql.FieldContains(FieldType, v)) } // TypeHasPrefix applies the HasPrefix predicate on the "type" field. func TypeHasPrefix(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldType), v)) - }) + return predicate.Bouncer(sql.FieldHasPrefix(FieldType, v)) } // TypeHasSuffix applies the HasSuffix predicate on the "type" field. func TypeHasSuffix(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldType), v)) - }) + return predicate.Bouncer(sql.FieldHasSuffix(FieldType, v)) } // TypeIsNil applies the IsNil predicate on the "type" field. func TypeIsNil() predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldType))) - }) + return predicate.Bouncer(sql.FieldIsNull(FieldType)) } // TypeNotNil applies the NotNil predicate on the "type" field. func TypeNotNil() predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldType))) - }) + return predicate.Bouncer(sql.FieldNotNull(FieldType)) } // TypeEqualFold applies the EqualFold predicate on the "type" field. 
func TypeEqualFold(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldType), v)) - }) + return predicate.Bouncer(sql.FieldEqualFold(FieldType, v)) } // TypeContainsFold applies the ContainsFold predicate on the "type" field. func TypeContainsFold(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldType), v)) - }) + return predicate.Bouncer(sql.FieldContainsFold(FieldType, v)) } // VersionEQ applies the EQ predicate on the "version" field. func VersionEQ(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldVersion), v)) - }) + return predicate.Bouncer(sql.FieldEQ(FieldVersion, v)) } // VersionNEQ applies the NEQ predicate on the "version" field. func VersionNEQ(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldVersion), v)) - }) + return predicate.Bouncer(sql.FieldNEQ(FieldVersion, v)) } // VersionIn applies the In predicate on the "version" field. func VersionIn(vs ...string) predicate.Bouncer { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldVersion), v...)) - }) + return predicate.Bouncer(sql.FieldIn(FieldVersion, vs...)) } // VersionNotIn applies the NotIn predicate on the "version" field. func VersionNotIn(vs ...string) predicate.Bouncer { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldVersion), v...)) - }) + return predicate.Bouncer(sql.FieldNotIn(FieldVersion, vs...)) } // VersionGT applies the GT predicate on the "version" field. 
func VersionGT(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldVersion), v)) - }) + return predicate.Bouncer(sql.FieldGT(FieldVersion, v)) } // VersionGTE applies the GTE predicate on the "version" field. func VersionGTE(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldVersion), v)) - }) + return predicate.Bouncer(sql.FieldGTE(FieldVersion, v)) } // VersionLT applies the LT predicate on the "version" field. func VersionLT(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldVersion), v)) - }) + return predicate.Bouncer(sql.FieldLT(FieldVersion, v)) } // VersionLTE applies the LTE predicate on the "version" field. func VersionLTE(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldVersion), v)) - }) + return predicate.Bouncer(sql.FieldLTE(FieldVersion, v)) } // VersionContains applies the Contains predicate on the "version" field. func VersionContains(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldVersion), v)) - }) + return predicate.Bouncer(sql.FieldContains(FieldVersion, v)) } // VersionHasPrefix applies the HasPrefix predicate on the "version" field. func VersionHasPrefix(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldVersion), v)) - }) + return predicate.Bouncer(sql.FieldHasPrefix(FieldVersion, v)) } // VersionHasSuffix applies the HasSuffix predicate on the "version" field. func VersionHasSuffix(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldVersion), v)) - }) + return predicate.Bouncer(sql.FieldHasSuffix(FieldVersion, v)) } // VersionIsNil applies the IsNil predicate on the "version" field. 
func VersionIsNil() predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldVersion))) - }) + return predicate.Bouncer(sql.FieldIsNull(FieldVersion)) } // VersionNotNil applies the NotNil predicate on the "version" field. func VersionNotNil() predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldVersion))) - }) + return predicate.Bouncer(sql.FieldNotNull(FieldVersion)) } // VersionEqualFold applies the EqualFold predicate on the "version" field. func VersionEqualFold(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldVersion), v)) - }) + return predicate.Bouncer(sql.FieldEqualFold(FieldVersion, v)) } // VersionContainsFold applies the ContainsFold predicate on the "version" field. func VersionContainsFold(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldVersion), v)) - }) + return predicate.Bouncer(sql.FieldContainsFold(FieldVersion, v)) } // UntilEQ applies the EQ predicate on the "until" field. func UntilEQ(v time.Time) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldUntil), v)) - }) + return predicate.Bouncer(sql.FieldEQ(FieldUntil, v)) } // UntilNEQ applies the NEQ predicate on the "until" field. func UntilNEQ(v time.Time) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldUntil), v)) - }) + return predicate.Bouncer(sql.FieldNEQ(FieldUntil, v)) } // UntilIn applies the In predicate on the "until" field. func UntilIn(vs ...time.Time) predicate.Bouncer { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldUntil), v...)) - }) + return predicate.Bouncer(sql.FieldIn(FieldUntil, vs...)) } // UntilNotIn applies the NotIn predicate on the "until" field. 
func UntilNotIn(vs ...time.Time) predicate.Bouncer { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldUntil), v...)) - }) + return predicate.Bouncer(sql.FieldNotIn(FieldUntil, vs...)) } // UntilGT applies the GT predicate on the "until" field. func UntilGT(v time.Time) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldUntil), v)) - }) + return predicate.Bouncer(sql.FieldGT(FieldUntil, v)) } // UntilGTE applies the GTE predicate on the "until" field. func UntilGTE(v time.Time) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldUntil), v)) - }) + return predicate.Bouncer(sql.FieldGTE(FieldUntil, v)) } // UntilLT applies the LT predicate on the "until" field. func UntilLT(v time.Time) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldUntil), v)) - }) + return predicate.Bouncer(sql.FieldLT(FieldUntil, v)) } // UntilLTE applies the LTE predicate on the "until" field. func UntilLTE(v time.Time) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldUntil), v)) - }) + return predicate.Bouncer(sql.FieldLTE(FieldUntil, v)) } // UntilIsNil applies the IsNil predicate on the "until" field. func UntilIsNil() predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldUntil))) - }) + return predicate.Bouncer(sql.FieldIsNull(FieldUntil)) } // UntilNotNil applies the NotNil predicate on the "until" field. func UntilNotNil() predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldUntil))) - }) + return predicate.Bouncer(sql.FieldNotNull(FieldUntil)) } // LastPullEQ applies the EQ predicate on the "last_pull" field. 
func LastPullEQ(v time.Time) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldLastPull), v)) - }) + return predicate.Bouncer(sql.FieldEQ(FieldLastPull, v)) } // LastPullNEQ applies the NEQ predicate on the "last_pull" field. func LastPullNEQ(v time.Time) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldLastPull), v)) - }) + return predicate.Bouncer(sql.FieldNEQ(FieldLastPull, v)) } // LastPullIn applies the In predicate on the "last_pull" field. func LastPullIn(vs ...time.Time) predicate.Bouncer { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldLastPull), v...)) - }) + return predicate.Bouncer(sql.FieldIn(FieldLastPull, vs...)) } // LastPullNotIn applies the NotIn predicate on the "last_pull" field. func LastPullNotIn(vs ...time.Time) predicate.Bouncer { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldLastPull), v...)) - }) + return predicate.Bouncer(sql.FieldNotIn(FieldLastPull, vs...)) } // LastPullGT applies the GT predicate on the "last_pull" field. func LastPullGT(v time.Time) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldLastPull), v)) - }) + return predicate.Bouncer(sql.FieldGT(FieldLastPull, v)) } // LastPullGTE applies the GTE predicate on the "last_pull" field. func LastPullGTE(v time.Time) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldLastPull), v)) - }) + return predicate.Bouncer(sql.FieldGTE(FieldLastPull, v)) } // LastPullLT applies the LT predicate on the "last_pull" field. 
func LastPullLT(v time.Time) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldLastPull), v)) - }) + return predicate.Bouncer(sql.FieldLT(FieldLastPull, v)) } // LastPullLTE applies the LTE predicate on the "last_pull" field. func LastPullLTE(v time.Time) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldLastPull), v)) - }) + return predicate.Bouncer(sql.FieldLTE(FieldLastPull, v)) } // AuthTypeEQ applies the EQ predicate on the "auth_type" field. func AuthTypeEQ(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldAuthType), v)) - }) + return predicate.Bouncer(sql.FieldEQ(FieldAuthType, v)) } // AuthTypeNEQ applies the NEQ predicate on the "auth_type" field. func AuthTypeNEQ(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldAuthType), v)) - }) + return predicate.Bouncer(sql.FieldNEQ(FieldAuthType, v)) } // AuthTypeIn applies the In predicate on the "auth_type" field. func AuthTypeIn(vs ...string) predicate.Bouncer { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldAuthType), v...)) - }) + return predicate.Bouncer(sql.FieldIn(FieldAuthType, vs...)) } // AuthTypeNotIn applies the NotIn predicate on the "auth_type" field. func AuthTypeNotIn(vs ...string) predicate.Bouncer { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldAuthType), v...)) - }) + return predicate.Bouncer(sql.FieldNotIn(FieldAuthType, vs...)) } // AuthTypeGT applies the GT predicate on the "auth_type" field. 
func AuthTypeGT(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldAuthType), v)) - }) + return predicate.Bouncer(sql.FieldGT(FieldAuthType, v)) } // AuthTypeGTE applies the GTE predicate on the "auth_type" field. func AuthTypeGTE(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldAuthType), v)) - }) + return predicate.Bouncer(sql.FieldGTE(FieldAuthType, v)) } // AuthTypeLT applies the LT predicate on the "auth_type" field. func AuthTypeLT(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldAuthType), v)) - }) + return predicate.Bouncer(sql.FieldLT(FieldAuthType, v)) } // AuthTypeLTE applies the LTE predicate on the "auth_type" field. func AuthTypeLTE(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldAuthType), v)) - }) + return predicate.Bouncer(sql.FieldLTE(FieldAuthType, v)) } // AuthTypeContains applies the Contains predicate on the "auth_type" field. func AuthTypeContains(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldAuthType), v)) - }) + return predicate.Bouncer(sql.FieldContains(FieldAuthType, v)) } // AuthTypeHasPrefix applies the HasPrefix predicate on the "auth_type" field. func AuthTypeHasPrefix(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldAuthType), v)) - }) + return predicate.Bouncer(sql.FieldHasPrefix(FieldAuthType, v)) } // AuthTypeHasSuffix applies the HasSuffix predicate on the "auth_type" field. func AuthTypeHasSuffix(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldAuthType), v)) - }) + return predicate.Bouncer(sql.FieldHasSuffix(FieldAuthType, v)) } // AuthTypeEqualFold applies the EqualFold predicate on the "auth_type" field. 
func AuthTypeEqualFold(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldAuthType), v)) - }) + return predicate.Bouncer(sql.FieldEqualFold(FieldAuthType, v)) } // AuthTypeContainsFold applies the ContainsFold predicate on the "auth_type" field. func AuthTypeContainsFold(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldAuthType), v)) - }) + return predicate.Bouncer(sql.FieldContainsFold(FieldAuthType, v)) } // And groups predicates with the AND operator between them. func And(predicates ...predicate.Bouncer) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for _, p := range predicates { - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Bouncer(sql.AndPredicates(predicates...)) } // Or groups predicates with the OR operator between them. func Or(predicates ...predicate.Bouncer) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for i, p := range predicates { - if i > 0 { - s1.Or() - } - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Bouncer(sql.OrPredicates(predicates...)) } // Not applies the not operator on the given predicate. func Not(p predicate.Bouncer) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - p(s.Not()) - }) + return predicate.Bouncer(sql.NotPredicates(p)) } diff --git a/pkg/database/ent/bouncer_create.go b/pkg/database/ent/bouncer_create.go index 685ce089d1e..3d08277dcfb 100644 --- a/pkg/database/ent/bouncer_create.go +++ b/pkg/database/ent/bouncer_create.go @@ -157,50 +157,8 @@ func (bc *BouncerCreate) Mutation() *BouncerMutation { // Save creates the Bouncer in the database. 
func (bc *BouncerCreate) Save(ctx context.Context) (*Bouncer, error) { - var ( - err error - node *Bouncer - ) bc.defaults() - if len(bc.hooks) == 0 { - if err = bc.check(); err != nil { - return nil, err - } - node, err = bc.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*BouncerMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = bc.check(); err != nil { - return nil, err - } - bc.mutation = mutation - if node, err = bc.sqlSave(ctx); err != nil { - return nil, err - } - mutation.id = &node.ID - mutation.done = true - return node, err - }) - for i := len(bc.hooks) - 1; i >= 0; i-- { - if bc.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = bc.hooks[i](mut) - } - v, err := mut.Mutate(ctx, bc.mutation) - if err != nil { - return nil, err - } - nv, ok := v.(*Bouncer) - if !ok { - return nil, fmt.Errorf("unexpected node type %T returned from BouncerMutation", v) - } - node = nv - } - return node, err + return withHooks(ctx, bc.sqlSave, bc.mutation, bc.hooks) } // SaveX calls Save and panics if Save returns an error. 
@@ -274,6 +232,9 @@ func (bc *BouncerCreate) check() error { } func (bc *BouncerCreate) sqlSave(ctx context.Context) (*Bouncer, error) { + if err := bc.check(); err != nil { + return nil, err + } _node, _spec := bc.createSpec() if err := sqlgraph.CreateNode(ctx, bc.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { @@ -283,106 +244,58 @@ func (bc *BouncerCreate) sqlSave(ctx context.Context) (*Bouncer, error) { } id := _spec.ID.Value.(int64) _node.ID = int(id) + bc.mutation.id = &_node.ID + bc.mutation.done = true return _node, nil } func (bc *BouncerCreate) createSpec() (*Bouncer, *sqlgraph.CreateSpec) { var ( _node = &Bouncer{config: bc.config} - _spec = &sqlgraph.CreateSpec{ - Table: bouncer.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: bouncer.FieldID, - }, - } + _spec = sqlgraph.NewCreateSpec(bouncer.Table, sqlgraph.NewFieldSpec(bouncer.FieldID, field.TypeInt)) ) if value, ok := bc.mutation.CreatedAt(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: bouncer.FieldCreatedAt, - }) + _spec.SetField(bouncer.FieldCreatedAt, field.TypeTime, value) _node.CreatedAt = &value } if value, ok := bc.mutation.UpdatedAt(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: bouncer.FieldUpdatedAt, - }) + _spec.SetField(bouncer.FieldUpdatedAt, field.TypeTime, value) _node.UpdatedAt = &value } if value, ok := bc.mutation.Name(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: bouncer.FieldName, - }) + _spec.SetField(bouncer.FieldName, field.TypeString, value) _node.Name = value } if value, ok := bc.mutation.APIKey(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: bouncer.FieldAPIKey, - }) + _spec.SetField(bouncer.FieldAPIKey, field.TypeString, value) _node.APIKey = value } if value, 
ok := bc.mutation.Revoked(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: bouncer.FieldRevoked, - }) + _spec.SetField(bouncer.FieldRevoked, field.TypeBool, value) _node.Revoked = value } if value, ok := bc.mutation.IPAddress(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: bouncer.FieldIPAddress, - }) + _spec.SetField(bouncer.FieldIPAddress, field.TypeString, value) _node.IPAddress = value } if value, ok := bc.mutation.GetType(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: bouncer.FieldType, - }) + _spec.SetField(bouncer.FieldType, field.TypeString, value) _node.Type = value } if value, ok := bc.mutation.Version(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: bouncer.FieldVersion, - }) + _spec.SetField(bouncer.FieldVersion, field.TypeString, value) _node.Version = value } if value, ok := bc.mutation.Until(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: bouncer.FieldUntil, - }) + _spec.SetField(bouncer.FieldUntil, field.TypeTime, value) _node.Until = value } if value, ok := bc.mutation.LastPull(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: bouncer.FieldLastPull, - }) + _spec.SetField(bouncer.FieldLastPull, field.TypeTime, value) _node.LastPull = value } if value, ok := bc.mutation.AuthType(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: bouncer.FieldAuthType, - }) + _spec.SetField(bouncer.FieldAuthType, field.TypeString, value) _node.AuthType = value } return _node, _spec @@ -391,11 +304,15 @@ func (bc *BouncerCreate) createSpec() (*Bouncer, *sqlgraph.CreateSpec) { // 
BouncerCreateBulk is the builder for creating many Bouncer entities in bulk. type BouncerCreateBulk struct { config + err error builders []*BouncerCreate } // Save creates the Bouncer entities in the database. func (bcb *BouncerCreateBulk) Save(ctx context.Context) ([]*Bouncer, error) { + if bcb.err != nil { + return nil, bcb.err + } specs := make([]*sqlgraph.CreateSpec, len(bcb.builders)) nodes := make([]*Bouncer, len(bcb.builders)) mutators := make([]Mutator, len(bcb.builders)) @@ -412,8 +329,8 @@ func (bcb *BouncerCreateBulk) Save(ctx context.Context) ([]*Bouncer, error) { return nil, err } builder.mutation = mutation - nodes[i], specs[i] = builder.createSpec() var err error + nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { _, err = mutators[i+1].Mutate(root, bcb.builders[i+1].mutation) } else { diff --git a/pkg/database/ent/bouncer_delete.go b/pkg/database/ent/bouncer_delete.go index 6bfb9459190..bf459e77e28 100644 --- a/pkg/database/ent/bouncer_delete.go +++ b/pkg/database/ent/bouncer_delete.go @@ -4,7 +4,6 @@ package ent import ( "context" - "fmt" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" @@ -28,34 +27,7 @@ func (bd *BouncerDelete) Where(ps ...predicate.Bouncer) *BouncerDelete { // Exec executes the deletion query and returns how many vertices were deleted. 
func (bd *BouncerDelete) Exec(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(bd.hooks) == 0 { - affected, err = bd.sqlExec(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*BouncerMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - bd.mutation = mutation - affected, err = bd.sqlExec(ctx) - mutation.done = true - return affected, err - }) - for i := len(bd.hooks) - 1; i >= 0; i-- { - if bd.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = bd.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, bd.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, bd.sqlExec, bd.mutation, bd.hooks) } // ExecX is like Exec, but panics if an error occurs. @@ -68,15 +40,7 @@ func (bd *BouncerDelete) ExecX(ctx context.Context) int { } func (bd *BouncerDelete) sqlExec(ctx context.Context) (int, error) { - _spec := &sqlgraph.DeleteSpec{ - Node: &sqlgraph.NodeSpec{ - Table: bouncer.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: bouncer.FieldID, - }, - }, - } + _spec := sqlgraph.NewDeleteSpec(bouncer.Table, sqlgraph.NewFieldSpec(bouncer.FieldID, field.TypeInt)) if ps := bd.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -88,6 +52,7 @@ func (bd *BouncerDelete) sqlExec(ctx context.Context) (int, error) { if err != nil && sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } + bd.mutation.done = true return affected, err } @@ -96,6 +61,12 @@ type BouncerDeleteOne struct { bd *BouncerDelete } +// Where appends a list predicates to the BouncerDelete builder. +func (bdo *BouncerDeleteOne) Where(ps ...predicate.Bouncer) *BouncerDeleteOne { + bdo.bd.mutation.Where(ps...) + return bdo +} + // Exec executes the deletion query. 
func (bdo *BouncerDeleteOne) Exec(ctx context.Context) error { n, err := bdo.bd.Exec(ctx) @@ -111,5 +82,7 @@ func (bdo *BouncerDeleteOne) Exec(ctx context.Context) error { // ExecX is like Exec, but panics if an error occurs. func (bdo *BouncerDeleteOne) ExecX(ctx context.Context) { - bdo.bd.ExecX(ctx) + if err := bdo.Exec(ctx); err != nil { + panic(err) + } } diff --git a/pkg/database/ent/bouncer_query.go b/pkg/database/ent/bouncer_query.go index 2747a3e0b3a..ea2b7495733 100644 --- a/pkg/database/ent/bouncer_query.go +++ b/pkg/database/ent/bouncer_query.go @@ -17,11 +17,9 @@ import ( // BouncerQuery is the builder for querying Bouncer entities. type BouncerQuery struct { config - limit *int - offset *int - unique *bool - order []OrderFunc - fields []string + ctx *QueryContext + order []bouncer.OrderOption + inters []Interceptor predicates []predicate.Bouncer // intermediate query (i.e. traversal path). sql *sql.Selector @@ -34,27 +32,27 @@ func (bq *BouncerQuery) Where(ps ...predicate.Bouncer) *BouncerQuery { return bq } -// Limit adds a limit step to the query. +// Limit the number of records to be returned by this query. func (bq *BouncerQuery) Limit(limit int) *BouncerQuery { - bq.limit = &limit + bq.ctx.Limit = &limit return bq } -// Offset adds an offset step to the query. +// Offset to start from. func (bq *BouncerQuery) Offset(offset int) *BouncerQuery { - bq.offset = &offset + bq.ctx.Offset = &offset return bq } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. func (bq *BouncerQuery) Unique(unique bool) *BouncerQuery { - bq.unique = &unique + bq.ctx.Unique = &unique return bq } -// Order adds an order step to the query. -func (bq *BouncerQuery) Order(o ...OrderFunc) *BouncerQuery { +// Order specifies how the records should be ordered. +func (bq *BouncerQuery) Order(o ...bouncer.OrderOption) *BouncerQuery { bq.order = append(bq.order, o...) 
return bq } @@ -62,7 +60,7 @@ func (bq *BouncerQuery) Order(o ...OrderFunc) *BouncerQuery { // First returns the first Bouncer entity from the query. // Returns a *NotFoundError when no Bouncer was found. func (bq *BouncerQuery) First(ctx context.Context) (*Bouncer, error) { - nodes, err := bq.Limit(1).All(ctx) + nodes, err := bq.Limit(1).All(setContextOp(ctx, bq.ctx, "First")) if err != nil { return nil, err } @@ -85,7 +83,7 @@ func (bq *BouncerQuery) FirstX(ctx context.Context) *Bouncer { // Returns a *NotFoundError when no Bouncer ID was found. func (bq *BouncerQuery) FirstID(ctx context.Context) (id int, err error) { var ids []int - if ids, err = bq.Limit(1).IDs(ctx); err != nil { + if ids, err = bq.Limit(1).IDs(setContextOp(ctx, bq.ctx, "FirstID")); err != nil { return } if len(ids) == 0 { @@ -108,7 +106,7 @@ func (bq *BouncerQuery) FirstIDX(ctx context.Context) int { // Returns a *NotSingularError when more than one Bouncer entity is found. // Returns a *NotFoundError when no Bouncer entities are found. func (bq *BouncerQuery) Only(ctx context.Context) (*Bouncer, error) { - nodes, err := bq.Limit(2).All(ctx) + nodes, err := bq.Limit(2).All(setContextOp(ctx, bq.ctx, "Only")) if err != nil { return nil, err } @@ -136,7 +134,7 @@ func (bq *BouncerQuery) OnlyX(ctx context.Context) *Bouncer { // Returns a *NotFoundError when no entities are found. func (bq *BouncerQuery) OnlyID(ctx context.Context) (id int, err error) { var ids []int - if ids, err = bq.Limit(2).IDs(ctx); err != nil { + if ids, err = bq.Limit(2).IDs(setContextOp(ctx, bq.ctx, "OnlyID")); err != nil { return } switch len(ids) { @@ -161,10 +159,12 @@ func (bq *BouncerQuery) OnlyIDX(ctx context.Context) int { // All executes the query and returns a list of Bouncers. 
func (bq *BouncerQuery) All(ctx context.Context) ([]*Bouncer, error) { + ctx = setContextOp(ctx, bq.ctx, "All") if err := bq.prepareQuery(ctx); err != nil { return nil, err } - return bq.sqlAll(ctx) + qr := querierAll[[]*Bouncer, *BouncerQuery]() + return withInterceptors[[]*Bouncer](ctx, bq, qr, bq.inters) } // AllX is like All, but panics if an error occurs. @@ -177,9 +177,12 @@ func (bq *BouncerQuery) AllX(ctx context.Context) []*Bouncer { } // IDs executes the query and returns a list of Bouncer IDs. -func (bq *BouncerQuery) IDs(ctx context.Context) ([]int, error) { - var ids []int - if err := bq.Select(bouncer.FieldID).Scan(ctx, &ids); err != nil { +func (bq *BouncerQuery) IDs(ctx context.Context) (ids []int, err error) { + if bq.ctx.Unique == nil && bq.path != nil { + bq.Unique(true) + } + ctx = setContextOp(ctx, bq.ctx, "IDs") + if err = bq.Select(bouncer.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil @@ -196,10 +199,11 @@ func (bq *BouncerQuery) IDsX(ctx context.Context) []int { // Count returns the count of the given query. func (bq *BouncerQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, bq.ctx, "Count") if err := bq.prepareQuery(ctx); err != nil { return 0, err } - return bq.sqlCount(ctx) + return withInterceptors[int](ctx, bq, querierCount[*BouncerQuery](), bq.inters) } // CountX is like Count, but panics if an error occurs. @@ -213,10 +217,15 @@ func (bq *BouncerQuery) CountX(ctx context.Context) int { // Exist returns true if the query has elements in the graph. 
func (bq *BouncerQuery) Exist(ctx context.Context) (bool, error) { - if err := bq.prepareQuery(ctx); err != nil { - return false, err + ctx = setContextOp(ctx, bq.ctx, "Exist") + switch _, err := bq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil } - return bq.sqlExist(ctx) } // ExistX is like Exist, but panics if an error occurs. @@ -236,14 +245,13 @@ func (bq *BouncerQuery) Clone() *BouncerQuery { } return &BouncerQuery{ config: bq.config, - limit: bq.limit, - offset: bq.offset, - order: append([]OrderFunc{}, bq.order...), + ctx: bq.ctx.Clone(), + order: append([]bouncer.OrderOption{}, bq.order...), + inters: append([]Interceptor{}, bq.inters...), predicates: append([]predicate.Bouncer{}, bq.predicates...), // clone intermediate query. - sql: bq.sql.Clone(), - path: bq.path, - unique: bq.unique, + sql: bq.sql.Clone(), + path: bq.path, } } @@ -262,16 +270,11 @@ func (bq *BouncerQuery) Clone() *BouncerQuery { // Aggregate(ent.Count()). // Scan(ctx, &v) func (bq *BouncerQuery) GroupBy(field string, fields ...string) *BouncerGroupBy { - grbuild := &BouncerGroupBy{config: bq.config} - grbuild.fields = append([]string{field}, fields...) - grbuild.path = func(ctx context.Context) (prev *sql.Selector, err error) { - if err := bq.prepareQuery(ctx); err != nil { - return nil, err - } - return bq.sqlQuery(ctx), nil - } + bq.ctx.Fields = append([]string{field}, fields...) + grbuild := &BouncerGroupBy{build: bq} + grbuild.flds = &bq.ctx.Fields grbuild.label = bouncer.Label - grbuild.flds, grbuild.scan = &grbuild.fields, grbuild.Scan + grbuild.scan = grbuild.Scan return grbuild } @@ -288,15 +291,30 @@ func (bq *BouncerQuery) GroupBy(field string, fields ...string) *BouncerGroupBy // Select(bouncer.FieldCreatedAt). // Scan(ctx, &v) func (bq *BouncerQuery) Select(fields ...string) *BouncerSelect { - bq.fields = append(bq.fields, fields...) 
- selbuild := &BouncerSelect{BouncerQuery: bq} - selbuild.label = bouncer.Label - selbuild.flds, selbuild.scan = &bq.fields, selbuild.Scan - return selbuild + bq.ctx.Fields = append(bq.ctx.Fields, fields...) + sbuild := &BouncerSelect{BouncerQuery: bq} + sbuild.label = bouncer.Label + sbuild.flds, sbuild.scan = &bq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a BouncerSelect configured with the given aggregations. +func (bq *BouncerQuery) Aggregate(fns ...AggregateFunc) *BouncerSelect { + return bq.Select().Aggregate(fns...) } func (bq *BouncerQuery) prepareQuery(ctx context.Context) error { - for _, f := range bq.fields { + for _, inter := range bq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, bq); err != nil { + return err + } + } + } + for _, f := range bq.ctx.Fields { if !bouncer.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } @@ -338,41 +356,22 @@ func (bq *BouncerQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Boun func (bq *BouncerQuery) sqlCount(ctx context.Context) (int, error) { _spec := bq.querySpec() - _spec.Node.Columns = bq.fields - if len(bq.fields) > 0 { - _spec.Unique = bq.unique != nil && *bq.unique + _spec.Node.Columns = bq.ctx.Fields + if len(bq.ctx.Fields) > 0 { + _spec.Unique = bq.ctx.Unique != nil && *bq.ctx.Unique } return sqlgraph.CountNodes(ctx, bq.driver, _spec) } -func (bq *BouncerQuery) sqlExist(ctx context.Context) (bool, error) { - switch _, err := bq.FirstID(ctx); { - case IsNotFound(err): - return false, nil - case err != nil: - return false, fmt.Errorf("ent: check existence: %w", err) - default: - return true, nil - } -} - func (bq *BouncerQuery) querySpec() *sqlgraph.QuerySpec { - _spec := &sqlgraph.QuerySpec{ - Node: &sqlgraph.NodeSpec{ - Table: bouncer.Table, - Columns: bouncer.Columns, - ID: 
&sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: bouncer.FieldID, - }, - }, - From: bq.sql, - Unique: true, - } - if unique := bq.unique; unique != nil { + _spec := sqlgraph.NewQuerySpec(bouncer.Table, bouncer.Columns, sqlgraph.NewFieldSpec(bouncer.FieldID, field.TypeInt)) + _spec.From = bq.sql + if unique := bq.ctx.Unique; unique != nil { _spec.Unique = *unique + } else if bq.path != nil { + _spec.Unique = true } - if fields := bq.fields; len(fields) > 0 { + if fields := bq.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, bouncer.FieldID) for i := range fields { @@ -388,10 +387,10 @@ func (bq *BouncerQuery) querySpec() *sqlgraph.QuerySpec { } } } - if limit := bq.limit; limit != nil { + if limit := bq.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := bq.offset; offset != nil { + if offset := bq.ctx.Offset; offset != nil { _spec.Offset = *offset } if ps := bq.order; len(ps) > 0 { @@ -407,7 +406,7 @@ func (bq *BouncerQuery) querySpec() *sqlgraph.QuerySpec { func (bq *BouncerQuery) sqlQuery(ctx context.Context) *sql.Selector { builder := sql.Dialect(bq.driver.Dialect()) t1 := builder.Table(bouncer.Table) - columns := bq.fields + columns := bq.ctx.Fields if len(columns) == 0 { columns = bouncer.Columns } @@ -416,7 +415,7 @@ func (bq *BouncerQuery) sqlQuery(ctx context.Context) *sql.Selector { selector = bq.sql selector.Select(selector.Columns(columns...)...) } - if bq.unique != nil && *bq.unique { + if bq.ctx.Unique != nil && *bq.ctx.Unique { selector.Distinct() } for _, p := range bq.predicates { @@ -425,12 +424,12 @@ func (bq *BouncerQuery) sqlQuery(ctx context.Context) *sql.Selector { for _, p := range bq.order { p(selector) } - if offset := bq.offset; offset != nil { + if offset := bq.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. 
selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := bq.limit; limit != nil { + if limit := bq.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector @@ -438,13 +437,8 @@ func (bq *BouncerQuery) sqlQuery(ctx context.Context) *sql.Selector { // BouncerGroupBy is the group-by builder for Bouncer entities. type BouncerGroupBy struct { - config selector - fields []string - fns []AggregateFunc - // intermediate query (i.e. traversal path). - sql *sql.Selector - path func(context.Context) (*sql.Selector, error) + build *BouncerQuery } // Aggregate adds the given aggregation functions to the group-by query. @@ -453,74 +447,77 @@ func (bgb *BouncerGroupBy) Aggregate(fns ...AggregateFunc) *BouncerGroupBy { return bgb } -// Scan applies the group-by query and scans the result into the given value. +// Scan applies the selector query and scans the result into the given value. func (bgb *BouncerGroupBy) Scan(ctx context.Context, v any) error { - query, err := bgb.path(ctx) - if err != nil { + ctx = setContextOp(ctx, bgb.build.ctx, "GroupBy") + if err := bgb.build.prepareQuery(ctx); err != nil { return err } - bgb.sql = query - return bgb.sqlScan(ctx, v) + return scanWithInterceptors[*BouncerQuery, *BouncerGroupBy](ctx, bgb.build, bgb, bgb.build.inters, v) } -func (bgb *BouncerGroupBy) sqlScan(ctx context.Context, v any) error { - for _, f := range bgb.fields { - if !bouncer.ValidColumn(f) { - return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} +func (bgb *BouncerGroupBy) sqlScan(ctx context.Context, root *BouncerQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(bgb.fns)) + for _, fn := range bgb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*bgb.flds)+len(bgb.fns)) + for _, f := range *bgb.flds { + columns = append(columns, selector.C(f)) } + columns = append(columns, 
aggregation...) + selector.Select(columns...) } - selector := bgb.sqlQuery() + selector.GroupBy(selector.Columns(*bgb.flds...)...) if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := bgb.driver.Query(ctx, query, args, rows); err != nil { + if err := bgb.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() return sql.ScanSlice(rows, v) } -func (bgb *BouncerGroupBy) sqlQuery() *sql.Selector { - selector := bgb.sql.Select() - aggregation := make([]string, 0, len(bgb.fns)) - for _, fn := range bgb.fns { - aggregation = append(aggregation, fn(selector)) - } - // If no columns were selected in a custom aggregation function, the default - // selection is the fields used for "group-by", and the aggregation functions. - if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(bgb.fields)+len(bgb.fns)) - for _, f := range bgb.fields { - columns = append(columns, selector.C(f)) - } - columns = append(columns, aggregation...) - selector.Select(columns...) - } - return selector.GroupBy(selector.Columns(bgb.fields...)...) -} - // BouncerSelect is the builder for selecting fields of Bouncer entities. type BouncerSelect struct { *BouncerQuery selector - // intermediate query (i.e. traversal path). - sql *sql.Selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (bs *BouncerSelect) Aggregate(fns ...AggregateFunc) *BouncerSelect { + bs.fns = append(bs.fns, fns...) + return bs } // Scan applies the selector query and scans the result into the given value. 
func (bs *BouncerSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, bs.ctx, "Select") if err := bs.prepareQuery(ctx); err != nil { return err } - bs.sql = bs.BouncerQuery.sqlQuery(ctx) - return bs.sqlScan(ctx, v) + return scanWithInterceptors[*BouncerQuery, *BouncerSelect](ctx, bs.BouncerQuery, bs, bs.inters, v) } -func (bs *BouncerSelect) sqlScan(ctx context.Context, v any) error { +func (bs *BouncerSelect) sqlScan(ctx context.Context, root *BouncerQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(bs.fns)) + for _, fn := range bs.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*bs.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } rows := &sql.Rows{} - query, args := bs.sql.Query() + query, args := selector.Query() if err := bs.driver.Query(ctx, query, args, rows); err != nil { return err } diff --git a/pkg/database/ent/bouncer_update.go b/pkg/database/ent/bouncer_update.go index acf48dedeec..f7e71eb315e 100644 --- a/pkg/database/ent/bouncer_update.go +++ b/pkg/database/ent/bouncer_update.go @@ -185,35 +185,8 @@ func (bu *BouncerUpdate) Mutation() *BouncerMutation { // Save executes the query and returns the number of nodes affected by the update operation. 
func (bu *BouncerUpdate) Save(ctx context.Context) (int, error) { - var ( - err error - affected int - ) bu.defaults() - if len(bu.hooks) == 0 { - affected, err = bu.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*BouncerMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - bu.mutation = mutation - affected, err = bu.sqlSave(ctx) - mutation.done = true - return affected, err - }) - for i := len(bu.hooks) - 1; i >= 0; i-- { - if bu.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = bu.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, bu.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, bu.sqlSave, bu.mutation, bu.hooks) } // SaveX is like Save, but panics if an error occurs. @@ -251,16 +224,7 @@ func (bu *BouncerUpdate) defaults() { } func (bu *BouncerUpdate) sqlSave(ctx context.Context) (n int, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: bouncer.Table, - Columns: bouncer.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: bouncer.FieldID, - }, - }, - } + _spec := sqlgraph.NewUpdateSpec(bouncer.Table, bouncer.Columns, sqlgraph.NewFieldSpec(bouncer.FieldID, field.TypeInt)) if ps := bu.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -269,117 +233,55 @@ func (bu *BouncerUpdate) sqlSave(ctx context.Context) (n int, err error) { } } if value, ok := bu.mutation.CreatedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: bouncer.FieldCreatedAt, - }) + _spec.SetField(bouncer.FieldCreatedAt, field.TypeTime, value) } if bu.mutation.CreatedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: 
bouncer.FieldCreatedAt, - }) + _spec.ClearField(bouncer.FieldCreatedAt, field.TypeTime) } if value, ok := bu.mutation.UpdatedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: bouncer.FieldUpdatedAt, - }) + _spec.SetField(bouncer.FieldUpdatedAt, field.TypeTime, value) } if bu.mutation.UpdatedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: bouncer.FieldUpdatedAt, - }) + _spec.ClearField(bouncer.FieldUpdatedAt, field.TypeTime) } if value, ok := bu.mutation.Name(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: bouncer.FieldName, - }) + _spec.SetField(bouncer.FieldName, field.TypeString, value) } if value, ok := bu.mutation.APIKey(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: bouncer.FieldAPIKey, - }) + _spec.SetField(bouncer.FieldAPIKey, field.TypeString, value) } if value, ok := bu.mutation.Revoked(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: bouncer.FieldRevoked, - }) + _spec.SetField(bouncer.FieldRevoked, field.TypeBool, value) } if value, ok := bu.mutation.IPAddress(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: bouncer.FieldIPAddress, - }) + _spec.SetField(bouncer.FieldIPAddress, field.TypeString, value) } if bu.mutation.IPAddressCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: bouncer.FieldIPAddress, - }) + _spec.ClearField(bouncer.FieldIPAddress, field.TypeString) } if value, ok := bu.mutation.GetType(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: 
bouncer.FieldType, - }) + _spec.SetField(bouncer.FieldType, field.TypeString, value) } if bu.mutation.TypeCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: bouncer.FieldType, - }) + _spec.ClearField(bouncer.FieldType, field.TypeString) } if value, ok := bu.mutation.Version(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: bouncer.FieldVersion, - }) + _spec.SetField(bouncer.FieldVersion, field.TypeString, value) } if bu.mutation.VersionCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: bouncer.FieldVersion, - }) + _spec.ClearField(bouncer.FieldVersion, field.TypeString) } if value, ok := bu.mutation.Until(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: bouncer.FieldUntil, - }) + _spec.SetField(bouncer.FieldUntil, field.TypeTime, value) } if bu.mutation.UntilCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: bouncer.FieldUntil, - }) + _spec.ClearField(bouncer.FieldUntil, field.TypeTime) } if value, ok := bu.mutation.LastPull(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: bouncer.FieldLastPull, - }) + _spec.SetField(bouncer.FieldLastPull, field.TypeTime, value) } if value, ok := bu.mutation.AuthType(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: bouncer.FieldAuthType, - }) + _spec.SetField(bouncer.FieldAuthType, field.TypeString, value) } if n, err = sqlgraph.UpdateNodes(ctx, bu.driver, _spec); err != nil { if _, ok := err.(*sqlgraph.NotFoundError); ok { @@ -389,6 +291,7 @@ func (bu *BouncerUpdate) sqlSave(ctx context.Context) (n int, err error) { } return 0, err 
} + bu.mutation.done = true return n, nil } @@ -555,6 +458,12 @@ func (buo *BouncerUpdateOne) Mutation() *BouncerMutation { return buo.mutation } +// Where appends a list predicates to the BouncerUpdate builder. +func (buo *BouncerUpdateOne) Where(ps ...predicate.Bouncer) *BouncerUpdateOne { + buo.mutation.Where(ps...) + return buo +} + // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. func (buo *BouncerUpdateOne) Select(field string, fields ...string) *BouncerUpdateOne { @@ -564,41 +473,8 @@ func (buo *BouncerUpdateOne) Select(field string, fields ...string) *BouncerUpda // Save executes the query and returns the updated Bouncer entity. func (buo *BouncerUpdateOne) Save(ctx context.Context) (*Bouncer, error) { - var ( - err error - node *Bouncer - ) buo.defaults() - if len(buo.hooks) == 0 { - node, err = buo.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*BouncerMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - buo.mutation = mutation - node, err = buo.sqlSave(ctx) - mutation.done = true - return node, err - }) - for i := len(buo.hooks) - 1; i >= 0; i-- { - if buo.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = buo.hooks[i](mut) - } - v, err := mut.Mutate(ctx, buo.mutation) - if err != nil { - return nil, err - } - nv, ok := v.(*Bouncer) - if !ok { - return nil, fmt.Errorf("unexpected node type %T returned from BouncerMutation", v) - } - node = nv - } - return node, err + return withHooks(ctx, buo.sqlSave, buo.mutation, buo.hooks) } // SaveX is like Save, but panics if an error occurs. 
@@ -636,16 +512,7 @@ func (buo *BouncerUpdateOne) defaults() { } func (buo *BouncerUpdateOne) sqlSave(ctx context.Context) (_node *Bouncer, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: bouncer.Table, - Columns: bouncer.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: bouncer.FieldID, - }, - }, - } + _spec := sqlgraph.NewUpdateSpec(bouncer.Table, bouncer.Columns, sqlgraph.NewFieldSpec(bouncer.FieldID, field.TypeInt)) id, ok := buo.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Bouncer.id" for update`)} @@ -671,117 +538,55 @@ func (buo *BouncerUpdateOne) sqlSave(ctx context.Context) (_node *Bouncer, err e } } if value, ok := buo.mutation.CreatedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: bouncer.FieldCreatedAt, - }) + _spec.SetField(bouncer.FieldCreatedAt, field.TypeTime, value) } if buo.mutation.CreatedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: bouncer.FieldCreatedAt, - }) + _spec.ClearField(bouncer.FieldCreatedAt, field.TypeTime) } if value, ok := buo.mutation.UpdatedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: bouncer.FieldUpdatedAt, - }) + _spec.SetField(bouncer.FieldUpdatedAt, field.TypeTime, value) } if buo.mutation.UpdatedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: bouncer.FieldUpdatedAt, - }) + _spec.ClearField(bouncer.FieldUpdatedAt, field.TypeTime) } if value, ok := buo.mutation.Name(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: bouncer.FieldName, - }) + _spec.SetField(bouncer.FieldName, field.TypeString, value) } if value, ok := buo.mutation.APIKey(); ok { - 
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: bouncer.FieldAPIKey, - }) + _spec.SetField(bouncer.FieldAPIKey, field.TypeString, value) } if value, ok := buo.mutation.Revoked(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: bouncer.FieldRevoked, - }) + _spec.SetField(bouncer.FieldRevoked, field.TypeBool, value) } if value, ok := buo.mutation.IPAddress(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: bouncer.FieldIPAddress, - }) + _spec.SetField(bouncer.FieldIPAddress, field.TypeString, value) } if buo.mutation.IPAddressCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: bouncer.FieldIPAddress, - }) + _spec.ClearField(bouncer.FieldIPAddress, field.TypeString) } if value, ok := buo.mutation.GetType(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: bouncer.FieldType, - }) + _spec.SetField(bouncer.FieldType, field.TypeString, value) } if buo.mutation.TypeCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: bouncer.FieldType, - }) + _spec.ClearField(bouncer.FieldType, field.TypeString) } if value, ok := buo.mutation.Version(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: bouncer.FieldVersion, - }) + _spec.SetField(bouncer.FieldVersion, field.TypeString, value) } if buo.mutation.VersionCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: bouncer.FieldVersion, - }) + _spec.ClearField(bouncer.FieldVersion, field.TypeString) } if value, ok := buo.mutation.Until(); ok { - _spec.Fields.Set = 
append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: bouncer.FieldUntil, - }) + _spec.SetField(bouncer.FieldUntil, field.TypeTime, value) } if buo.mutation.UntilCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: bouncer.FieldUntil, - }) + _spec.ClearField(bouncer.FieldUntil, field.TypeTime) } if value, ok := buo.mutation.LastPull(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: bouncer.FieldLastPull, - }) + _spec.SetField(bouncer.FieldLastPull, field.TypeTime, value) } if value, ok := buo.mutation.AuthType(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: bouncer.FieldAuthType, - }) + _spec.SetField(bouncer.FieldAuthType, field.TypeString, value) } _node = &Bouncer{config: buo.config} _spec.Assign = _node.assignValues @@ -794,5 +599,6 @@ func (buo *BouncerUpdateOne) sqlSave(ctx context.Context) (_node *Bouncer, err e } return nil, err } + buo.mutation.done = true return _node, nil } diff --git a/pkg/database/ent/client.go b/pkg/database/ent/client.go index 815b1df6d16..2761ff088b5 100644 --- a/pkg/database/ent/client.go +++ b/pkg/database/ent/client.go @@ -7,9 +7,14 @@ import ( "errors" "fmt" "log" + "reflect" "github.com/crowdsecurity/crowdsec/pkg/database/ent/migrate" + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" "github.com/crowdsecurity/crowdsec/pkg/database/ent/bouncer" "github.com/crowdsecurity/crowdsec/pkg/database/ent/configitem" @@ -17,10 +22,6 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/database/ent/event" "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" "github.com/crowdsecurity/crowdsec/pkg/database/ent/meta" - - "entgo.io/ent/dialect" - 
"entgo.io/ent/dialect/sql" - "entgo.io/ent/dialect/sql/sqlgraph" ) // Client is the client that holds all ent builders. @@ -46,7 +47,7 @@ type Client struct { // NewClient creates a new client configured with the given options. func NewClient(opts ...Option) *Client { - cfg := config{log: log.Println, hooks: &hooks{}} + cfg := config{log: log.Println, hooks: &hooks{}, inters: &inters{}} cfg.options(opts...) client := &Client{config: cfg} client.init() @@ -64,6 +65,55 @@ func (c *Client) init() { c.Meta = NewMetaClient(c.config) } +type ( + // config is the configuration for the client and its builder. + config struct { + // driver used for executing database requests. + driver dialect.Driver + // debug enable a debug logging. + debug bool + // log used for logging on debug mode. + log func(...any) + // hooks to execute on mutations. + hooks *hooks + // interceptors to execute on queries. + inters *inters + } + // Option function to configure the client. + Option func(*config) +) + +// options applies the options on the config object. +func (c *config) options(opts ...Option) { + for _, opt := range opts { + opt(c) + } + if c.debug { + c.driver = dialect.Debug(c.driver, c.log) + } +} + +// Debug enables debug logging on the ent.Driver. +func Debug() Option { + return func(c *config) { + c.debug = true + } +} + +// Log sets the logging function for debug mode. +func Log(fn func(...any)) Option { + return func(c *config) { + c.log = fn + } +} + +// Driver configures the client driver. +func Driver(driver dialect.Driver) Option { + return func(c *config) { + c.driver = driver + } +} + // Open opens a database/sql.DB specified by the driver name and // the data source name, and returns a new client attached to it. // Optional parameters can be added for configuring the client. 
@@ -80,11 +130,14 @@ func Open(driverName, dataSourceName string, options ...Option) (*Client, error) } } +// ErrTxStarted is returned when trying to start a new transaction from a transactional client. +var ErrTxStarted = errors.New("ent: cannot start a transaction within a transaction") + // Tx returns a new transactional client. The provided context // is used until the transaction is committed or rolled back. func (c *Client) Tx(ctx context.Context) (*Tx, error) { if _, ok := c.driver.(*txDriver); ok { - return nil, errors.New("ent: cannot start a transaction within a transaction") + return nil, ErrTxStarted } tx, err := newTx(ctx, c.driver) if err != nil { @@ -156,13 +209,43 @@ func (c *Client) Close() error { // Use adds the mutation hooks to all the entity clients. // In order to add hooks to a specific client, call: `client.Node.Use(...)`. func (c *Client) Use(hooks ...Hook) { - c.Alert.Use(hooks...) - c.Bouncer.Use(hooks...) - c.ConfigItem.Use(hooks...) - c.Decision.Use(hooks...) - c.Event.Use(hooks...) - c.Machine.Use(hooks...) - c.Meta.Use(hooks...) + for _, n := range []interface{ Use(...Hook) }{ + c.Alert, c.Bouncer, c.ConfigItem, c.Decision, c.Event, c.Machine, c.Meta, + } { + n.Use(hooks...) + } +} + +// Intercept adds the query interceptors to all the entity clients. +// In order to add interceptors to a specific client, call: `client.Node.Intercept(...)`. +func (c *Client) Intercept(interceptors ...Interceptor) { + for _, n := range []interface{ Intercept(...Interceptor) }{ + c.Alert, c.Bouncer, c.ConfigItem, c.Decision, c.Event, c.Machine, c.Meta, + } { + n.Intercept(interceptors...) + } +} + +// Mutate implements the ent.Mutator interface. 
+func (c *Client) Mutate(ctx context.Context, m Mutation) (Value, error) { + switch m := m.(type) { + case *AlertMutation: + return c.Alert.mutate(ctx, m) + case *BouncerMutation: + return c.Bouncer.mutate(ctx, m) + case *ConfigItemMutation: + return c.ConfigItem.mutate(ctx, m) + case *DecisionMutation: + return c.Decision.mutate(ctx, m) + case *EventMutation: + return c.Event.mutate(ctx, m) + case *MachineMutation: + return c.Machine.mutate(ctx, m) + case *MetaMutation: + return c.Meta.mutate(ctx, m) + default: + return nil, fmt.Errorf("ent: unknown mutation type %T", m) + } } // AlertClient is a client for the Alert schema. @@ -181,6 +264,12 @@ func (c *AlertClient) Use(hooks ...Hook) { c.hooks.Alert = append(c.hooks.Alert, hooks...) } +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `alert.Intercept(f(g(h())))`. +func (c *AlertClient) Intercept(interceptors ...Interceptor) { + c.inters.Alert = append(c.inters.Alert, interceptors...) +} + // Create returns a builder for creating a Alert entity. func (c *AlertClient) Create() *AlertCreate { mutation := newAlertMutation(c.config, OpCreate) @@ -192,6 +281,21 @@ func (c *AlertClient) CreateBulk(builders ...*AlertCreate) *AlertCreateBulk { return &AlertCreateBulk{config: c.config, builders: builders} } +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. 
+func (c *AlertClient) MapCreateBulk(slice any, setFunc func(*AlertCreate, int)) *AlertCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &AlertCreateBulk{err: fmt.Errorf("calling to AlertClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*AlertCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &AlertCreateBulk{config: c.config, builders: builders} +} + // Update returns an update builder for Alert. func (c *AlertClient) Update() *AlertUpdate { mutation := newAlertMutation(c.config, OpUpdate) @@ -221,7 +325,7 @@ func (c *AlertClient) DeleteOne(a *Alert) *AlertDeleteOne { return c.DeleteOneID(a.ID) } -// DeleteOne returns a builder for deleting the given entity by its id. +// DeleteOneID returns a builder for deleting the given entity by its id. func (c *AlertClient) DeleteOneID(id int) *AlertDeleteOne { builder := c.Delete().Where(alert.ID(id)) builder.mutation.id = &id @@ -233,6 +337,8 @@ func (c *AlertClient) DeleteOneID(id int) *AlertDeleteOne { func (c *AlertClient) Query() *AlertQuery { return &AlertQuery{ config: c.config, + ctx: &QueryContext{Type: TypeAlert}, + inters: c.Interceptors(), } } @@ -252,8 +358,8 @@ func (c *AlertClient) GetX(ctx context.Context, id int) *Alert { // QueryOwner queries the owner edge of a Alert. func (c *AlertClient) QueryOwner(a *Alert) *MachineQuery { - query := &MachineQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&MachineClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := a.ID step := sqlgraph.NewStep( sqlgraph.From(alert.Table, alert.FieldID, id), @@ -268,8 +374,8 @@ func (c *AlertClient) QueryOwner(a *Alert) *MachineQuery { // QueryDecisions queries the decisions edge of a Alert. 
func (c *AlertClient) QueryDecisions(a *Alert) *DecisionQuery { - query := &DecisionQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&DecisionClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := a.ID step := sqlgraph.NewStep( sqlgraph.From(alert.Table, alert.FieldID, id), @@ -284,8 +390,8 @@ func (c *AlertClient) QueryDecisions(a *Alert) *DecisionQuery { // QueryEvents queries the events edge of a Alert. func (c *AlertClient) QueryEvents(a *Alert) *EventQuery { - query := &EventQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&EventClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := a.ID step := sqlgraph.NewStep( sqlgraph.From(alert.Table, alert.FieldID, id), @@ -300,8 +406,8 @@ func (c *AlertClient) QueryEvents(a *Alert) *EventQuery { // QueryMetas queries the metas edge of a Alert. func (c *AlertClient) QueryMetas(a *Alert) *MetaQuery { - query := &MetaQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&MetaClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := a.ID step := sqlgraph.NewStep( sqlgraph.From(alert.Table, alert.FieldID, id), @@ -319,6 +425,26 @@ func (c *AlertClient) Hooks() []Hook { return c.hooks.Alert } +// Interceptors returns the client interceptors. 
+func (c *AlertClient) Interceptors() []Interceptor { + return c.inters.Alert +} + +func (c *AlertClient) mutate(ctx context.Context, m *AlertMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&AlertCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&AlertUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&AlertUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&AlertDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown Alert mutation op: %q", m.Op()) + } +} + // BouncerClient is a client for the Bouncer schema. type BouncerClient struct { config @@ -335,6 +461,12 @@ func (c *BouncerClient) Use(hooks ...Hook) { c.hooks.Bouncer = append(c.hooks.Bouncer, hooks...) } +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `bouncer.Intercept(f(g(h())))`. +func (c *BouncerClient) Intercept(interceptors ...Interceptor) { + c.inters.Bouncer = append(c.inters.Bouncer, interceptors...) +} + // Create returns a builder for creating a Bouncer entity. func (c *BouncerClient) Create() *BouncerCreate { mutation := newBouncerMutation(c.config, OpCreate) @@ -346,6 +478,21 @@ func (c *BouncerClient) CreateBulk(builders ...*BouncerCreate) *BouncerCreateBul return &BouncerCreateBulk{config: c.config, builders: builders} } +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. 
+func (c *BouncerClient) MapCreateBulk(slice any, setFunc func(*BouncerCreate, int)) *BouncerCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &BouncerCreateBulk{err: fmt.Errorf("calling to BouncerClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*BouncerCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &BouncerCreateBulk{config: c.config, builders: builders} +} + // Update returns an update builder for Bouncer. func (c *BouncerClient) Update() *BouncerUpdate { mutation := newBouncerMutation(c.config, OpUpdate) @@ -375,7 +522,7 @@ func (c *BouncerClient) DeleteOne(b *Bouncer) *BouncerDeleteOne { return c.DeleteOneID(b.ID) } -// DeleteOne returns a builder for deleting the given entity by its id. +// DeleteOneID returns a builder for deleting the given entity by its id. func (c *BouncerClient) DeleteOneID(id int) *BouncerDeleteOne { builder := c.Delete().Where(bouncer.ID(id)) builder.mutation.id = &id @@ -387,6 +534,8 @@ func (c *BouncerClient) DeleteOneID(id int) *BouncerDeleteOne { func (c *BouncerClient) Query() *BouncerQuery { return &BouncerQuery{ config: c.config, + ctx: &QueryContext{Type: TypeBouncer}, + inters: c.Interceptors(), } } @@ -409,6 +558,26 @@ func (c *BouncerClient) Hooks() []Hook { return c.hooks.Bouncer } +// Interceptors returns the client interceptors. 
+func (c *BouncerClient) Interceptors() []Interceptor { + return c.inters.Bouncer +} + +func (c *BouncerClient) mutate(ctx context.Context, m *BouncerMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&BouncerCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&BouncerUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&BouncerUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&BouncerDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown Bouncer mutation op: %q", m.Op()) + } +} + // ConfigItemClient is a client for the ConfigItem schema. type ConfigItemClient struct { config @@ -425,6 +594,12 @@ func (c *ConfigItemClient) Use(hooks ...Hook) { c.hooks.ConfigItem = append(c.hooks.ConfigItem, hooks...) } +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `configitem.Intercept(f(g(h())))`. +func (c *ConfigItemClient) Intercept(interceptors ...Interceptor) { + c.inters.ConfigItem = append(c.inters.ConfigItem, interceptors...) +} + // Create returns a builder for creating a ConfigItem entity. func (c *ConfigItemClient) Create() *ConfigItemCreate { mutation := newConfigItemMutation(c.config, OpCreate) @@ -436,6 +611,21 @@ func (c *ConfigItemClient) CreateBulk(builders ...*ConfigItemCreate) *ConfigItem return &ConfigItemCreateBulk{config: c.config, builders: builders} } +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. 
+func (c *ConfigItemClient) MapCreateBulk(slice any, setFunc func(*ConfigItemCreate, int)) *ConfigItemCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &ConfigItemCreateBulk{err: fmt.Errorf("calling to ConfigItemClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*ConfigItemCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &ConfigItemCreateBulk{config: c.config, builders: builders} +} + // Update returns an update builder for ConfigItem. func (c *ConfigItemClient) Update() *ConfigItemUpdate { mutation := newConfigItemMutation(c.config, OpUpdate) @@ -465,7 +655,7 @@ func (c *ConfigItemClient) DeleteOne(ci *ConfigItem) *ConfigItemDeleteOne { return c.DeleteOneID(ci.ID) } -// DeleteOne returns a builder for deleting the given entity by its id. +// DeleteOneID returns a builder for deleting the given entity by its id. func (c *ConfigItemClient) DeleteOneID(id int) *ConfigItemDeleteOne { builder := c.Delete().Where(configitem.ID(id)) builder.mutation.id = &id @@ -477,6 +667,8 @@ func (c *ConfigItemClient) DeleteOneID(id int) *ConfigItemDeleteOne { func (c *ConfigItemClient) Query() *ConfigItemQuery { return &ConfigItemQuery{ config: c.config, + ctx: &QueryContext{Type: TypeConfigItem}, + inters: c.Interceptors(), } } @@ -499,6 +691,26 @@ func (c *ConfigItemClient) Hooks() []Hook { return c.hooks.ConfigItem } +// Interceptors returns the client interceptors. 
+func (c *ConfigItemClient) Interceptors() []Interceptor { + return c.inters.ConfigItem +} + +func (c *ConfigItemClient) mutate(ctx context.Context, m *ConfigItemMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&ConfigItemCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&ConfigItemUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&ConfigItemUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&ConfigItemDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown ConfigItem mutation op: %q", m.Op()) + } +} + // DecisionClient is a client for the Decision schema. type DecisionClient struct { config @@ -515,6 +727,12 @@ func (c *DecisionClient) Use(hooks ...Hook) { c.hooks.Decision = append(c.hooks.Decision, hooks...) } +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `decision.Intercept(f(g(h())))`. +func (c *DecisionClient) Intercept(interceptors ...Interceptor) { + c.inters.Decision = append(c.inters.Decision, interceptors...) +} + // Create returns a builder for creating a Decision entity. func (c *DecisionClient) Create() *DecisionCreate { mutation := newDecisionMutation(c.config, OpCreate) @@ -526,6 +744,21 @@ func (c *DecisionClient) CreateBulk(builders ...*DecisionCreate) *DecisionCreate return &DecisionCreateBulk{config: c.config, builders: builders} } +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. 
+func (c *DecisionClient) MapCreateBulk(slice any, setFunc func(*DecisionCreate, int)) *DecisionCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &DecisionCreateBulk{err: fmt.Errorf("calling to DecisionClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*DecisionCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &DecisionCreateBulk{config: c.config, builders: builders} +} + // Update returns an update builder for Decision. func (c *DecisionClient) Update() *DecisionUpdate { mutation := newDecisionMutation(c.config, OpUpdate) @@ -555,7 +788,7 @@ func (c *DecisionClient) DeleteOne(d *Decision) *DecisionDeleteOne { return c.DeleteOneID(d.ID) } -// DeleteOne returns a builder for deleting the given entity by its id. +// DeleteOneID returns a builder for deleting the given entity by its id. func (c *DecisionClient) DeleteOneID(id int) *DecisionDeleteOne { builder := c.Delete().Where(decision.ID(id)) builder.mutation.id = &id @@ -567,6 +800,8 @@ func (c *DecisionClient) DeleteOneID(id int) *DecisionDeleteOne { func (c *DecisionClient) Query() *DecisionQuery { return &DecisionQuery{ config: c.config, + ctx: &QueryContext{Type: TypeDecision}, + inters: c.Interceptors(), } } @@ -586,8 +821,8 @@ func (c *DecisionClient) GetX(ctx context.Context, id int) *Decision { // QueryOwner queries the owner edge of a Decision. func (c *DecisionClient) QueryOwner(d *Decision) *AlertQuery { - query := &AlertQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&AlertClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := d.ID step := sqlgraph.NewStep( sqlgraph.From(decision.Table, decision.FieldID, id), @@ -605,6 +840,26 @@ func (c *DecisionClient) Hooks() []Hook { return c.hooks.Decision } +// Interceptors returns the client interceptors. 
+func (c *DecisionClient) Interceptors() []Interceptor { + return c.inters.Decision +} + +func (c *DecisionClient) mutate(ctx context.Context, m *DecisionMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&DecisionCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&DecisionUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&DecisionUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&DecisionDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown Decision mutation op: %q", m.Op()) + } +} + // EventClient is a client for the Event schema. type EventClient struct { config @@ -621,6 +876,12 @@ func (c *EventClient) Use(hooks ...Hook) { c.hooks.Event = append(c.hooks.Event, hooks...) } +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `event.Intercept(f(g(h())))`. +func (c *EventClient) Intercept(interceptors ...Interceptor) { + c.inters.Event = append(c.inters.Event, interceptors...) +} + // Create returns a builder for creating a Event entity. func (c *EventClient) Create() *EventCreate { mutation := newEventMutation(c.config, OpCreate) @@ -632,6 +893,21 @@ func (c *EventClient) CreateBulk(builders ...*EventCreate) *EventCreateBulk { return &EventCreateBulk{config: c.config, builders: builders} } +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. 
+func (c *EventClient) MapCreateBulk(slice any, setFunc func(*EventCreate, int)) *EventCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &EventCreateBulk{err: fmt.Errorf("calling to EventClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*EventCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &EventCreateBulk{config: c.config, builders: builders} +} + // Update returns an update builder for Event. func (c *EventClient) Update() *EventUpdate { mutation := newEventMutation(c.config, OpUpdate) @@ -661,7 +937,7 @@ func (c *EventClient) DeleteOne(e *Event) *EventDeleteOne { return c.DeleteOneID(e.ID) } -// DeleteOne returns a builder for deleting the given entity by its id. +// DeleteOneID returns a builder for deleting the given entity by its id. func (c *EventClient) DeleteOneID(id int) *EventDeleteOne { builder := c.Delete().Where(event.ID(id)) builder.mutation.id = &id @@ -673,6 +949,8 @@ func (c *EventClient) DeleteOneID(id int) *EventDeleteOne { func (c *EventClient) Query() *EventQuery { return &EventQuery{ config: c.config, + ctx: &QueryContext{Type: TypeEvent}, + inters: c.Interceptors(), } } @@ -692,8 +970,8 @@ func (c *EventClient) GetX(ctx context.Context, id int) *Event { // QueryOwner queries the owner edge of a Event. func (c *EventClient) QueryOwner(e *Event) *AlertQuery { - query := &AlertQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&AlertClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := e.ID step := sqlgraph.NewStep( sqlgraph.From(event.Table, event.FieldID, id), @@ -711,6 +989,26 @@ func (c *EventClient) Hooks() []Hook { return c.hooks.Event } +// Interceptors returns the client interceptors. 
+func (c *EventClient) Interceptors() []Interceptor { + return c.inters.Event +} + +func (c *EventClient) mutate(ctx context.Context, m *EventMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&EventCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&EventUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&EventUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&EventDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown Event mutation op: %q", m.Op()) + } +} + // MachineClient is a client for the Machine schema. type MachineClient struct { config @@ -727,6 +1025,12 @@ func (c *MachineClient) Use(hooks ...Hook) { c.hooks.Machine = append(c.hooks.Machine, hooks...) } +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `machine.Intercept(f(g(h())))`. +func (c *MachineClient) Intercept(interceptors ...Interceptor) { + c.inters.Machine = append(c.inters.Machine, interceptors...) +} + // Create returns a builder for creating a Machine entity. func (c *MachineClient) Create() *MachineCreate { mutation := newMachineMutation(c.config, OpCreate) @@ -738,6 +1042,21 @@ func (c *MachineClient) CreateBulk(builders ...*MachineCreate) *MachineCreateBul return &MachineCreateBulk{config: c.config, builders: builders} } +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. 
+func (c *MachineClient) MapCreateBulk(slice any, setFunc func(*MachineCreate, int)) *MachineCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &MachineCreateBulk{err: fmt.Errorf("calling to MachineClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*MachineCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &MachineCreateBulk{config: c.config, builders: builders} +} + // Update returns an update builder for Machine. func (c *MachineClient) Update() *MachineUpdate { mutation := newMachineMutation(c.config, OpUpdate) @@ -767,7 +1086,7 @@ func (c *MachineClient) DeleteOne(m *Machine) *MachineDeleteOne { return c.DeleteOneID(m.ID) } -// DeleteOne returns a builder for deleting the given entity by its id. +// DeleteOneID returns a builder for deleting the given entity by its id. func (c *MachineClient) DeleteOneID(id int) *MachineDeleteOne { builder := c.Delete().Where(machine.ID(id)) builder.mutation.id = &id @@ -779,6 +1098,8 @@ func (c *MachineClient) DeleteOneID(id int) *MachineDeleteOne { func (c *MachineClient) Query() *MachineQuery { return &MachineQuery{ config: c.config, + ctx: &QueryContext{Type: TypeMachine}, + inters: c.Interceptors(), } } @@ -798,8 +1119,8 @@ func (c *MachineClient) GetX(ctx context.Context, id int) *Machine { // QueryAlerts queries the alerts edge of a Machine. func (c *MachineClient) QueryAlerts(m *Machine) *AlertQuery { - query := &AlertQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&AlertClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := m.ID step := sqlgraph.NewStep( sqlgraph.From(machine.Table, machine.FieldID, id), @@ -817,6 +1138,26 @@ func (c *MachineClient) Hooks() []Hook { return c.hooks.Machine } +// Interceptors returns the client interceptors. 
+func (c *MachineClient) Interceptors() []Interceptor { + return c.inters.Machine +} + +func (c *MachineClient) mutate(ctx context.Context, m *MachineMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&MachineCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&MachineUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&MachineUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&MachineDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown Machine mutation op: %q", m.Op()) + } +} + // MetaClient is a client for the Meta schema. type MetaClient struct { config @@ -833,6 +1174,12 @@ func (c *MetaClient) Use(hooks ...Hook) { c.hooks.Meta = append(c.hooks.Meta, hooks...) } +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `meta.Intercept(f(g(h())))`. +func (c *MetaClient) Intercept(interceptors ...Interceptor) { + c.inters.Meta = append(c.inters.Meta, interceptors...) +} + // Create returns a builder for creating a Meta entity. func (c *MetaClient) Create() *MetaCreate { mutation := newMetaMutation(c.config, OpCreate) @@ -844,6 +1191,21 @@ func (c *MetaClient) CreateBulk(builders ...*MetaCreate) *MetaCreateBulk { return &MetaCreateBulk{config: c.config, builders: builders} } +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. 
+func (c *MetaClient) MapCreateBulk(slice any, setFunc func(*MetaCreate, int)) *MetaCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &MetaCreateBulk{err: fmt.Errorf("calling to MetaClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*MetaCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &MetaCreateBulk{config: c.config, builders: builders} +} + // Update returns an update builder for Meta. func (c *MetaClient) Update() *MetaUpdate { mutation := newMetaMutation(c.config, OpUpdate) @@ -873,7 +1235,7 @@ func (c *MetaClient) DeleteOne(m *Meta) *MetaDeleteOne { return c.DeleteOneID(m.ID) } -// DeleteOne returns a builder for deleting the given entity by its id. +// DeleteOneID returns a builder for deleting the given entity by its id. func (c *MetaClient) DeleteOneID(id int) *MetaDeleteOne { builder := c.Delete().Where(meta.ID(id)) builder.mutation.id = &id @@ -885,6 +1247,8 @@ func (c *MetaClient) DeleteOneID(id int) *MetaDeleteOne { func (c *MetaClient) Query() *MetaQuery { return &MetaQuery{ config: c.config, + ctx: &QueryContext{Type: TypeMeta}, + inters: c.Interceptors(), } } @@ -904,8 +1268,8 @@ func (c *MetaClient) GetX(ctx context.Context, id int) *Meta { // QueryOwner queries the owner edge of a Meta. func (c *MetaClient) QueryOwner(m *Meta) *AlertQuery { - query := &AlertQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&AlertClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := m.ID step := sqlgraph.NewStep( sqlgraph.From(meta.Table, meta.FieldID, id), @@ -922,3 +1286,33 @@ func (c *MetaClient) QueryOwner(m *Meta) *AlertQuery { func (c *MetaClient) Hooks() []Hook { return c.hooks.Meta } + +// Interceptors returns the client interceptors. 
+func (c *MetaClient) Interceptors() []Interceptor { + return c.inters.Meta +} + +func (c *MetaClient) mutate(ctx context.Context, m *MetaMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&MetaCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&MetaUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&MetaUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&MetaDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown Meta mutation op: %q", m.Op()) + } +} + +// hooks and interceptors per client, for fast access. +type ( + hooks struct { + Alert, Bouncer, ConfigItem, Decision, Event, Machine, Meta []ent.Hook + } + inters struct { + Alert, Bouncer, ConfigItem, Decision, Event, Machine, Meta []ent.Interceptor + } +) diff --git a/pkg/database/ent/config.go b/pkg/database/ent/config.go deleted file mode 100644 index 1a152809a32..00000000000 --- a/pkg/database/ent/config.go +++ /dev/null @@ -1,65 +0,0 @@ -// Code generated by ent, DO NOT EDIT. - -package ent - -import ( - "entgo.io/ent" - "entgo.io/ent/dialect" -) - -// Option function to configure the client. -type Option func(*config) - -// Config is the configuration for the client and its builder. -type config struct { - // driver used for executing database requests. - driver dialect.Driver - // debug enable a debug logging. - debug bool - // log used for logging on debug mode. - log func(...any) - // hooks to execute on mutations. - hooks *hooks -} - -// hooks per client, for fast access. -type hooks struct { - Alert []ent.Hook - Bouncer []ent.Hook - ConfigItem []ent.Hook - Decision []ent.Hook - Event []ent.Hook - Machine []ent.Hook - Meta []ent.Hook -} - -// Options applies the options on the config object. 
-func (c *config) options(opts ...Option) { - for _, opt := range opts { - opt(c) - } - if c.debug { - c.driver = dialect.Debug(c.driver, c.log) - } -} - -// Debug enables debug logging on the ent.Driver. -func Debug() Option { - return func(c *config) { - c.debug = true - } -} - -// Log sets the logging function for debug mode. -func Log(fn func(...any)) Option { - return func(c *config) { - c.log = fn - } -} - -// Driver configures the client driver. -func Driver(driver dialect.Driver) Option { - return func(c *config) { - c.driver = driver - } -} diff --git a/pkg/database/ent/configitem.go b/pkg/database/ent/configitem.go index 615780dbacc..467e54386f6 100644 --- a/pkg/database/ent/configitem.go +++ b/pkg/database/ent/configitem.go @@ -7,6 +7,7 @@ import ( "strings" "time" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/crowdsecurity/crowdsec/pkg/database/ent/configitem" ) @@ -23,7 +24,8 @@ type ConfigItem struct { // Name holds the value of the "name" field. Name string `json:"name"` // Value holds the value of the "value" field. - Value string `json:"value"` + Value string `json:"value"` + selectValues sql.SelectValues } // scanValues returns the types for scanning values from sql.Rows. @@ -38,7 +40,7 @@ func (*ConfigItem) scanValues(columns []string) ([]any, error) { case configitem.FieldCreatedAt, configitem.FieldUpdatedAt: values[i] = new(sql.NullTime) default: - return nil, fmt.Errorf("unexpected column %q for type ConfigItem", columns[i]) + values[i] = new(sql.UnknownType) } } return values, nil @@ -84,16 +86,24 @@ func (ci *ConfigItem) assignValues(columns []string, values []any) error { } else if value.Valid { ci.Value = value.String } + default: + ci.selectValues.Set(columns[i], values[i]) } } return nil } +// GetValue returns the ent.Value that was dynamically selected and assigned to the ConfigItem. +// This includes values selected through modifiers, order, etc. 
+func (ci *ConfigItem) GetValue(name string) (ent.Value, error) { + return ci.selectValues.Get(name) +} + // Update returns a builder for updating this ConfigItem. // Note that you need to call ConfigItem.Unwrap() before calling this method if this ConfigItem // was returned from a transaction, and the transaction was committed or rolled back. func (ci *ConfigItem) Update() *ConfigItemUpdateOne { - return (&ConfigItemClient{config: ci.config}).UpdateOne(ci) + return NewConfigItemClient(ci.config).UpdateOne(ci) } // Unwrap unwraps the ConfigItem entity that was returned from a transaction after it was closed, @@ -133,9 +143,3 @@ func (ci *ConfigItem) String() string { // ConfigItems is a parsable slice of ConfigItem. type ConfigItems []*ConfigItem - -func (ci ConfigItems) config(cfg config) { - for _i := range ci { - ci[_i].config = cfg - } -} diff --git a/pkg/database/ent/configitem/configitem.go b/pkg/database/ent/configitem/configitem.go index 80e93e4cc7e..a6ff6c32d57 100644 --- a/pkg/database/ent/configitem/configitem.go +++ b/pkg/database/ent/configitem/configitem.go @@ -4,6 +4,8 @@ package configitem import ( "time" + + "entgo.io/ent/dialect/sql" ) const ( @@ -52,3 +54,31 @@ var ( // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. UpdateDefaultUpdatedAt func() time.Time ) + +// OrderOption defines the ordering options for the ConfigItem queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. 
+func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// ByName orders the results by the name field. +func ByName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldName, opts...).ToFunc() +} + +// ByValue orders the results by the value field. +func ByValue(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldValue, opts...).ToFunc() +} diff --git a/pkg/database/ent/configitem/where.go b/pkg/database/ent/configitem/where.go index 6d06938a855..767f0b420f1 100644 --- a/pkg/database/ent/configitem/where.go +++ b/pkg/database/ent/configitem/where.go @@ -11,485 +11,310 @@ import ( // ID filters vertices based on their ID field. func ID(id int) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.ConfigItem(sql.FieldEQ(FieldID, id)) } // IDEQ applies the EQ predicate on the ID field. func IDEQ(id int) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.ConfigItem(sql.FieldEQ(FieldID, id)) } // IDNEQ applies the NEQ predicate on the ID field. func IDNEQ(id int) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldID), id)) - }) + return predicate.ConfigItem(sql.FieldNEQ(FieldID, id)) } // IDIn applies the In predicate on the ID field. func IDIn(ids ...int) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - v := make([]any, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.In(s.C(FieldID), v...)) - }) + return predicate.ConfigItem(sql.FieldIn(FieldID, ids...)) } // IDNotIn applies the NotIn predicate on the ID field. 
func IDNotIn(ids ...int) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - v := make([]any, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.NotIn(s.C(FieldID), v...)) - }) + return predicate.ConfigItem(sql.FieldNotIn(FieldID, ids...)) } // IDGT applies the GT predicate on the ID field. func IDGT(id int) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldID), id)) - }) + return predicate.ConfigItem(sql.FieldGT(FieldID, id)) } // IDGTE applies the GTE predicate on the ID field. func IDGTE(id int) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldID), id)) - }) + return predicate.ConfigItem(sql.FieldGTE(FieldID, id)) } // IDLT applies the LT predicate on the ID field. func IDLT(id int) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldID), id)) - }) + return predicate.ConfigItem(sql.FieldLT(FieldID, id)) } // IDLTE applies the LTE predicate on the ID field. func IDLTE(id int) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldID), id)) - }) + return predicate.ConfigItem(sql.FieldLTE(FieldID, id)) } // CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. func CreatedAt(v time.Time) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldCreatedAt), v)) - }) + return predicate.ConfigItem(sql.FieldEQ(FieldCreatedAt, v)) } // UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. func UpdatedAt(v time.Time) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldUpdatedAt), v)) - }) + return predicate.ConfigItem(sql.FieldEQ(FieldUpdatedAt, v)) } // Name applies equality check predicate on the "name" field. It's identical to NameEQ. 
func Name(v string) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldName), v)) - }) + return predicate.ConfigItem(sql.FieldEQ(FieldName, v)) } // Value applies equality check predicate on the "value" field. It's identical to ValueEQ. func Value(v string) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldValue), v)) - }) + return predicate.ConfigItem(sql.FieldEQ(FieldValue, v)) } // CreatedAtEQ applies the EQ predicate on the "created_at" field. func CreatedAtEQ(v time.Time) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldCreatedAt), v)) - }) + return predicate.ConfigItem(sql.FieldEQ(FieldCreatedAt, v)) } // CreatedAtNEQ applies the NEQ predicate on the "created_at" field. func CreatedAtNEQ(v time.Time) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldCreatedAt), v)) - }) + return predicate.ConfigItem(sql.FieldNEQ(FieldCreatedAt, v)) } // CreatedAtIn applies the In predicate on the "created_at" field. func CreatedAtIn(vs ...time.Time) predicate.ConfigItem { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldCreatedAt), v...)) - }) + return predicate.ConfigItem(sql.FieldIn(FieldCreatedAt, vs...)) } // CreatedAtNotIn applies the NotIn predicate on the "created_at" field. func CreatedAtNotIn(vs ...time.Time) predicate.ConfigItem { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldCreatedAt), v...)) - }) + return predicate.ConfigItem(sql.FieldNotIn(FieldCreatedAt, vs...)) } // CreatedAtGT applies the GT predicate on the "created_at" field. 
func CreatedAtGT(v time.Time) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldCreatedAt), v)) - }) + return predicate.ConfigItem(sql.FieldGT(FieldCreatedAt, v)) } // CreatedAtGTE applies the GTE predicate on the "created_at" field. func CreatedAtGTE(v time.Time) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldCreatedAt), v)) - }) + return predicate.ConfigItem(sql.FieldGTE(FieldCreatedAt, v)) } // CreatedAtLT applies the LT predicate on the "created_at" field. func CreatedAtLT(v time.Time) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldCreatedAt), v)) - }) + return predicate.ConfigItem(sql.FieldLT(FieldCreatedAt, v)) } // CreatedAtLTE applies the LTE predicate on the "created_at" field. func CreatedAtLTE(v time.Time) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldCreatedAt), v)) - }) + return predicate.ConfigItem(sql.FieldLTE(FieldCreatedAt, v)) } // CreatedAtIsNil applies the IsNil predicate on the "created_at" field. func CreatedAtIsNil() predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldCreatedAt))) - }) + return predicate.ConfigItem(sql.FieldIsNull(FieldCreatedAt)) } // CreatedAtNotNil applies the NotNil predicate on the "created_at" field. func CreatedAtNotNil() predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldCreatedAt))) - }) + return predicate.ConfigItem(sql.FieldNotNull(FieldCreatedAt)) } // UpdatedAtEQ applies the EQ predicate on the "updated_at" field. 
func UpdatedAtEQ(v time.Time) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldUpdatedAt), v)) - }) + return predicate.ConfigItem(sql.FieldEQ(FieldUpdatedAt, v)) } // UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. func UpdatedAtNEQ(v time.Time) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldUpdatedAt), v)) - }) + return predicate.ConfigItem(sql.FieldNEQ(FieldUpdatedAt, v)) } // UpdatedAtIn applies the In predicate on the "updated_at" field. func UpdatedAtIn(vs ...time.Time) predicate.ConfigItem { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldUpdatedAt), v...)) - }) + return predicate.ConfigItem(sql.FieldIn(FieldUpdatedAt, vs...)) } // UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. func UpdatedAtNotIn(vs ...time.Time) predicate.ConfigItem { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldUpdatedAt), v...)) - }) + return predicate.ConfigItem(sql.FieldNotIn(FieldUpdatedAt, vs...)) } // UpdatedAtGT applies the GT predicate on the "updated_at" field. func UpdatedAtGT(v time.Time) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldUpdatedAt), v)) - }) + return predicate.ConfigItem(sql.FieldGT(FieldUpdatedAt, v)) } // UpdatedAtGTE applies the GTE predicate on the "updated_at" field. func UpdatedAtGTE(v time.Time) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldUpdatedAt), v)) - }) + return predicate.ConfigItem(sql.FieldGTE(FieldUpdatedAt, v)) } // UpdatedAtLT applies the LT predicate on the "updated_at" field. 
func UpdatedAtLT(v time.Time) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldUpdatedAt), v)) - }) + return predicate.ConfigItem(sql.FieldLT(FieldUpdatedAt, v)) } // UpdatedAtLTE applies the LTE predicate on the "updated_at" field. func UpdatedAtLTE(v time.Time) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldUpdatedAt), v)) - }) + return predicate.ConfigItem(sql.FieldLTE(FieldUpdatedAt, v)) } // UpdatedAtIsNil applies the IsNil predicate on the "updated_at" field. func UpdatedAtIsNil() predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldUpdatedAt))) - }) + return predicate.ConfigItem(sql.FieldIsNull(FieldUpdatedAt)) } // UpdatedAtNotNil applies the NotNil predicate on the "updated_at" field. func UpdatedAtNotNil() predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldUpdatedAt))) - }) + return predicate.ConfigItem(sql.FieldNotNull(FieldUpdatedAt)) } // NameEQ applies the EQ predicate on the "name" field. func NameEQ(v string) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldName), v)) - }) + return predicate.ConfigItem(sql.FieldEQ(FieldName, v)) } // NameNEQ applies the NEQ predicate on the "name" field. func NameNEQ(v string) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldName), v)) - }) + return predicate.ConfigItem(sql.FieldNEQ(FieldName, v)) } // NameIn applies the In predicate on the "name" field. func NameIn(vs ...string) predicate.ConfigItem { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldName), v...)) - }) + return predicate.ConfigItem(sql.FieldIn(FieldName, vs...)) } // NameNotIn applies the NotIn predicate on the "name" field. 
func NameNotIn(vs ...string) predicate.ConfigItem { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldName), v...)) - }) + return predicate.ConfigItem(sql.FieldNotIn(FieldName, vs...)) } // NameGT applies the GT predicate on the "name" field. func NameGT(v string) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldName), v)) - }) + return predicate.ConfigItem(sql.FieldGT(FieldName, v)) } // NameGTE applies the GTE predicate on the "name" field. func NameGTE(v string) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldName), v)) - }) + return predicate.ConfigItem(sql.FieldGTE(FieldName, v)) } // NameLT applies the LT predicate on the "name" field. func NameLT(v string) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldName), v)) - }) + return predicate.ConfigItem(sql.FieldLT(FieldName, v)) } // NameLTE applies the LTE predicate on the "name" field. func NameLTE(v string) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldName), v)) - }) + return predicate.ConfigItem(sql.FieldLTE(FieldName, v)) } // NameContains applies the Contains predicate on the "name" field. func NameContains(v string) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldName), v)) - }) + return predicate.ConfigItem(sql.FieldContains(FieldName, v)) } // NameHasPrefix applies the HasPrefix predicate on the "name" field. func NameHasPrefix(v string) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldName), v)) - }) + return predicate.ConfigItem(sql.FieldHasPrefix(FieldName, v)) } // NameHasSuffix applies the HasSuffix predicate on the "name" field. 
func NameHasSuffix(v string) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldName), v)) - }) + return predicate.ConfigItem(sql.FieldHasSuffix(FieldName, v)) } // NameEqualFold applies the EqualFold predicate on the "name" field. func NameEqualFold(v string) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldName), v)) - }) + return predicate.ConfigItem(sql.FieldEqualFold(FieldName, v)) } // NameContainsFold applies the ContainsFold predicate on the "name" field. func NameContainsFold(v string) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldName), v)) - }) + return predicate.ConfigItem(sql.FieldContainsFold(FieldName, v)) } // ValueEQ applies the EQ predicate on the "value" field. func ValueEQ(v string) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldValue), v)) - }) + return predicate.ConfigItem(sql.FieldEQ(FieldValue, v)) } // ValueNEQ applies the NEQ predicate on the "value" field. func ValueNEQ(v string) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldValue), v)) - }) + return predicate.ConfigItem(sql.FieldNEQ(FieldValue, v)) } // ValueIn applies the In predicate on the "value" field. func ValueIn(vs ...string) predicate.ConfigItem { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldValue), v...)) - }) + return predicate.ConfigItem(sql.FieldIn(FieldValue, vs...)) } // ValueNotIn applies the NotIn predicate on the "value" field. 
func ValueNotIn(vs ...string) predicate.ConfigItem { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldValue), v...)) - }) + return predicate.ConfigItem(sql.FieldNotIn(FieldValue, vs...)) } // ValueGT applies the GT predicate on the "value" field. func ValueGT(v string) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldValue), v)) - }) + return predicate.ConfigItem(sql.FieldGT(FieldValue, v)) } // ValueGTE applies the GTE predicate on the "value" field. func ValueGTE(v string) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldValue), v)) - }) + return predicate.ConfigItem(sql.FieldGTE(FieldValue, v)) } // ValueLT applies the LT predicate on the "value" field. func ValueLT(v string) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldValue), v)) - }) + return predicate.ConfigItem(sql.FieldLT(FieldValue, v)) } // ValueLTE applies the LTE predicate on the "value" field. func ValueLTE(v string) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldValue), v)) - }) + return predicate.ConfigItem(sql.FieldLTE(FieldValue, v)) } // ValueContains applies the Contains predicate on the "value" field. func ValueContains(v string) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldValue), v)) - }) + return predicate.ConfigItem(sql.FieldContains(FieldValue, v)) } // ValueHasPrefix applies the HasPrefix predicate on the "value" field. 
func ValueHasPrefix(v string) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldValue), v)) - }) + return predicate.ConfigItem(sql.FieldHasPrefix(FieldValue, v)) } // ValueHasSuffix applies the HasSuffix predicate on the "value" field. func ValueHasSuffix(v string) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldValue), v)) - }) + return predicate.ConfigItem(sql.FieldHasSuffix(FieldValue, v)) } // ValueEqualFold applies the EqualFold predicate on the "value" field. func ValueEqualFold(v string) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldValue), v)) - }) + return predicate.ConfigItem(sql.FieldEqualFold(FieldValue, v)) } // ValueContainsFold applies the ContainsFold predicate on the "value" field. func ValueContainsFold(v string) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldValue), v)) - }) + return predicate.ConfigItem(sql.FieldContainsFold(FieldValue, v)) } // And groups predicates with the AND operator between them. func And(predicates ...predicate.ConfigItem) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for _, p := range predicates { - p(s1) - } - s.Where(s1.P()) - }) + return predicate.ConfigItem(sql.AndPredicates(predicates...)) } // Or groups predicates with the OR operator between them. func Or(predicates ...predicate.ConfigItem) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for i, p := range predicates { - if i > 0 { - s1.Or() - } - p(s1) - } - s.Where(s1.P()) - }) + return predicate.ConfigItem(sql.OrPredicates(predicates...)) } // Not applies the not operator on the given predicate. 
func Not(p predicate.ConfigItem) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - p(s.Not()) - }) + return predicate.ConfigItem(sql.NotPredicates(p)) } diff --git a/pkg/database/ent/configitem_create.go b/pkg/database/ent/configitem_create.go index 736e6a50514..19e73dea41c 100644 --- a/pkg/database/ent/configitem_create.go +++ b/pkg/database/ent/configitem_create.go @@ -67,50 +67,8 @@ func (cic *ConfigItemCreate) Mutation() *ConfigItemMutation { // Save creates the ConfigItem in the database. func (cic *ConfigItemCreate) Save(ctx context.Context) (*ConfigItem, error) { - var ( - err error - node *ConfigItem - ) cic.defaults() - if len(cic.hooks) == 0 { - if err = cic.check(); err != nil { - return nil, err - } - node, err = cic.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*ConfigItemMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = cic.check(); err != nil { - return nil, err - } - cic.mutation = mutation - if node, err = cic.sqlSave(ctx); err != nil { - return nil, err - } - mutation.id = &node.ID - mutation.done = true - return node, err - }) - for i := len(cic.hooks) - 1; i >= 0; i-- { - if cic.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = cic.hooks[i](mut) - } - v, err := mut.Mutate(ctx, cic.mutation) - if err != nil { - return nil, err - } - nv, ok := v.(*ConfigItem) - if !ok { - return nil, fmt.Errorf("unexpected node type %T returned from ConfigItemMutation", v) - } - node = nv - } - return node, err + return withHooks(ctx, cic.sqlSave, cic.mutation, cic.hooks) } // SaveX calls Save and panics if Save returns an error. 
@@ -159,6 +117,9 @@ func (cic *ConfigItemCreate) check() error { } func (cic *ConfigItemCreate) sqlSave(ctx context.Context) (*ConfigItem, error) { + if err := cic.check(); err != nil { + return nil, err + } _node, _spec := cic.createSpec() if err := sqlgraph.CreateNode(ctx, cic.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { @@ -168,50 +129,30 @@ func (cic *ConfigItemCreate) sqlSave(ctx context.Context) (*ConfigItem, error) { } id := _spec.ID.Value.(int64) _node.ID = int(id) + cic.mutation.id = &_node.ID + cic.mutation.done = true return _node, nil } func (cic *ConfigItemCreate) createSpec() (*ConfigItem, *sqlgraph.CreateSpec) { var ( _node = &ConfigItem{config: cic.config} - _spec = &sqlgraph.CreateSpec{ - Table: configitem.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: configitem.FieldID, - }, - } + _spec = sqlgraph.NewCreateSpec(configitem.Table, sqlgraph.NewFieldSpec(configitem.FieldID, field.TypeInt)) ) if value, ok := cic.mutation.CreatedAt(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: configitem.FieldCreatedAt, - }) + _spec.SetField(configitem.FieldCreatedAt, field.TypeTime, value) _node.CreatedAt = &value } if value, ok := cic.mutation.UpdatedAt(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: configitem.FieldUpdatedAt, - }) + _spec.SetField(configitem.FieldUpdatedAt, field.TypeTime, value) _node.UpdatedAt = &value } if value, ok := cic.mutation.Name(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: configitem.FieldName, - }) + _spec.SetField(configitem.FieldName, field.TypeString, value) _node.Name = value } if value, ok := cic.mutation.Value(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: configitem.FieldValue, - }) + 
_spec.SetField(configitem.FieldValue, field.TypeString, value) _node.Value = value } return _node, _spec @@ -220,11 +161,15 @@ func (cic *ConfigItemCreate) createSpec() (*ConfigItem, *sqlgraph.CreateSpec) { // ConfigItemCreateBulk is the builder for creating many ConfigItem entities in bulk. type ConfigItemCreateBulk struct { config + err error builders []*ConfigItemCreate } // Save creates the ConfigItem entities in the database. func (cicb *ConfigItemCreateBulk) Save(ctx context.Context) ([]*ConfigItem, error) { + if cicb.err != nil { + return nil, cicb.err + } specs := make([]*sqlgraph.CreateSpec, len(cicb.builders)) nodes := make([]*ConfigItem, len(cicb.builders)) mutators := make([]Mutator, len(cicb.builders)) @@ -241,8 +186,8 @@ func (cicb *ConfigItemCreateBulk) Save(ctx context.Context) ([]*ConfigItem, erro return nil, err } builder.mutation = mutation - nodes[i], specs[i] = builder.createSpec() var err error + nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { _, err = mutators[i+1].Mutate(root, cicb.builders[i+1].mutation) } else { diff --git a/pkg/database/ent/configitem_delete.go b/pkg/database/ent/configitem_delete.go index 223fa9eefbf..a5dc811f60d 100644 --- a/pkg/database/ent/configitem_delete.go +++ b/pkg/database/ent/configitem_delete.go @@ -4,7 +4,6 @@ package ent import ( "context" - "fmt" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" @@ -28,34 +27,7 @@ func (cid *ConfigItemDelete) Where(ps ...predicate.ConfigItem) *ConfigItemDelete // Exec executes the deletion query and returns how many vertices were deleted. 
func (cid *ConfigItemDelete) Exec(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(cid.hooks) == 0 { - affected, err = cid.sqlExec(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*ConfigItemMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - cid.mutation = mutation - affected, err = cid.sqlExec(ctx) - mutation.done = true - return affected, err - }) - for i := len(cid.hooks) - 1; i >= 0; i-- { - if cid.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = cid.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, cid.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, cid.sqlExec, cid.mutation, cid.hooks) } // ExecX is like Exec, but panics if an error occurs. @@ -68,15 +40,7 @@ func (cid *ConfigItemDelete) ExecX(ctx context.Context) int { } func (cid *ConfigItemDelete) sqlExec(ctx context.Context) (int, error) { - _spec := &sqlgraph.DeleteSpec{ - Node: &sqlgraph.NodeSpec{ - Table: configitem.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: configitem.FieldID, - }, - }, - } + _spec := sqlgraph.NewDeleteSpec(configitem.Table, sqlgraph.NewFieldSpec(configitem.FieldID, field.TypeInt)) if ps := cid.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -88,6 +52,7 @@ func (cid *ConfigItemDelete) sqlExec(ctx context.Context) (int, error) { if err != nil && sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } + cid.mutation.done = true return affected, err } @@ -96,6 +61,12 @@ type ConfigItemDeleteOne struct { cid *ConfigItemDelete } +// Where appends a list predicates to the ConfigItemDelete builder. +func (cido *ConfigItemDeleteOne) Where(ps ...predicate.ConfigItem) *ConfigItemDeleteOne { + cido.cid.mutation.Where(ps...) 
+ return cido +} + // Exec executes the deletion query. func (cido *ConfigItemDeleteOne) Exec(ctx context.Context) error { n, err := cido.cid.Exec(ctx) @@ -111,5 +82,7 @@ func (cido *ConfigItemDeleteOne) Exec(ctx context.Context) error { // ExecX is like Exec, but panics if an error occurs. func (cido *ConfigItemDeleteOne) ExecX(ctx context.Context) { - cido.cid.ExecX(ctx) + if err := cido.Exec(ctx); err != nil { + panic(err) + } } diff --git a/pkg/database/ent/configitem_query.go b/pkg/database/ent/configitem_query.go index 6c9e6732a9b..f68b8953ddb 100644 --- a/pkg/database/ent/configitem_query.go +++ b/pkg/database/ent/configitem_query.go @@ -17,11 +17,9 @@ import ( // ConfigItemQuery is the builder for querying ConfigItem entities. type ConfigItemQuery struct { config - limit *int - offset *int - unique *bool - order []OrderFunc - fields []string + ctx *QueryContext + order []configitem.OrderOption + inters []Interceptor predicates []predicate.ConfigItem // intermediate query (i.e. traversal path). sql *sql.Selector @@ -34,27 +32,27 @@ func (ciq *ConfigItemQuery) Where(ps ...predicate.ConfigItem) *ConfigItemQuery { return ciq } -// Limit adds a limit step to the query. +// Limit the number of records to be returned by this query. func (ciq *ConfigItemQuery) Limit(limit int) *ConfigItemQuery { - ciq.limit = &limit + ciq.ctx.Limit = &limit return ciq } -// Offset adds an offset step to the query. +// Offset to start from. func (ciq *ConfigItemQuery) Offset(offset int) *ConfigItemQuery { - ciq.offset = &offset + ciq.ctx.Offset = &offset return ciq } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. func (ciq *ConfigItemQuery) Unique(unique bool) *ConfigItemQuery { - ciq.unique = &unique + ciq.ctx.Unique = &unique return ciq } -// Order adds an order step to the query. 
-func (ciq *ConfigItemQuery) Order(o ...OrderFunc) *ConfigItemQuery { +// Order specifies how the records should be ordered. +func (ciq *ConfigItemQuery) Order(o ...configitem.OrderOption) *ConfigItemQuery { ciq.order = append(ciq.order, o...) return ciq } @@ -62,7 +60,7 @@ func (ciq *ConfigItemQuery) Order(o ...OrderFunc) *ConfigItemQuery { // First returns the first ConfigItem entity from the query. // Returns a *NotFoundError when no ConfigItem was found. func (ciq *ConfigItemQuery) First(ctx context.Context) (*ConfigItem, error) { - nodes, err := ciq.Limit(1).All(ctx) + nodes, err := ciq.Limit(1).All(setContextOp(ctx, ciq.ctx, "First")) if err != nil { return nil, err } @@ -85,7 +83,7 @@ func (ciq *ConfigItemQuery) FirstX(ctx context.Context) *ConfigItem { // Returns a *NotFoundError when no ConfigItem ID was found. func (ciq *ConfigItemQuery) FirstID(ctx context.Context) (id int, err error) { var ids []int - if ids, err = ciq.Limit(1).IDs(ctx); err != nil { + if ids, err = ciq.Limit(1).IDs(setContextOp(ctx, ciq.ctx, "FirstID")); err != nil { return } if len(ids) == 0 { @@ -108,7 +106,7 @@ func (ciq *ConfigItemQuery) FirstIDX(ctx context.Context) int { // Returns a *NotSingularError when more than one ConfigItem entity is found. // Returns a *NotFoundError when no ConfigItem entities are found. func (ciq *ConfigItemQuery) Only(ctx context.Context) (*ConfigItem, error) { - nodes, err := ciq.Limit(2).All(ctx) + nodes, err := ciq.Limit(2).All(setContextOp(ctx, ciq.ctx, "Only")) if err != nil { return nil, err } @@ -136,7 +134,7 @@ func (ciq *ConfigItemQuery) OnlyX(ctx context.Context) *ConfigItem { // Returns a *NotFoundError when no entities are found. 
func (ciq *ConfigItemQuery) OnlyID(ctx context.Context) (id int, err error) { var ids []int - if ids, err = ciq.Limit(2).IDs(ctx); err != nil { + if ids, err = ciq.Limit(2).IDs(setContextOp(ctx, ciq.ctx, "OnlyID")); err != nil { return } switch len(ids) { @@ -161,10 +159,12 @@ func (ciq *ConfigItemQuery) OnlyIDX(ctx context.Context) int { // All executes the query and returns a list of ConfigItems. func (ciq *ConfigItemQuery) All(ctx context.Context) ([]*ConfigItem, error) { + ctx = setContextOp(ctx, ciq.ctx, "All") if err := ciq.prepareQuery(ctx); err != nil { return nil, err } - return ciq.sqlAll(ctx) + qr := querierAll[[]*ConfigItem, *ConfigItemQuery]() + return withInterceptors[[]*ConfigItem](ctx, ciq, qr, ciq.inters) } // AllX is like All, but panics if an error occurs. @@ -177,9 +177,12 @@ func (ciq *ConfigItemQuery) AllX(ctx context.Context) []*ConfigItem { } // IDs executes the query and returns a list of ConfigItem IDs. -func (ciq *ConfigItemQuery) IDs(ctx context.Context) ([]int, error) { - var ids []int - if err := ciq.Select(configitem.FieldID).Scan(ctx, &ids); err != nil { +func (ciq *ConfigItemQuery) IDs(ctx context.Context) (ids []int, err error) { + if ciq.ctx.Unique == nil && ciq.path != nil { + ciq.Unique(true) + } + ctx = setContextOp(ctx, ciq.ctx, "IDs") + if err = ciq.Select(configitem.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil @@ -196,10 +199,11 @@ func (ciq *ConfigItemQuery) IDsX(ctx context.Context) []int { // Count returns the count of the given query. func (ciq *ConfigItemQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, ciq.ctx, "Count") if err := ciq.prepareQuery(ctx); err != nil { return 0, err } - return ciq.sqlCount(ctx) + return withInterceptors[int](ctx, ciq, querierCount[*ConfigItemQuery](), ciq.inters) } // CountX is like Count, but panics if an error occurs. 
@@ -213,10 +217,15 @@ func (ciq *ConfigItemQuery) CountX(ctx context.Context) int { // Exist returns true if the query has elements in the graph. func (ciq *ConfigItemQuery) Exist(ctx context.Context) (bool, error) { - if err := ciq.prepareQuery(ctx); err != nil { - return false, err + ctx = setContextOp(ctx, ciq.ctx, "Exist") + switch _, err := ciq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil } - return ciq.sqlExist(ctx) } // ExistX is like Exist, but panics if an error occurs. @@ -236,14 +245,13 @@ func (ciq *ConfigItemQuery) Clone() *ConfigItemQuery { } return &ConfigItemQuery{ config: ciq.config, - limit: ciq.limit, - offset: ciq.offset, - order: append([]OrderFunc{}, ciq.order...), + ctx: ciq.ctx.Clone(), + order: append([]configitem.OrderOption{}, ciq.order...), + inters: append([]Interceptor{}, ciq.inters...), predicates: append([]predicate.ConfigItem{}, ciq.predicates...), // clone intermediate query. - sql: ciq.sql.Clone(), - path: ciq.path, - unique: ciq.unique, + sql: ciq.sql.Clone(), + path: ciq.path, } } @@ -262,16 +270,11 @@ func (ciq *ConfigItemQuery) Clone() *ConfigItemQuery { // Aggregate(ent.Count()). // Scan(ctx, &v) func (ciq *ConfigItemQuery) GroupBy(field string, fields ...string) *ConfigItemGroupBy { - grbuild := &ConfigItemGroupBy{config: ciq.config} - grbuild.fields = append([]string{field}, fields...) - grbuild.path = func(ctx context.Context) (prev *sql.Selector, err error) { - if err := ciq.prepareQuery(ctx); err != nil { - return nil, err - } - return ciq.sqlQuery(ctx), nil - } + ciq.ctx.Fields = append([]string{field}, fields...) 
+ grbuild := &ConfigItemGroupBy{build: ciq} + grbuild.flds = &ciq.ctx.Fields grbuild.label = configitem.Label - grbuild.flds, grbuild.scan = &grbuild.fields, grbuild.Scan + grbuild.scan = grbuild.Scan return grbuild } @@ -288,15 +291,30 @@ func (ciq *ConfigItemQuery) GroupBy(field string, fields ...string) *ConfigItemG // Select(configitem.FieldCreatedAt). // Scan(ctx, &v) func (ciq *ConfigItemQuery) Select(fields ...string) *ConfigItemSelect { - ciq.fields = append(ciq.fields, fields...) - selbuild := &ConfigItemSelect{ConfigItemQuery: ciq} - selbuild.label = configitem.Label - selbuild.flds, selbuild.scan = &ciq.fields, selbuild.Scan - return selbuild + ciq.ctx.Fields = append(ciq.ctx.Fields, fields...) + sbuild := &ConfigItemSelect{ConfigItemQuery: ciq} + sbuild.label = configitem.Label + sbuild.flds, sbuild.scan = &ciq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a ConfigItemSelect configured with the given aggregations. +func (ciq *ConfigItemQuery) Aggregate(fns ...AggregateFunc) *ConfigItemSelect { + return ciq.Select().Aggregate(fns...) 
} func (ciq *ConfigItemQuery) prepareQuery(ctx context.Context) error { - for _, f := range ciq.fields { + for _, inter := range ciq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, ciq); err != nil { + return err + } + } + } + for _, f := range ciq.ctx.Fields { if !configitem.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } @@ -338,41 +356,22 @@ func (ciq *ConfigItemQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]* func (ciq *ConfigItemQuery) sqlCount(ctx context.Context) (int, error) { _spec := ciq.querySpec() - _spec.Node.Columns = ciq.fields - if len(ciq.fields) > 0 { - _spec.Unique = ciq.unique != nil && *ciq.unique + _spec.Node.Columns = ciq.ctx.Fields + if len(ciq.ctx.Fields) > 0 { + _spec.Unique = ciq.ctx.Unique != nil && *ciq.ctx.Unique } return sqlgraph.CountNodes(ctx, ciq.driver, _spec) } -func (ciq *ConfigItemQuery) sqlExist(ctx context.Context) (bool, error) { - switch _, err := ciq.FirstID(ctx); { - case IsNotFound(err): - return false, nil - case err != nil: - return false, fmt.Errorf("ent: check existence: %w", err) - default: - return true, nil - } -} - func (ciq *ConfigItemQuery) querySpec() *sqlgraph.QuerySpec { - _spec := &sqlgraph.QuerySpec{ - Node: &sqlgraph.NodeSpec{ - Table: configitem.Table, - Columns: configitem.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: configitem.FieldID, - }, - }, - From: ciq.sql, - Unique: true, - } - if unique := ciq.unique; unique != nil { + _spec := sqlgraph.NewQuerySpec(configitem.Table, configitem.Columns, sqlgraph.NewFieldSpec(configitem.FieldID, field.TypeInt)) + _spec.From = ciq.sql + if unique := ciq.ctx.Unique; unique != nil { _spec.Unique = *unique + } else if ciq.path != nil { + _spec.Unique = true } - if fields := ciq.fields; len(fields) > 0 { + if fields := ciq.ctx.Fields; 
len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, configitem.FieldID) for i := range fields { @@ -388,10 +387,10 @@ func (ciq *ConfigItemQuery) querySpec() *sqlgraph.QuerySpec { } } } - if limit := ciq.limit; limit != nil { + if limit := ciq.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := ciq.offset; offset != nil { + if offset := ciq.ctx.Offset; offset != nil { _spec.Offset = *offset } if ps := ciq.order; len(ps) > 0 { @@ -407,7 +406,7 @@ func (ciq *ConfigItemQuery) querySpec() *sqlgraph.QuerySpec { func (ciq *ConfigItemQuery) sqlQuery(ctx context.Context) *sql.Selector { builder := sql.Dialect(ciq.driver.Dialect()) t1 := builder.Table(configitem.Table) - columns := ciq.fields + columns := ciq.ctx.Fields if len(columns) == 0 { columns = configitem.Columns } @@ -416,7 +415,7 @@ func (ciq *ConfigItemQuery) sqlQuery(ctx context.Context) *sql.Selector { selector = ciq.sql selector.Select(selector.Columns(columns...)...) } - if ciq.unique != nil && *ciq.unique { + if ciq.ctx.Unique != nil && *ciq.ctx.Unique { selector.Distinct() } for _, p := range ciq.predicates { @@ -425,12 +424,12 @@ func (ciq *ConfigItemQuery) sqlQuery(ctx context.Context) *sql.Selector { for _, p := range ciq.order { p(selector) } - if offset := ciq.offset; offset != nil { + if offset := ciq.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := ciq.limit; limit != nil { + if limit := ciq.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector @@ -438,13 +437,8 @@ func (ciq *ConfigItemQuery) sqlQuery(ctx context.Context) *sql.Selector { // ConfigItemGroupBy is the group-by builder for ConfigItem entities. type ConfigItemGroupBy struct { - config selector - fields []string - fns []AggregateFunc - // intermediate query (i.e. traversal path). 
- sql *sql.Selector - path func(context.Context) (*sql.Selector, error) + build *ConfigItemQuery } // Aggregate adds the given aggregation functions to the group-by query. @@ -453,74 +447,77 @@ func (cigb *ConfigItemGroupBy) Aggregate(fns ...AggregateFunc) *ConfigItemGroupB return cigb } -// Scan applies the group-by query and scans the result into the given value. +// Scan applies the selector query and scans the result into the given value. func (cigb *ConfigItemGroupBy) Scan(ctx context.Context, v any) error { - query, err := cigb.path(ctx) - if err != nil { + ctx = setContextOp(ctx, cigb.build.ctx, "GroupBy") + if err := cigb.build.prepareQuery(ctx); err != nil { return err } - cigb.sql = query - return cigb.sqlScan(ctx, v) + return scanWithInterceptors[*ConfigItemQuery, *ConfigItemGroupBy](ctx, cigb.build, cigb, cigb.build.inters, v) } -func (cigb *ConfigItemGroupBy) sqlScan(ctx context.Context, v any) error { - for _, f := range cigb.fields { - if !configitem.ValidColumn(f) { - return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} +func (cigb *ConfigItemGroupBy) sqlScan(ctx context.Context, root *ConfigItemQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(cigb.fns)) + for _, fn := range cigb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*cigb.flds)+len(cigb.fns)) + for _, f := range *cigb.flds { + columns = append(columns, selector.C(f)) } + columns = append(columns, aggregation...) + selector.Select(columns...) } - selector := cigb.sqlQuery() + selector.GroupBy(selector.Columns(*cigb.flds...)...) 
if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := cigb.driver.Query(ctx, query, args, rows); err != nil { + if err := cigb.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() return sql.ScanSlice(rows, v) } -func (cigb *ConfigItemGroupBy) sqlQuery() *sql.Selector { - selector := cigb.sql.Select() - aggregation := make([]string, 0, len(cigb.fns)) - for _, fn := range cigb.fns { - aggregation = append(aggregation, fn(selector)) - } - // If no columns were selected in a custom aggregation function, the default - // selection is the fields used for "group-by", and the aggregation functions. - if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(cigb.fields)+len(cigb.fns)) - for _, f := range cigb.fields { - columns = append(columns, selector.C(f)) - } - columns = append(columns, aggregation...) - selector.Select(columns...) - } - return selector.GroupBy(selector.Columns(cigb.fields...)...) -} - // ConfigItemSelect is the builder for selecting fields of ConfigItem entities. type ConfigItemSelect struct { *ConfigItemQuery selector - // intermediate query (i.e. traversal path). - sql *sql.Selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (cis *ConfigItemSelect) Aggregate(fns ...AggregateFunc) *ConfigItemSelect { + cis.fns = append(cis.fns, fns...) + return cis } // Scan applies the selector query and scans the result into the given value. 
func (cis *ConfigItemSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, cis.ctx, "Select") if err := cis.prepareQuery(ctx); err != nil { return err } - cis.sql = cis.ConfigItemQuery.sqlQuery(ctx) - return cis.sqlScan(ctx, v) + return scanWithInterceptors[*ConfigItemQuery, *ConfigItemSelect](ctx, cis.ConfigItemQuery, cis, cis.inters, v) } -func (cis *ConfigItemSelect) sqlScan(ctx context.Context, v any) error { +func (cis *ConfigItemSelect) sqlScan(ctx context.Context, root *ConfigItemQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(cis.fns)) + for _, fn := range cis.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*cis.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } rows := &sql.Rows{} - query, args := cis.sql.Query() + query, args := selector.Query() if err := cis.driver.Query(ctx, query, args, rows); err != nil { return err } diff --git a/pkg/database/ent/configitem_update.go b/pkg/database/ent/configitem_update.go index e591347a0c3..0db3a0b5233 100644 --- a/pkg/database/ent/configitem_update.go +++ b/pkg/database/ent/configitem_update.go @@ -71,35 +71,8 @@ func (ciu *ConfigItemUpdate) Mutation() *ConfigItemMutation { // Save executes the query and returns the number of nodes affected by the update operation. 
func (ciu *ConfigItemUpdate) Save(ctx context.Context) (int, error) { - var ( - err error - affected int - ) ciu.defaults() - if len(ciu.hooks) == 0 { - affected, err = ciu.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*ConfigItemMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - ciu.mutation = mutation - affected, err = ciu.sqlSave(ctx) - mutation.done = true - return affected, err - }) - for i := len(ciu.hooks) - 1; i >= 0; i-- { - if ciu.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = ciu.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, ciu.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, ciu.sqlSave, ciu.mutation, ciu.hooks) } // SaveX is like Save, but panics if an error occurs. @@ -137,16 +110,7 @@ func (ciu *ConfigItemUpdate) defaults() { } func (ciu *ConfigItemUpdate) sqlSave(ctx context.Context) (n int, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: configitem.Table, - Columns: configitem.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: configitem.FieldID, - }, - }, - } + _spec := sqlgraph.NewUpdateSpec(configitem.Table, configitem.Columns, sqlgraph.NewFieldSpec(configitem.FieldID, field.TypeInt)) if ps := ciu.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -155,44 +119,22 @@ func (ciu *ConfigItemUpdate) sqlSave(ctx context.Context) (n int, err error) { } } if value, ok := ciu.mutation.CreatedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: configitem.FieldCreatedAt, - }) + _spec.SetField(configitem.FieldCreatedAt, field.TypeTime, value) } if ciu.mutation.CreatedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, 
&sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: configitem.FieldCreatedAt, - }) + _spec.ClearField(configitem.FieldCreatedAt, field.TypeTime) } if value, ok := ciu.mutation.UpdatedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: configitem.FieldUpdatedAt, - }) + _spec.SetField(configitem.FieldUpdatedAt, field.TypeTime, value) } if ciu.mutation.UpdatedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: configitem.FieldUpdatedAt, - }) + _spec.ClearField(configitem.FieldUpdatedAt, field.TypeTime) } if value, ok := ciu.mutation.Name(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: configitem.FieldName, - }) + _spec.SetField(configitem.FieldName, field.TypeString, value) } if value, ok := ciu.mutation.Value(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: configitem.FieldValue, - }) + _spec.SetField(configitem.FieldValue, field.TypeString, value) } if n, err = sqlgraph.UpdateNodes(ctx, ciu.driver, _spec); err != nil { if _, ok := err.(*sqlgraph.NotFoundError); ok { @@ -202,6 +144,7 @@ func (ciu *ConfigItemUpdate) sqlSave(ctx context.Context) (n int, err error) { } return 0, err } + ciu.mutation.done = true return n, nil } @@ -254,6 +197,12 @@ func (ciuo *ConfigItemUpdateOne) Mutation() *ConfigItemMutation { return ciuo.mutation } +// Where appends a list predicates to the ConfigItemUpdate builder. +func (ciuo *ConfigItemUpdateOne) Where(ps ...predicate.ConfigItem) *ConfigItemUpdateOne { + ciuo.mutation.Where(ps...) + return ciuo +} + // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. 
func (ciuo *ConfigItemUpdateOne) Select(field string, fields ...string) *ConfigItemUpdateOne { @@ -263,41 +212,8 @@ func (ciuo *ConfigItemUpdateOne) Select(field string, fields ...string) *ConfigI // Save executes the query and returns the updated ConfigItem entity. func (ciuo *ConfigItemUpdateOne) Save(ctx context.Context) (*ConfigItem, error) { - var ( - err error - node *ConfigItem - ) ciuo.defaults() - if len(ciuo.hooks) == 0 { - node, err = ciuo.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*ConfigItemMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - ciuo.mutation = mutation - node, err = ciuo.sqlSave(ctx) - mutation.done = true - return node, err - }) - for i := len(ciuo.hooks) - 1; i >= 0; i-- { - if ciuo.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = ciuo.hooks[i](mut) - } - v, err := mut.Mutate(ctx, ciuo.mutation) - if err != nil { - return nil, err - } - nv, ok := v.(*ConfigItem) - if !ok { - return nil, fmt.Errorf("unexpected node type %T returned from ConfigItemMutation", v) - } - node = nv - } - return node, err + return withHooks(ctx, ciuo.sqlSave, ciuo.mutation, ciuo.hooks) } // SaveX is like Save, but panics if an error occurs. 
@@ -335,16 +251,7 @@ func (ciuo *ConfigItemUpdateOne) defaults() { } func (ciuo *ConfigItemUpdateOne) sqlSave(ctx context.Context) (_node *ConfigItem, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: configitem.Table, - Columns: configitem.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: configitem.FieldID, - }, - }, - } + _spec := sqlgraph.NewUpdateSpec(configitem.Table, configitem.Columns, sqlgraph.NewFieldSpec(configitem.FieldID, field.TypeInt)) id, ok := ciuo.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "ConfigItem.id" for update`)} @@ -370,44 +277,22 @@ func (ciuo *ConfigItemUpdateOne) sqlSave(ctx context.Context) (_node *ConfigItem } } if value, ok := ciuo.mutation.CreatedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: configitem.FieldCreatedAt, - }) + _spec.SetField(configitem.FieldCreatedAt, field.TypeTime, value) } if ciuo.mutation.CreatedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: configitem.FieldCreatedAt, - }) + _spec.ClearField(configitem.FieldCreatedAt, field.TypeTime) } if value, ok := ciuo.mutation.UpdatedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: configitem.FieldUpdatedAt, - }) + _spec.SetField(configitem.FieldUpdatedAt, field.TypeTime, value) } if ciuo.mutation.UpdatedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: configitem.FieldUpdatedAt, - }) + _spec.ClearField(configitem.FieldUpdatedAt, field.TypeTime) } if value, ok := ciuo.mutation.Name(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: configitem.FieldName, - }) + _spec.SetField(configitem.FieldName, 
field.TypeString, value) } if value, ok := ciuo.mutation.Value(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: configitem.FieldValue, - }) + _spec.SetField(configitem.FieldValue, field.TypeString, value) } _node = &ConfigItem{config: ciuo.config} _spec.Assign = _node.assignValues @@ -420,5 +305,6 @@ func (ciuo *ConfigItemUpdateOne) sqlSave(ctx context.Context) (_node *ConfigItem } return nil, err } + ciuo.mutation.done = true return _node, nil } diff --git a/pkg/database/ent/context.go b/pkg/database/ent/context.go deleted file mode 100644 index 7811bfa2349..00000000000 --- a/pkg/database/ent/context.go +++ /dev/null @@ -1,33 +0,0 @@ -// Code generated by ent, DO NOT EDIT. - -package ent - -import ( - "context" -) - -type clientCtxKey struct{} - -// FromContext returns a Client stored inside a context, or nil if there isn't one. -func FromContext(ctx context.Context) *Client { - c, _ := ctx.Value(clientCtxKey{}).(*Client) - return c -} - -// NewContext returns a new context with the given Client attached. -func NewContext(parent context.Context, c *Client) context.Context { - return context.WithValue(parent, clientCtxKey{}, c) -} - -type txCtxKey struct{} - -// TxFromContext returns a Tx stored inside a context, or nil if there isn't one. -func TxFromContext(ctx context.Context) *Tx { - tx, _ := ctx.Value(txCtxKey{}).(*Tx) - return tx -} - -// NewTxContext returns a new context with the given Tx attached. 
-func NewTxContext(parent context.Context, tx *Tx) context.Context { - return context.WithValue(parent, txCtxKey{}, tx) -} diff --git a/pkg/database/ent/decision.go b/pkg/database/ent/decision.go index c969e576724..8a08bc1dfd4 100644 --- a/pkg/database/ent/decision.go +++ b/pkg/database/ent/decision.go @@ -7,6 +7,7 @@ import ( "strings" "time" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" "github.com/crowdsecurity/crowdsec/pkg/database/ent/decision" @@ -51,7 +52,8 @@ type Decision struct { AlertDecisions int `json:"alert_decisions,omitempty"` // Edges holds the relations/edges for other nodes in the graph. // The values are being populated by the DecisionQuery when eager-loading is set. - Edges DecisionEdges `json:"edges"` + Edges DecisionEdges `json:"edges"` + selectValues sql.SelectValues } // DecisionEdges holds the relations/edges for other nodes in the graph. @@ -90,7 +92,7 @@ func (*Decision) scanValues(columns []string) ([]any, error) { case decision.FieldCreatedAt, decision.FieldUpdatedAt, decision.FieldUntil: values[i] = new(sql.NullTime) default: - return nil, fmt.Errorf("unexpected column %q for type Decision", columns[i]) + values[i] = new(sql.UnknownType) } } return values, nil @@ -209,21 +211,29 @@ func (d *Decision) assignValues(columns []string, values []any) error { } else if value.Valid { d.AlertDecisions = int(value.Int64) } + default: + d.selectValues.Set(columns[i], values[i]) } } return nil } +// GetValue returns the ent.Value that was dynamically selected and assigned to the Decision. +// This includes values selected through modifiers, order, etc. +func (d *Decision) GetValue(name string) (ent.Value, error) { + return d.selectValues.Get(name) +} + // QueryOwner queries the "owner" edge of the Decision entity. 
func (d *Decision) QueryOwner() *AlertQuery { - return (&DecisionClient{config: d.config}).QueryOwner(d) + return NewDecisionClient(d.config).QueryOwner(d) } // Update returns a builder for updating this Decision. // Note that you need to call Decision.Unwrap() before calling this method if this Decision // was returned from a transaction, and the transaction was committed or rolled back. func (d *Decision) Update() *DecisionUpdateOne { - return (&DecisionClient{config: d.config}).UpdateOne(d) + return NewDecisionClient(d.config).UpdateOne(d) } // Unwrap unwraps the Decision entity that was returned from a transaction after it was closed, @@ -301,9 +311,3 @@ func (d *Decision) String() string { // Decisions is a parsable slice of Decision. type Decisions []*Decision - -func (d Decisions) config(cfg config) { - for _i := range d { - d[_i].config = cfg - } -} diff --git a/pkg/database/ent/decision/decision.go b/pkg/database/ent/decision/decision.go index a0012d940a8..d9f67623bd8 100644 --- a/pkg/database/ent/decision/decision.go +++ b/pkg/database/ent/decision/decision.go @@ -4,6 +4,9 @@ package decision import ( "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" ) const ( @@ -99,3 +102,105 @@ var ( // DefaultSimulated holds the default value on creation for the "simulated" field. DefaultSimulated bool ) + +// OrderOption defines the ordering options for the Decision queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. 
+func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// ByUntil orders the results by the until field. +func ByUntil(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUntil, opts...).ToFunc() +} + +// ByScenario orders the results by the scenario field. +func ByScenario(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldScenario, opts...).ToFunc() +} + +// ByType orders the results by the type field. +func ByType(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldType, opts...).ToFunc() +} + +// ByStartIP orders the results by the start_ip field. +func ByStartIP(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldStartIP, opts...).ToFunc() +} + +// ByEndIP orders the results by the end_ip field. +func ByEndIP(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldEndIP, opts...).ToFunc() +} + +// ByStartSuffix orders the results by the start_suffix field. +func ByStartSuffix(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldStartSuffix, opts...).ToFunc() +} + +// ByEndSuffix orders the results by the end_suffix field. +func ByEndSuffix(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldEndSuffix, opts...).ToFunc() +} + +// ByIPSize orders the results by the ip_size field. +func ByIPSize(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldIPSize, opts...).ToFunc() +} + +// ByScope orders the results by the scope field. +func ByScope(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldScope, opts...).ToFunc() +} + +// ByValue orders the results by the value field. +func ByValue(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldValue, opts...).ToFunc() +} + +// ByOrigin orders the results by the origin field. 
+func ByOrigin(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldOrigin, opts...).ToFunc() +} + +// BySimulated orders the results by the simulated field. +func BySimulated(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSimulated, opts...).ToFunc() +} + +// ByUUID orders the results by the uuid field. +func ByUUID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUUID, opts...).ToFunc() +} + +// ByAlertDecisions orders the results by the alert_decisions field. +func ByAlertDecisions(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldAlertDecisions, opts...).ToFunc() +} + +// ByOwnerField orders the results by owner field. +func ByOwnerField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newOwnerStep(), sql.OrderByField(field, opts...)) + } +} +func newOwnerStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(OwnerInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn), + ) +} diff --git a/pkg/database/ent/decision/where.go b/pkg/database/ent/decision/where.go index 18716a4a7c1..36374f5714d 100644 --- a/pkg/database/ent/decision/where.go +++ b/pkg/database/ent/decision/where.go @@ -12,1481 +12,967 @@ import ( // ID filters vertices based on their ID field. func ID(id int) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.Decision(sql.FieldEQ(FieldID, id)) } // IDEQ applies the EQ predicate on the ID field. func IDEQ(id int) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.Decision(sql.FieldEQ(FieldID, id)) } // IDNEQ applies the NEQ predicate on the ID field. 
func IDNEQ(id int) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldID), id)) - }) + return predicate.Decision(sql.FieldNEQ(FieldID, id)) } // IDIn applies the In predicate on the ID field. func IDIn(ids ...int) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - v := make([]any, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.In(s.C(FieldID), v...)) - }) + return predicate.Decision(sql.FieldIn(FieldID, ids...)) } // IDNotIn applies the NotIn predicate on the ID field. func IDNotIn(ids ...int) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - v := make([]any, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.NotIn(s.C(FieldID), v...)) - }) + return predicate.Decision(sql.FieldNotIn(FieldID, ids...)) } // IDGT applies the GT predicate on the ID field. func IDGT(id int) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldID), id)) - }) + return predicate.Decision(sql.FieldGT(FieldID, id)) } // IDGTE applies the GTE predicate on the ID field. func IDGTE(id int) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldID), id)) - }) + return predicate.Decision(sql.FieldGTE(FieldID, id)) } // IDLT applies the LT predicate on the ID field. func IDLT(id int) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldID), id)) - }) + return predicate.Decision(sql.FieldLT(FieldID, id)) } // IDLTE applies the LTE predicate on the ID field. func IDLTE(id int) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldID), id)) - }) + return predicate.Decision(sql.FieldLTE(FieldID, id)) } // CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. 
func CreatedAt(v time.Time) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldCreatedAt), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldCreatedAt, v)) } // UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. func UpdatedAt(v time.Time) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldUpdatedAt), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldUpdatedAt, v)) } // Until applies equality check predicate on the "until" field. It's identical to UntilEQ. func Until(v time.Time) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldUntil), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldUntil, v)) } // Scenario applies equality check predicate on the "scenario" field. It's identical to ScenarioEQ. func Scenario(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldScenario), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldScenario, v)) } // Type applies equality check predicate on the "type" field. It's identical to TypeEQ. func Type(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldType), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldType, v)) } // StartIP applies equality check predicate on the "start_ip" field. It's identical to StartIPEQ. func StartIP(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldStartIP), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldStartIP, v)) } // EndIP applies equality check predicate on the "end_ip" field. It's identical to EndIPEQ. 
func EndIP(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldEndIP), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldEndIP, v)) } // StartSuffix applies equality check predicate on the "start_suffix" field. It's identical to StartSuffixEQ. func StartSuffix(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldStartSuffix), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldStartSuffix, v)) } // EndSuffix applies equality check predicate on the "end_suffix" field. It's identical to EndSuffixEQ. func EndSuffix(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldEndSuffix), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldEndSuffix, v)) } // IPSize applies equality check predicate on the "ip_size" field. It's identical to IPSizeEQ. func IPSize(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldIPSize), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldIPSize, v)) } // Scope applies equality check predicate on the "scope" field. It's identical to ScopeEQ. func Scope(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldScope), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldScope, v)) } // Value applies equality check predicate on the "value" field. It's identical to ValueEQ. func Value(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldValue), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldValue, v)) } // Origin applies equality check predicate on the "origin" field. It's identical to OriginEQ. 
func Origin(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldOrigin), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldOrigin, v)) } // Simulated applies equality check predicate on the "simulated" field. It's identical to SimulatedEQ. func Simulated(v bool) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSimulated), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldSimulated, v)) } // UUID applies equality check predicate on the "uuid" field. It's identical to UUIDEQ. func UUID(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldUUID), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldUUID, v)) } // AlertDecisions applies equality check predicate on the "alert_decisions" field. It's identical to AlertDecisionsEQ. func AlertDecisions(v int) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldAlertDecisions), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldAlertDecisions, v)) } // CreatedAtEQ applies the EQ predicate on the "created_at" field. func CreatedAtEQ(v time.Time) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldCreatedAt), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldCreatedAt, v)) } // CreatedAtNEQ applies the NEQ predicate on the "created_at" field. func CreatedAtNEQ(v time.Time) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldCreatedAt), v)) - }) + return predicate.Decision(sql.FieldNEQ(FieldCreatedAt, v)) } // CreatedAtIn applies the In predicate on the "created_at" field. 
func CreatedAtIn(vs ...time.Time) predicate.Decision { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldCreatedAt), v...)) - }) + return predicate.Decision(sql.FieldIn(FieldCreatedAt, vs...)) } // CreatedAtNotIn applies the NotIn predicate on the "created_at" field. func CreatedAtNotIn(vs ...time.Time) predicate.Decision { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldCreatedAt), v...)) - }) + return predicate.Decision(sql.FieldNotIn(FieldCreatedAt, vs...)) } // CreatedAtGT applies the GT predicate on the "created_at" field. func CreatedAtGT(v time.Time) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldCreatedAt), v)) - }) + return predicate.Decision(sql.FieldGT(FieldCreatedAt, v)) } // CreatedAtGTE applies the GTE predicate on the "created_at" field. func CreatedAtGTE(v time.Time) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldCreatedAt), v)) - }) + return predicate.Decision(sql.FieldGTE(FieldCreatedAt, v)) } // CreatedAtLT applies the LT predicate on the "created_at" field. func CreatedAtLT(v time.Time) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldCreatedAt), v)) - }) + return predicate.Decision(sql.FieldLT(FieldCreatedAt, v)) } // CreatedAtLTE applies the LTE predicate on the "created_at" field. func CreatedAtLTE(v time.Time) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldCreatedAt), v)) - }) + return predicate.Decision(sql.FieldLTE(FieldCreatedAt, v)) } // CreatedAtIsNil applies the IsNil predicate on the "created_at" field. 
func CreatedAtIsNil() predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldCreatedAt))) - }) + return predicate.Decision(sql.FieldIsNull(FieldCreatedAt)) } // CreatedAtNotNil applies the NotNil predicate on the "created_at" field. func CreatedAtNotNil() predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldCreatedAt))) - }) + return predicate.Decision(sql.FieldNotNull(FieldCreatedAt)) } // UpdatedAtEQ applies the EQ predicate on the "updated_at" field. func UpdatedAtEQ(v time.Time) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldUpdatedAt), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldUpdatedAt, v)) } // UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. func UpdatedAtNEQ(v time.Time) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldUpdatedAt), v)) - }) + return predicate.Decision(sql.FieldNEQ(FieldUpdatedAt, v)) } // UpdatedAtIn applies the In predicate on the "updated_at" field. func UpdatedAtIn(vs ...time.Time) predicate.Decision { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldUpdatedAt), v...)) - }) + return predicate.Decision(sql.FieldIn(FieldUpdatedAt, vs...)) } // UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. func UpdatedAtNotIn(vs ...time.Time) predicate.Decision { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldUpdatedAt), v...)) - }) + return predicate.Decision(sql.FieldNotIn(FieldUpdatedAt, vs...)) } // UpdatedAtGT applies the GT predicate on the "updated_at" field. 
func UpdatedAtGT(v time.Time) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldUpdatedAt), v)) - }) + return predicate.Decision(sql.FieldGT(FieldUpdatedAt, v)) } // UpdatedAtGTE applies the GTE predicate on the "updated_at" field. func UpdatedAtGTE(v time.Time) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldUpdatedAt), v)) - }) + return predicate.Decision(sql.FieldGTE(FieldUpdatedAt, v)) } // UpdatedAtLT applies the LT predicate on the "updated_at" field. func UpdatedAtLT(v time.Time) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldUpdatedAt), v)) - }) + return predicate.Decision(sql.FieldLT(FieldUpdatedAt, v)) } // UpdatedAtLTE applies the LTE predicate on the "updated_at" field. func UpdatedAtLTE(v time.Time) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldUpdatedAt), v)) - }) + return predicate.Decision(sql.FieldLTE(FieldUpdatedAt, v)) } // UpdatedAtIsNil applies the IsNil predicate on the "updated_at" field. func UpdatedAtIsNil() predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldUpdatedAt))) - }) + return predicate.Decision(sql.FieldIsNull(FieldUpdatedAt)) } // UpdatedAtNotNil applies the NotNil predicate on the "updated_at" field. func UpdatedAtNotNil() predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldUpdatedAt))) - }) + return predicate.Decision(sql.FieldNotNull(FieldUpdatedAt)) } // UntilEQ applies the EQ predicate on the "until" field. func UntilEQ(v time.Time) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldUntil), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldUntil, v)) } // UntilNEQ applies the NEQ predicate on the "until" field. 
func UntilNEQ(v time.Time) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldUntil), v)) - }) + return predicate.Decision(sql.FieldNEQ(FieldUntil, v)) } // UntilIn applies the In predicate on the "until" field. func UntilIn(vs ...time.Time) predicate.Decision { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldUntil), v...)) - }) + return predicate.Decision(sql.FieldIn(FieldUntil, vs...)) } // UntilNotIn applies the NotIn predicate on the "until" field. func UntilNotIn(vs ...time.Time) predicate.Decision { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldUntil), v...)) - }) + return predicate.Decision(sql.FieldNotIn(FieldUntil, vs...)) } // UntilGT applies the GT predicate on the "until" field. func UntilGT(v time.Time) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldUntil), v)) - }) + return predicate.Decision(sql.FieldGT(FieldUntil, v)) } // UntilGTE applies the GTE predicate on the "until" field. func UntilGTE(v time.Time) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldUntil), v)) - }) + return predicate.Decision(sql.FieldGTE(FieldUntil, v)) } // UntilLT applies the LT predicate on the "until" field. func UntilLT(v time.Time) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldUntil), v)) - }) + return predicate.Decision(sql.FieldLT(FieldUntil, v)) } // UntilLTE applies the LTE predicate on the "until" field. func UntilLTE(v time.Time) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldUntil), v)) - }) + return predicate.Decision(sql.FieldLTE(FieldUntil, v)) } // UntilIsNil applies the IsNil predicate on the "until" field. 
func UntilIsNil() predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldUntil))) - }) + return predicate.Decision(sql.FieldIsNull(FieldUntil)) } // UntilNotNil applies the NotNil predicate on the "until" field. func UntilNotNil() predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldUntil))) - }) + return predicate.Decision(sql.FieldNotNull(FieldUntil)) } // ScenarioEQ applies the EQ predicate on the "scenario" field. func ScenarioEQ(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldScenario), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldScenario, v)) } // ScenarioNEQ applies the NEQ predicate on the "scenario" field. func ScenarioNEQ(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldScenario), v)) - }) + return predicate.Decision(sql.FieldNEQ(FieldScenario, v)) } // ScenarioIn applies the In predicate on the "scenario" field. func ScenarioIn(vs ...string) predicate.Decision { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldScenario), v...)) - }) + return predicate.Decision(sql.FieldIn(FieldScenario, vs...)) } // ScenarioNotIn applies the NotIn predicate on the "scenario" field. func ScenarioNotIn(vs ...string) predicate.Decision { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldScenario), v...)) - }) + return predicate.Decision(sql.FieldNotIn(FieldScenario, vs...)) } // ScenarioGT applies the GT predicate on the "scenario" field. 
func ScenarioGT(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldScenario), v)) - }) + return predicate.Decision(sql.FieldGT(FieldScenario, v)) } // ScenarioGTE applies the GTE predicate on the "scenario" field. func ScenarioGTE(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldScenario), v)) - }) + return predicate.Decision(sql.FieldGTE(FieldScenario, v)) } // ScenarioLT applies the LT predicate on the "scenario" field. func ScenarioLT(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldScenario), v)) - }) + return predicate.Decision(sql.FieldLT(FieldScenario, v)) } // ScenarioLTE applies the LTE predicate on the "scenario" field. func ScenarioLTE(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldScenario), v)) - }) + return predicate.Decision(sql.FieldLTE(FieldScenario, v)) } // ScenarioContains applies the Contains predicate on the "scenario" field. func ScenarioContains(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldScenario), v)) - }) + return predicate.Decision(sql.FieldContains(FieldScenario, v)) } // ScenarioHasPrefix applies the HasPrefix predicate on the "scenario" field. func ScenarioHasPrefix(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldScenario), v)) - }) + return predicate.Decision(sql.FieldHasPrefix(FieldScenario, v)) } // ScenarioHasSuffix applies the HasSuffix predicate on the "scenario" field. 
func ScenarioHasSuffix(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldScenario), v)) - }) + return predicate.Decision(sql.FieldHasSuffix(FieldScenario, v)) } // ScenarioEqualFold applies the EqualFold predicate on the "scenario" field. func ScenarioEqualFold(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldScenario), v)) - }) + return predicate.Decision(sql.FieldEqualFold(FieldScenario, v)) } // ScenarioContainsFold applies the ContainsFold predicate on the "scenario" field. func ScenarioContainsFold(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldScenario), v)) - }) + return predicate.Decision(sql.FieldContainsFold(FieldScenario, v)) } // TypeEQ applies the EQ predicate on the "type" field. func TypeEQ(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldType), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldType, v)) } // TypeNEQ applies the NEQ predicate on the "type" field. func TypeNEQ(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldType), v)) - }) + return predicate.Decision(sql.FieldNEQ(FieldType, v)) } // TypeIn applies the In predicate on the "type" field. func TypeIn(vs ...string) predicate.Decision { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldType), v...)) - }) + return predicate.Decision(sql.FieldIn(FieldType, vs...)) } // TypeNotIn applies the NotIn predicate on the "type" field. 
func TypeNotIn(vs ...string) predicate.Decision { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldType), v...)) - }) + return predicate.Decision(sql.FieldNotIn(FieldType, vs...)) } // TypeGT applies the GT predicate on the "type" field. func TypeGT(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldType), v)) - }) + return predicate.Decision(sql.FieldGT(FieldType, v)) } // TypeGTE applies the GTE predicate on the "type" field. func TypeGTE(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldType), v)) - }) + return predicate.Decision(sql.FieldGTE(FieldType, v)) } // TypeLT applies the LT predicate on the "type" field. func TypeLT(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldType), v)) - }) + return predicate.Decision(sql.FieldLT(FieldType, v)) } // TypeLTE applies the LTE predicate on the "type" field. func TypeLTE(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldType), v)) - }) + return predicate.Decision(sql.FieldLTE(FieldType, v)) } // TypeContains applies the Contains predicate on the "type" field. func TypeContains(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldType), v)) - }) + return predicate.Decision(sql.FieldContains(FieldType, v)) } // TypeHasPrefix applies the HasPrefix predicate on the "type" field. func TypeHasPrefix(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldType), v)) - }) + return predicate.Decision(sql.FieldHasPrefix(FieldType, v)) } // TypeHasSuffix applies the HasSuffix predicate on the "type" field. 
func TypeHasSuffix(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldType), v)) - }) + return predicate.Decision(sql.FieldHasSuffix(FieldType, v)) } // TypeEqualFold applies the EqualFold predicate on the "type" field. func TypeEqualFold(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldType), v)) - }) + return predicate.Decision(sql.FieldEqualFold(FieldType, v)) } // TypeContainsFold applies the ContainsFold predicate on the "type" field. func TypeContainsFold(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldType), v)) - }) + return predicate.Decision(sql.FieldContainsFold(FieldType, v)) } // StartIPEQ applies the EQ predicate on the "start_ip" field. func StartIPEQ(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldStartIP), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldStartIP, v)) } // StartIPNEQ applies the NEQ predicate on the "start_ip" field. func StartIPNEQ(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldStartIP), v)) - }) + return predicate.Decision(sql.FieldNEQ(FieldStartIP, v)) } // StartIPIn applies the In predicate on the "start_ip" field. func StartIPIn(vs ...int64) predicate.Decision { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldStartIP), v...)) - }) + return predicate.Decision(sql.FieldIn(FieldStartIP, vs...)) } // StartIPNotIn applies the NotIn predicate on the "start_ip" field. 
func StartIPNotIn(vs ...int64) predicate.Decision { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldStartIP), v...)) - }) + return predicate.Decision(sql.FieldNotIn(FieldStartIP, vs...)) } // StartIPGT applies the GT predicate on the "start_ip" field. func StartIPGT(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldStartIP), v)) - }) + return predicate.Decision(sql.FieldGT(FieldStartIP, v)) } // StartIPGTE applies the GTE predicate on the "start_ip" field. func StartIPGTE(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldStartIP), v)) - }) + return predicate.Decision(sql.FieldGTE(FieldStartIP, v)) } // StartIPLT applies the LT predicate on the "start_ip" field. func StartIPLT(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldStartIP), v)) - }) + return predicate.Decision(sql.FieldLT(FieldStartIP, v)) } // StartIPLTE applies the LTE predicate on the "start_ip" field. func StartIPLTE(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldStartIP), v)) - }) + return predicate.Decision(sql.FieldLTE(FieldStartIP, v)) } // StartIPIsNil applies the IsNil predicate on the "start_ip" field. func StartIPIsNil() predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldStartIP))) - }) + return predicate.Decision(sql.FieldIsNull(FieldStartIP)) } // StartIPNotNil applies the NotNil predicate on the "start_ip" field. func StartIPNotNil() predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldStartIP))) - }) + return predicate.Decision(sql.FieldNotNull(FieldStartIP)) } // EndIPEQ applies the EQ predicate on the "end_ip" field. 
func EndIPEQ(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldEndIP), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldEndIP, v)) } // EndIPNEQ applies the NEQ predicate on the "end_ip" field. func EndIPNEQ(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldEndIP), v)) - }) + return predicate.Decision(sql.FieldNEQ(FieldEndIP, v)) } // EndIPIn applies the In predicate on the "end_ip" field. func EndIPIn(vs ...int64) predicate.Decision { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldEndIP), v...)) - }) + return predicate.Decision(sql.FieldIn(FieldEndIP, vs...)) } // EndIPNotIn applies the NotIn predicate on the "end_ip" field. func EndIPNotIn(vs ...int64) predicate.Decision { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldEndIP), v...)) - }) + return predicate.Decision(sql.FieldNotIn(FieldEndIP, vs...)) } // EndIPGT applies the GT predicate on the "end_ip" field. func EndIPGT(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldEndIP), v)) - }) + return predicate.Decision(sql.FieldGT(FieldEndIP, v)) } // EndIPGTE applies the GTE predicate on the "end_ip" field. func EndIPGTE(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldEndIP), v)) - }) + return predicate.Decision(sql.FieldGTE(FieldEndIP, v)) } // EndIPLT applies the LT predicate on the "end_ip" field. func EndIPLT(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldEndIP), v)) - }) + return predicate.Decision(sql.FieldLT(FieldEndIP, v)) } // EndIPLTE applies the LTE predicate on the "end_ip" field. 
func EndIPLTE(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldEndIP), v)) - }) + return predicate.Decision(sql.FieldLTE(FieldEndIP, v)) } // EndIPIsNil applies the IsNil predicate on the "end_ip" field. func EndIPIsNil() predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldEndIP))) - }) + return predicate.Decision(sql.FieldIsNull(FieldEndIP)) } // EndIPNotNil applies the NotNil predicate on the "end_ip" field. func EndIPNotNil() predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldEndIP))) - }) + return predicate.Decision(sql.FieldNotNull(FieldEndIP)) } // StartSuffixEQ applies the EQ predicate on the "start_suffix" field. func StartSuffixEQ(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldStartSuffix), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldStartSuffix, v)) } // StartSuffixNEQ applies the NEQ predicate on the "start_suffix" field. func StartSuffixNEQ(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldStartSuffix), v)) - }) + return predicate.Decision(sql.FieldNEQ(FieldStartSuffix, v)) } // StartSuffixIn applies the In predicate on the "start_suffix" field. func StartSuffixIn(vs ...int64) predicate.Decision { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldStartSuffix), v...)) - }) + return predicate.Decision(sql.FieldIn(FieldStartSuffix, vs...)) } // StartSuffixNotIn applies the NotIn predicate on the "start_suffix" field. 
func StartSuffixNotIn(vs ...int64) predicate.Decision { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldStartSuffix), v...)) - }) + return predicate.Decision(sql.FieldNotIn(FieldStartSuffix, vs...)) } // StartSuffixGT applies the GT predicate on the "start_suffix" field. func StartSuffixGT(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldStartSuffix), v)) - }) + return predicate.Decision(sql.FieldGT(FieldStartSuffix, v)) } // StartSuffixGTE applies the GTE predicate on the "start_suffix" field. func StartSuffixGTE(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldStartSuffix), v)) - }) + return predicate.Decision(sql.FieldGTE(FieldStartSuffix, v)) } // StartSuffixLT applies the LT predicate on the "start_suffix" field. func StartSuffixLT(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldStartSuffix), v)) - }) + return predicate.Decision(sql.FieldLT(FieldStartSuffix, v)) } // StartSuffixLTE applies the LTE predicate on the "start_suffix" field. func StartSuffixLTE(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldStartSuffix), v)) - }) + return predicate.Decision(sql.FieldLTE(FieldStartSuffix, v)) } // StartSuffixIsNil applies the IsNil predicate on the "start_suffix" field. func StartSuffixIsNil() predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldStartSuffix))) - }) + return predicate.Decision(sql.FieldIsNull(FieldStartSuffix)) } // StartSuffixNotNil applies the NotNil predicate on the "start_suffix" field. 
func StartSuffixNotNil() predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldStartSuffix))) - }) + return predicate.Decision(sql.FieldNotNull(FieldStartSuffix)) } // EndSuffixEQ applies the EQ predicate on the "end_suffix" field. func EndSuffixEQ(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldEndSuffix), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldEndSuffix, v)) } // EndSuffixNEQ applies the NEQ predicate on the "end_suffix" field. func EndSuffixNEQ(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldEndSuffix), v)) - }) + return predicate.Decision(sql.FieldNEQ(FieldEndSuffix, v)) } // EndSuffixIn applies the In predicate on the "end_suffix" field. func EndSuffixIn(vs ...int64) predicate.Decision { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldEndSuffix), v...)) - }) + return predicate.Decision(sql.FieldIn(FieldEndSuffix, vs...)) } // EndSuffixNotIn applies the NotIn predicate on the "end_suffix" field. func EndSuffixNotIn(vs ...int64) predicate.Decision { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldEndSuffix), v...)) - }) + return predicate.Decision(sql.FieldNotIn(FieldEndSuffix, vs...)) } // EndSuffixGT applies the GT predicate on the "end_suffix" field. func EndSuffixGT(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldEndSuffix), v)) - }) + return predicate.Decision(sql.FieldGT(FieldEndSuffix, v)) } // EndSuffixGTE applies the GTE predicate on the "end_suffix" field. 
func EndSuffixGTE(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldEndSuffix), v)) - }) + return predicate.Decision(sql.FieldGTE(FieldEndSuffix, v)) } // EndSuffixLT applies the LT predicate on the "end_suffix" field. func EndSuffixLT(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldEndSuffix), v)) - }) + return predicate.Decision(sql.FieldLT(FieldEndSuffix, v)) } // EndSuffixLTE applies the LTE predicate on the "end_suffix" field. func EndSuffixLTE(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldEndSuffix), v)) - }) + return predicate.Decision(sql.FieldLTE(FieldEndSuffix, v)) } // EndSuffixIsNil applies the IsNil predicate on the "end_suffix" field. func EndSuffixIsNil() predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldEndSuffix))) - }) + return predicate.Decision(sql.FieldIsNull(FieldEndSuffix)) } // EndSuffixNotNil applies the NotNil predicate on the "end_suffix" field. func EndSuffixNotNil() predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldEndSuffix))) - }) + return predicate.Decision(sql.FieldNotNull(FieldEndSuffix)) } // IPSizeEQ applies the EQ predicate on the "ip_size" field. func IPSizeEQ(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldIPSize), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldIPSize, v)) } // IPSizeNEQ applies the NEQ predicate on the "ip_size" field. func IPSizeNEQ(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldIPSize), v)) - }) + return predicate.Decision(sql.FieldNEQ(FieldIPSize, v)) } // IPSizeIn applies the In predicate on the "ip_size" field. 
func IPSizeIn(vs ...int64) predicate.Decision { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldIPSize), v...)) - }) + return predicate.Decision(sql.FieldIn(FieldIPSize, vs...)) } // IPSizeNotIn applies the NotIn predicate on the "ip_size" field. func IPSizeNotIn(vs ...int64) predicate.Decision { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldIPSize), v...)) - }) + return predicate.Decision(sql.FieldNotIn(FieldIPSize, vs...)) } // IPSizeGT applies the GT predicate on the "ip_size" field. func IPSizeGT(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldIPSize), v)) - }) + return predicate.Decision(sql.FieldGT(FieldIPSize, v)) } // IPSizeGTE applies the GTE predicate on the "ip_size" field. func IPSizeGTE(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldIPSize), v)) - }) + return predicate.Decision(sql.FieldGTE(FieldIPSize, v)) } // IPSizeLT applies the LT predicate on the "ip_size" field. func IPSizeLT(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldIPSize), v)) - }) + return predicate.Decision(sql.FieldLT(FieldIPSize, v)) } // IPSizeLTE applies the LTE predicate on the "ip_size" field. func IPSizeLTE(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldIPSize), v)) - }) + return predicate.Decision(sql.FieldLTE(FieldIPSize, v)) } // IPSizeIsNil applies the IsNil predicate on the "ip_size" field. 
func IPSizeIsNil() predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldIPSize))) - }) + return predicate.Decision(sql.FieldIsNull(FieldIPSize)) } // IPSizeNotNil applies the NotNil predicate on the "ip_size" field. func IPSizeNotNil() predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldIPSize))) - }) + return predicate.Decision(sql.FieldNotNull(FieldIPSize)) } // ScopeEQ applies the EQ predicate on the "scope" field. func ScopeEQ(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldScope), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldScope, v)) } // ScopeNEQ applies the NEQ predicate on the "scope" field. func ScopeNEQ(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldScope), v)) - }) + return predicate.Decision(sql.FieldNEQ(FieldScope, v)) } // ScopeIn applies the In predicate on the "scope" field. func ScopeIn(vs ...string) predicate.Decision { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldScope), v...)) - }) + return predicate.Decision(sql.FieldIn(FieldScope, vs...)) } // ScopeNotIn applies the NotIn predicate on the "scope" field. func ScopeNotIn(vs ...string) predicate.Decision { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldScope), v...)) - }) + return predicate.Decision(sql.FieldNotIn(FieldScope, vs...)) } // ScopeGT applies the GT predicate on the "scope" field. func ScopeGT(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldScope), v)) - }) + return predicate.Decision(sql.FieldGT(FieldScope, v)) } // ScopeGTE applies the GTE predicate on the "scope" field. 
func ScopeGTE(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldScope), v)) - }) + return predicate.Decision(sql.FieldGTE(FieldScope, v)) } // ScopeLT applies the LT predicate on the "scope" field. func ScopeLT(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldScope), v)) - }) + return predicate.Decision(sql.FieldLT(FieldScope, v)) } // ScopeLTE applies the LTE predicate on the "scope" field. func ScopeLTE(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldScope), v)) - }) + return predicate.Decision(sql.FieldLTE(FieldScope, v)) } // ScopeContains applies the Contains predicate on the "scope" field. func ScopeContains(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldScope), v)) - }) + return predicate.Decision(sql.FieldContains(FieldScope, v)) } // ScopeHasPrefix applies the HasPrefix predicate on the "scope" field. func ScopeHasPrefix(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldScope), v)) - }) + return predicate.Decision(sql.FieldHasPrefix(FieldScope, v)) } // ScopeHasSuffix applies the HasSuffix predicate on the "scope" field. func ScopeHasSuffix(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldScope), v)) - }) + return predicate.Decision(sql.FieldHasSuffix(FieldScope, v)) } // ScopeEqualFold applies the EqualFold predicate on the "scope" field. func ScopeEqualFold(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldScope), v)) - }) + return predicate.Decision(sql.FieldEqualFold(FieldScope, v)) } // ScopeContainsFold applies the ContainsFold predicate on the "scope" field. 
func ScopeContainsFold(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldScope), v)) - }) + return predicate.Decision(sql.FieldContainsFold(FieldScope, v)) } // ValueEQ applies the EQ predicate on the "value" field. func ValueEQ(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldValue), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldValue, v)) } // ValueNEQ applies the NEQ predicate on the "value" field. func ValueNEQ(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldValue), v)) - }) + return predicate.Decision(sql.FieldNEQ(FieldValue, v)) } // ValueIn applies the In predicate on the "value" field. func ValueIn(vs ...string) predicate.Decision { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldValue), v...)) - }) + return predicate.Decision(sql.FieldIn(FieldValue, vs...)) } // ValueNotIn applies the NotIn predicate on the "value" field. func ValueNotIn(vs ...string) predicate.Decision { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldValue), v...)) - }) + return predicate.Decision(sql.FieldNotIn(FieldValue, vs...)) } // ValueGT applies the GT predicate on the "value" field. func ValueGT(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldValue), v)) - }) + return predicate.Decision(sql.FieldGT(FieldValue, v)) } // ValueGTE applies the GTE predicate on the "value" field. func ValueGTE(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldValue), v)) - }) + return predicate.Decision(sql.FieldGTE(FieldValue, v)) } // ValueLT applies the LT predicate on the "value" field. 
func ValueLT(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldValue), v)) - }) + return predicate.Decision(sql.FieldLT(FieldValue, v)) } // ValueLTE applies the LTE predicate on the "value" field. func ValueLTE(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldValue), v)) - }) + return predicate.Decision(sql.FieldLTE(FieldValue, v)) } // ValueContains applies the Contains predicate on the "value" field. func ValueContains(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldValue), v)) - }) + return predicate.Decision(sql.FieldContains(FieldValue, v)) } // ValueHasPrefix applies the HasPrefix predicate on the "value" field. func ValueHasPrefix(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldValue), v)) - }) + return predicate.Decision(sql.FieldHasPrefix(FieldValue, v)) } // ValueHasSuffix applies the HasSuffix predicate on the "value" field. func ValueHasSuffix(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldValue), v)) - }) + return predicate.Decision(sql.FieldHasSuffix(FieldValue, v)) } // ValueEqualFold applies the EqualFold predicate on the "value" field. func ValueEqualFold(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldValue), v)) - }) + return predicate.Decision(sql.FieldEqualFold(FieldValue, v)) } // ValueContainsFold applies the ContainsFold predicate on the "value" field. func ValueContainsFold(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldValue), v)) - }) + return predicate.Decision(sql.FieldContainsFold(FieldValue, v)) } // OriginEQ applies the EQ predicate on the "origin" field. 
func OriginEQ(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldOrigin), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldOrigin, v)) } // OriginNEQ applies the NEQ predicate on the "origin" field. func OriginNEQ(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldOrigin), v)) - }) + return predicate.Decision(sql.FieldNEQ(FieldOrigin, v)) } // OriginIn applies the In predicate on the "origin" field. func OriginIn(vs ...string) predicate.Decision { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldOrigin), v...)) - }) + return predicate.Decision(sql.FieldIn(FieldOrigin, vs...)) } // OriginNotIn applies the NotIn predicate on the "origin" field. func OriginNotIn(vs ...string) predicate.Decision { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldOrigin), v...)) - }) + return predicate.Decision(sql.FieldNotIn(FieldOrigin, vs...)) } // OriginGT applies the GT predicate on the "origin" field. func OriginGT(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldOrigin), v)) - }) + return predicate.Decision(sql.FieldGT(FieldOrigin, v)) } // OriginGTE applies the GTE predicate on the "origin" field. func OriginGTE(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldOrigin), v)) - }) + return predicate.Decision(sql.FieldGTE(FieldOrigin, v)) } // OriginLT applies the LT predicate on the "origin" field. 
func OriginLT(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldOrigin), v)) - }) + return predicate.Decision(sql.FieldLT(FieldOrigin, v)) } // OriginLTE applies the LTE predicate on the "origin" field. func OriginLTE(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldOrigin), v)) - }) + return predicate.Decision(sql.FieldLTE(FieldOrigin, v)) } // OriginContains applies the Contains predicate on the "origin" field. func OriginContains(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldOrigin), v)) - }) + return predicate.Decision(sql.FieldContains(FieldOrigin, v)) } // OriginHasPrefix applies the HasPrefix predicate on the "origin" field. func OriginHasPrefix(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldOrigin), v)) - }) + return predicate.Decision(sql.FieldHasPrefix(FieldOrigin, v)) } // OriginHasSuffix applies the HasSuffix predicate on the "origin" field. func OriginHasSuffix(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldOrigin), v)) - }) + return predicate.Decision(sql.FieldHasSuffix(FieldOrigin, v)) } // OriginEqualFold applies the EqualFold predicate on the "origin" field. func OriginEqualFold(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldOrigin), v)) - }) + return predicate.Decision(sql.FieldEqualFold(FieldOrigin, v)) } // OriginContainsFold applies the ContainsFold predicate on the "origin" field. 
func OriginContainsFold(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldOrigin), v)) - }) + return predicate.Decision(sql.FieldContainsFold(FieldOrigin, v)) } // SimulatedEQ applies the EQ predicate on the "simulated" field. func SimulatedEQ(v bool) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSimulated), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldSimulated, v)) } // SimulatedNEQ applies the NEQ predicate on the "simulated" field. func SimulatedNEQ(v bool) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldSimulated), v)) - }) + return predicate.Decision(sql.FieldNEQ(FieldSimulated, v)) } // UUIDEQ applies the EQ predicate on the "uuid" field. func UUIDEQ(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldUUID), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldUUID, v)) } // UUIDNEQ applies the NEQ predicate on the "uuid" field. func UUIDNEQ(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldUUID), v)) - }) + return predicate.Decision(sql.FieldNEQ(FieldUUID, v)) } // UUIDIn applies the In predicate on the "uuid" field. func UUIDIn(vs ...string) predicate.Decision { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldUUID), v...)) - }) + return predicate.Decision(sql.FieldIn(FieldUUID, vs...)) } // UUIDNotIn applies the NotIn predicate on the "uuid" field. 
func UUIDNotIn(vs ...string) predicate.Decision { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldUUID), v...)) - }) + return predicate.Decision(sql.FieldNotIn(FieldUUID, vs...)) } // UUIDGT applies the GT predicate on the "uuid" field. func UUIDGT(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldUUID), v)) - }) + return predicate.Decision(sql.FieldGT(FieldUUID, v)) } // UUIDGTE applies the GTE predicate on the "uuid" field. func UUIDGTE(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldUUID), v)) - }) + return predicate.Decision(sql.FieldGTE(FieldUUID, v)) } // UUIDLT applies the LT predicate on the "uuid" field. func UUIDLT(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldUUID), v)) - }) + return predicate.Decision(sql.FieldLT(FieldUUID, v)) } // UUIDLTE applies the LTE predicate on the "uuid" field. func UUIDLTE(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldUUID), v)) - }) + return predicate.Decision(sql.FieldLTE(FieldUUID, v)) } // UUIDContains applies the Contains predicate on the "uuid" field. func UUIDContains(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldUUID), v)) - }) + return predicate.Decision(sql.FieldContains(FieldUUID, v)) } // UUIDHasPrefix applies the HasPrefix predicate on the "uuid" field. func UUIDHasPrefix(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldUUID), v)) - }) + return predicate.Decision(sql.FieldHasPrefix(FieldUUID, v)) } // UUIDHasSuffix applies the HasSuffix predicate on the "uuid" field. 
func UUIDHasSuffix(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldUUID), v)) - }) + return predicate.Decision(sql.FieldHasSuffix(FieldUUID, v)) } // UUIDIsNil applies the IsNil predicate on the "uuid" field. func UUIDIsNil() predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldUUID))) - }) + return predicate.Decision(sql.FieldIsNull(FieldUUID)) } // UUIDNotNil applies the NotNil predicate on the "uuid" field. func UUIDNotNil() predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldUUID))) - }) + return predicate.Decision(sql.FieldNotNull(FieldUUID)) } // UUIDEqualFold applies the EqualFold predicate on the "uuid" field. func UUIDEqualFold(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldUUID), v)) - }) + return predicate.Decision(sql.FieldEqualFold(FieldUUID, v)) } // UUIDContainsFold applies the ContainsFold predicate on the "uuid" field. func UUIDContainsFold(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldUUID), v)) - }) + return predicate.Decision(sql.FieldContainsFold(FieldUUID, v)) } // AlertDecisionsEQ applies the EQ predicate on the "alert_decisions" field. func AlertDecisionsEQ(v int) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldAlertDecisions), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldAlertDecisions, v)) } // AlertDecisionsNEQ applies the NEQ predicate on the "alert_decisions" field. func AlertDecisionsNEQ(v int) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldAlertDecisions), v)) - }) + return predicate.Decision(sql.FieldNEQ(FieldAlertDecisions, v)) } // AlertDecisionsIn applies the In predicate on the "alert_decisions" field. 
func AlertDecisionsIn(vs ...int) predicate.Decision { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldAlertDecisions), v...)) - }) + return predicate.Decision(sql.FieldIn(FieldAlertDecisions, vs...)) } // AlertDecisionsNotIn applies the NotIn predicate on the "alert_decisions" field. func AlertDecisionsNotIn(vs ...int) predicate.Decision { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldAlertDecisions), v...)) - }) + return predicate.Decision(sql.FieldNotIn(FieldAlertDecisions, vs...)) } // AlertDecisionsIsNil applies the IsNil predicate on the "alert_decisions" field. func AlertDecisionsIsNil() predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldAlertDecisions))) - }) + return predicate.Decision(sql.FieldIsNull(FieldAlertDecisions)) } // AlertDecisionsNotNil applies the NotNil predicate on the "alert_decisions" field. func AlertDecisionsNotNil() predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldAlertDecisions))) - }) + return predicate.Decision(sql.FieldNotNull(FieldAlertDecisions)) } // HasOwner applies the HasEdge predicate on the "owner" edge. @@ -1494,7 +980,6 @@ func HasOwner() predicate.Decision { return predicate.Decision(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(OwnerTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn), ) sqlgraph.HasNeighbors(s, step) @@ -1504,11 +989,7 @@ func HasOwner() predicate.Decision { // HasOwnerWith applies the HasEdge predicate on the "owner" edge with a given conditions (other predicates). 
func HasOwnerWith(preds ...predicate.Alert) predicate.Decision { return predicate.Decision(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(OwnerInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn), - ) + step := newOwnerStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -1519,32 +1000,15 @@ func HasOwnerWith(preds ...predicate.Alert) predicate.Decision { // And groups predicates with the AND operator between them. func And(predicates ...predicate.Decision) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for _, p := range predicates { - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Decision(sql.AndPredicates(predicates...)) } // Or groups predicates with the OR operator between them. func Or(predicates ...predicate.Decision) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for i, p := range predicates { - if i > 0 { - s1.Or() - } - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Decision(sql.OrPredicates(predicates...)) } // Not applies the not operator on the given predicate. func Not(p predicate.Decision) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - p(s.Not()) - }) + return predicate.Decision(sql.NotPredicates(p)) } diff --git a/pkg/database/ent/decision_create.go b/pkg/database/ent/decision_create.go index 64238cb7003..43a28c53114 100644 --- a/pkg/database/ent/decision_create.go +++ b/pkg/database/ent/decision_create.go @@ -231,50 +231,8 @@ func (dc *DecisionCreate) Mutation() *DecisionMutation { // Save creates the Decision in the database. 
func (dc *DecisionCreate) Save(ctx context.Context) (*Decision, error) { - var ( - err error - node *Decision - ) dc.defaults() - if len(dc.hooks) == 0 { - if err = dc.check(); err != nil { - return nil, err - } - node, err = dc.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*DecisionMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = dc.check(); err != nil { - return nil, err - } - dc.mutation = mutation - if node, err = dc.sqlSave(ctx); err != nil { - return nil, err - } - mutation.id = &node.ID - mutation.done = true - return node, err - }) - for i := len(dc.hooks) - 1; i >= 0; i-- { - if dc.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = dc.hooks[i](mut) - } - v, err := mut.Mutate(ctx, dc.mutation) - if err != nil { - return nil, err - } - nv, ok := v.(*Decision) - if !ok { - return nil, fmt.Errorf("unexpected node type %T returned from DecisionMutation", v) - } - node = nv - } - return node, err + return withHooks(ctx, dc.sqlSave, dc.mutation, dc.hooks) } // SaveX calls Save and panics if Save returns an error. 
@@ -339,6 +297,9 @@ func (dc *DecisionCreate) check() error { } func (dc *DecisionCreate) sqlSave(ctx context.Context) (*Decision, error) { + if err := dc.check(); err != nil { + return nil, err + } _node, _spec := dc.createSpec() if err := sqlgraph.CreateNode(ctx, dc.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { @@ -348,138 +309,74 @@ func (dc *DecisionCreate) sqlSave(ctx context.Context) (*Decision, error) { } id := _spec.ID.Value.(int64) _node.ID = int(id) + dc.mutation.id = &_node.ID + dc.mutation.done = true return _node, nil } func (dc *DecisionCreate) createSpec() (*Decision, *sqlgraph.CreateSpec) { var ( _node = &Decision{config: dc.config} - _spec = &sqlgraph.CreateSpec{ - Table: decision.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: decision.FieldID, - }, - } + _spec = sqlgraph.NewCreateSpec(decision.Table, sqlgraph.NewFieldSpec(decision.FieldID, field.TypeInt)) ) if value, ok := dc.mutation.CreatedAt(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: decision.FieldCreatedAt, - }) + _spec.SetField(decision.FieldCreatedAt, field.TypeTime, value) _node.CreatedAt = &value } if value, ok := dc.mutation.UpdatedAt(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: decision.FieldUpdatedAt, - }) + _spec.SetField(decision.FieldUpdatedAt, field.TypeTime, value) _node.UpdatedAt = &value } if value, ok := dc.mutation.Until(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: decision.FieldUntil, - }) + _spec.SetField(decision.FieldUntil, field.TypeTime, value) _node.Until = &value } if value, ok := dc.mutation.Scenario(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: decision.FieldScenario, - }) + _spec.SetField(decision.FieldScenario, field.TypeString, value) 
_node.Scenario = value } if value, ok := dc.mutation.GetType(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: decision.FieldType, - }) + _spec.SetField(decision.FieldType, field.TypeString, value) _node.Type = value } if value, ok := dc.mutation.StartIP(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: decision.FieldStartIP, - }) + _spec.SetField(decision.FieldStartIP, field.TypeInt64, value) _node.StartIP = value } if value, ok := dc.mutation.EndIP(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: decision.FieldEndIP, - }) + _spec.SetField(decision.FieldEndIP, field.TypeInt64, value) _node.EndIP = value } if value, ok := dc.mutation.StartSuffix(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: decision.FieldStartSuffix, - }) + _spec.SetField(decision.FieldStartSuffix, field.TypeInt64, value) _node.StartSuffix = value } if value, ok := dc.mutation.EndSuffix(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: decision.FieldEndSuffix, - }) + _spec.SetField(decision.FieldEndSuffix, field.TypeInt64, value) _node.EndSuffix = value } if value, ok := dc.mutation.IPSize(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: decision.FieldIPSize, - }) + _spec.SetField(decision.FieldIPSize, field.TypeInt64, value) _node.IPSize = value } if value, ok := dc.mutation.Scope(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: decision.FieldScope, - }) + _spec.SetField(decision.FieldScope, field.TypeString, value) _node.Scope = value } if value, ok := dc.mutation.Value(); ok { - _spec.Fields = append(_spec.Fields, 
&sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: decision.FieldValue, - }) + _spec.SetField(decision.FieldValue, field.TypeString, value) _node.Value = value } if value, ok := dc.mutation.Origin(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: decision.FieldOrigin, - }) + _spec.SetField(decision.FieldOrigin, field.TypeString, value) _node.Origin = value } if value, ok := dc.mutation.Simulated(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: decision.FieldSimulated, - }) + _spec.SetField(decision.FieldSimulated, field.TypeBool, value) _node.Simulated = value } if value, ok := dc.mutation.UUID(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: decision.FieldUUID, - }) + _spec.SetField(decision.FieldUUID, field.TypeString, value) _node.UUID = value } if nodes := dc.mutation.OwnerIDs(); len(nodes) > 0 { @@ -490,10 +387,7 @@ func (dc *DecisionCreate) createSpec() (*Decision, *sqlgraph.CreateSpec) { Columns: []string{decision.OwnerColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: alert.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -508,11 +402,15 @@ func (dc *DecisionCreate) createSpec() (*Decision, *sqlgraph.CreateSpec) { // DecisionCreateBulk is the builder for creating many Decision entities in bulk. type DecisionCreateBulk struct { config + err error builders []*DecisionCreate } // Save creates the Decision entities in the database. 
func (dcb *DecisionCreateBulk) Save(ctx context.Context) ([]*Decision, error) { + if dcb.err != nil { + return nil, dcb.err + } specs := make([]*sqlgraph.CreateSpec, len(dcb.builders)) nodes := make([]*Decision, len(dcb.builders)) mutators := make([]Mutator, len(dcb.builders)) @@ -529,8 +427,8 @@ func (dcb *DecisionCreateBulk) Save(ctx context.Context) ([]*Decision, error) { return nil, err } builder.mutation = mutation - nodes[i], specs[i] = builder.createSpec() var err error + nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { _, err = mutators[i+1].Mutate(root, dcb.builders[i+1].mutation) } else { diff --git a/pkg/database/ent/decision_delete.go b/pkg/database/ent/decision_delete.go index 24b494b113e..35bb8767283 100644 --- a/pkg/database/ent/decision_delete.go +++ b/pkg/database/ent/decision_delete.go @@ -4,7 +4,6 @@ package ent import ( "context" - "fmt" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" @@ -28,34 +27,7 @@ func (dd *DecisionDelete) Where(ps ...predicate.Decision) *DecisionDelete { // Exec executes the deletion query and returns how many vertices were deleted. 
func (dd *DecisionDelete) Exec(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(dd.hooks) == 0 { - affected, err = dd.sqlExec(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*DecisionMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - dd.mutation = mutation - affected, err = dd.sqlExec(ctx) - mutation.done = true - return affected, err - }) - for i := len(dd.hooks) - 1; i >= 0; i-- { - if dd.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = dd.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, dd.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, dd.sqlExec, dd.mutation, dd.hooks) } // ExecX is like Exec, but panics if an error occurs. @@ -68,15 +40,7 @@ func (dd *DecisionDelete) ExecX(ctx context.Context) int { } func (dd *DecisionDelete) sqlExec(ctx context.Context) (int, error) { - _spec := &sqlgraph.DeleteSpec{ - Node: &sqlgraph.NodeSpec{ - Table: decision.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: decision.FieldID, - }, - }, - } + _spec := sqlgraph.NewDeleteSpec(decision.Table, sqlgraph.NewFieldSpec(decision.FieldID, field.TypeInt)) if ps := dd.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -88,6 +52,7 @@ func (dd *DecisionDelete) sqlExec(ctx context.Context) (int, error) { if err != nil && sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } + dd.mutation.done = true return affected, err } @@ -96,6 +61,12 @@ type DecisionDeleteOne struct { dd *DecisionDelete } +// Where appends a list predicates to the DecisionDelete builder. +func (ddo *DecisionDeleteOne) Where(ps ...predicate.Decision) *DecisionDeleteOne { + ddo.dd.mutation.Where(ps...) + return ddo +} + // Exec executes the deletion query. 
func (ddo *DecisionDeleteOne) Exec(ctx context.Context) error { n, err := ddo.dd.Exec(ctx) @@ -111,5 +82,7 @@ func (ddo *DecisionDeleteOne) Exec(ctx context.Context) error { // ExecX is like Exec, but panics if an error occurs. func (ddo *DecisionDeleteOne) ExecX(ctx context.Context) { - ddo.dd.ExecX(ctx) + if err := ddo.Exec(ctx); err != nil { + panic(err) + } } diff --git a/pkg/database/ent/decision_query.go b/pkg/database/ent/decision_query.go index 91aebded968..b050a4d9649 100644 --- a/pkg/database/ent/decision_query.go +++ b/pkg/database/ent/decision_query.go @@ -18,11 +18,9 @@ import ( // DecisionQuery is the builder for querying Decision entities. type DecisionQuery struct { config - limit *int - offset *int - unique *bool - order []OrderFunc - fields []string + ctx *QueryContext + order []decision.OrderOption + inters []Interceptor predicates []predicate.Decision withOwner *AlertQuery // intermediate query (i.e. traversal path). @@ -36,34 +34,34 @@ func (dq *DecisionQuery) Where(ps ...predicate.Decision) *DecisionQuery { return dq } -// Limit adds a limit step to the query. +// Limit the number of records to be returned by this query. func (dq *DecisionQuery) Limit(limit int) *DecisionQuery { - dq.limit = &limit + dq.ctx.Limit = &limit return dq } -// Offset adds an offset step to the query. +// Offset to start from. func (dq *DecisionQuery) Offset(offset int) *DecisionQuery { - dq.offset = &offset + dq.ctx.Offset = &offset return dq } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. func (dq *DecisionQuery) Unique(unique bool) *DecisionQuery { - dq.unique = &unique + dq.ctx.Unique = &unique return dq } -// Order adds an order step to the query. -func (dq *DecisionQuery) Order(o ...OrderFunc) *DecisionQuery { +// Order specifies how the records should be ordered. 
+func (dq *DecisionQuery) Order(o ...decision.OrderOption) *DecisionQuery { dq.order = append(dq.order, o...) return dq } // QueryOwner chains the current query on the "owner" edge. func (dq *DecisionQuery) QueryOwner() *AlertQuery { - query := &AlertQuery{config: dq.config} + query := (&AlertClient{config: dq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := dq.prepareQuery(ctx); err != nil { return nil, err @@ -86,7 +84,7 @@ func (dq *DecisionQuery) QueryOwner() *AlertQuery { // First returns the first Decision entity from the query. // Returns a *NotFoundError when no Decision was found. func (dq *DecisionQuery) First(ctx context.Context) (*Decision, error) { - nodes, err := dq.Limit(1).All(ctx) + nodes, err := dq.Limit(1).All(setContextOp(ctx, dq.ctx, "First")) if err != nil { return nil, err } @@ -109,7 +107,7 @@ func (dq *DecisionQuery) FirstX(ctx context.Context) *Decision { // Returns a *NotFoundError when no Decision ID was found. func (dq *DecisionQuery) FirstID(ctx context.Context) (id int, err error) { var ids []int - if ids, err = dq.Limit(1).IDs(ctx); err != nil { + if ids, err = dq.Limit(1).IDs(setContextOp(ctx, dq.ctx, "FirstID")); err != nil { return } if len(ids) == 0 { @@ -132,7 +130,7 @@ func (dq *DecisionQuery) FirstIDX(ctx context.Context) int { // Returns a *NotSingularError when more than one Decision entity is found. // Returns a *NotFoundError when no Decision entities are found. func (dq *DecisionQuery) Only(ctx context.Context) (*Decision, error) { - nodes, err := dq.Limit(2).All(ctx) + nodes, err := dq.Limit(2).All(setContextOp(ctx, dq.ctx, "Only")) if err != nil { return nil, err } @@ -160,7 +158,7 @@ func (dq *DecisionQuery) OnlyX(ctx context.Context) *Decision { // Returns a *NotFoundError when no entities are found. 
func (dq *DecisionQuery) OnlyID(ctx context.Context) (id int, err error) { var ids []int - if ids, err = dq.Limit(2).IDs(ctx); err != nil { + if ids, err = dq.Limit(2).IDs(setContextOp(ctx, dq.ctx, "OnlyID")); err != nil { return } switch len(ids) { @@ -185,10 +183,12 @@ func (dq *DecisionQuery) OnlyIDX(ctx context.Context) int { // All executes the query and returns a list of Decisions. func (dq *DecisionQuery) All(ctx context.Context) ([]*Decision, error) { + ctx = setContextOp(ctx, dq.ctx, "All") if err := dq.prepareQuery(ctx); err != nil { return nil, err } - return dq.sqlAll(ctx) + qr := querierAll[[]*Decision, *DecisionQuery]() + return withInterceptors[[]*Decision](ctx, dq, qr, dq.inters) } // AllX is like All, but panics if an error occurs. @@ -201,9 +201,12 @@ func (dq *DecisionQuery) AllX(ctx context.Context) []*Decision { } // IDs executes the query and returns a list of Decision IDs. -func (dq *DecisionQuery) IDs(ctx context.Context) ([]int, error) { - var ids []int - if err := dq.Select(decision.FieldID).Scan(ctx, &ids); err != nil { +func (dq *DecisionQuery) IDs(ctx context.Context) (ids []int, err error) { + if dq.ctx.Unique == nil && dq.path != nil { + dq.Unique(true) + } + ctx = setContextOp(ctx, dq.ctx, "IDs") + if err = dq.Select(decision.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil @@ -220,10 +223,11 @@ func (dq *DecisionQuery) IDsX(ctx context.Context) []int { // Count returns the count of the given query. func (dq *DecisionQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, dq.ctx, "Count") if err := dq.prepareQuery(ctx); err != nil { return 0, err } - return dq.sqlCount(ctx) + return withInterceptors[int](ctx, dq, querierCount[*DecisionQuery](), dq.inters) } // CountX is like Count, but panics if an error occurs. @@ -237,10 +241,15 @@ func (dq *DecisionQuery) CountX(ctx context.Context) int { // Exist returns true if the query has elements in the graph. 
func (dq *DecisionQuery) Exist(ctx context.Context) (bool, error) { - if err := dq.prepareQuery(ctx); err != nil { - return false, err + ctx = setContextOp(ctx, dq.ctx, "Exist") + switch _, err := dq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil } - return dq.sqlExist(ctx) } // ExistX is like Exist, but panics if an error occurs. @@ -260,22 +269,21 @@ func (dq *DecisionQuery) Clone() *DecisionQuery { } return &DecisionQuery{ config: dq.config, - limit: dq.limit, - offset: dq.offset, - order: append([]OrderFunc{}, dq.order...), + ctx: dq.ctx.Clone(), + order: append([]decision.OrderOption{}, dq.order...), + inters: append([]Interceptor{}, dq.inters...), predicates: append([]predicate.Decision{}, dq.predicates...), withOwner: dq.withOwner.Clone(), // clone intermediate query. - sql: dq.sql.Clone(), - path: dq.path, - unique: dq.unique, + sql: dq.sql.Clone(), + path: dq.path, } } // WithOwner tells the query-builder to eager-load the nodes that are connected to // the "owner" edge. The optional arguments are used to configure the query builder of the edge. func (dq *DecisionQuery) WithOwner(opts ...func(*AlertQuery)) *DecisionQuery { - query := &AlertQuery{config: dq.config} + query := (&AlertClient{config: dq.config}).Query() for _, opt := range opts { opt(query) } @@ -298,16 +306,11 @@ func (dq *DecisionQuery) WithOwner(opts ...func(*AlertQuery)) *DecisionQuery { // Aggregate(ent.Count()). // Scan(ctx, &v) func (dq *DecisionQuery) GroupBy(field string, fields ...string) *DecisionGroupBy { - grbuild := &DecisionGroupBy{config: dq.config} - grbuild.fields = append([]string{field}, fields...) - grbuild.path = func(ctx context.Context) (prev *sql.Selector, err error) { - if err := dq.prepareQuery(ctx); err != nil { - return nil, err - } - return dq.sqlQuery(ctx), nil - } + dq.ctx.Fields = append([]string{field}, fields...) 
+ grbuild := &DecisionGroupBy{build: dq} + grbuild.flds = &dq.ctx.Fields grbuild.label = decision.Label - grbuild.flds, grbuild.scan = &grbuild.fields, grbuild.Scan + grbuild.scan = grbuild.Scan return grbuild } @@ -324,15 +327,30 @@ func (dq *DecisionQuery) GroupBy(field string, fields ...string) *DecisionGroupB // Select(decision.FieldCreatedAt). // Scan(ctx, &v) func (dq *DecisionQuery) Select(fields ...string) *DecisionSelect { - dq.fields = append(dq.fields, fields...) - selbuild := &DecisionSelect{DecisionQuery: dq} - selbuild.label = decision.Label - selbuild.flds, selbuild.scan = &dq.fields, selbuild.Scan - return selbuild + dq.ctx.Fields = append(dq.ctx.Fields, fields...) + sbuild := &DecisionSelect{DecisionQuery: dq} + sbuild.label = decision.Label + sbuild.flds, sbuild.scan = &dq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a DecisionSelect configured with the given aggregations. +func (dq *DecisionQuery) Aggregate(fns ...AggregateFunc) *DecisionSelect { + return dq.Select().Aggregate(fns...) 
} func (dq *DecisionQuery) prepareQuery(ctx context.Context) error { - for _, f := range dq.fields { + for _, inter := range dq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, dq); err != nil { + return err + } + } + } + for _, f := range dq.ctx.Fields { if !decision.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } @@ -392,6 +410,9 @@ func (dq *DecisionQuery) loadOwner(ctx context.Context, query *AlertQuery, nodes } nodeids[fk] = append(nodeids[fk], nodes[i]) } + if len(ids) == 0 { + return nil + } query.Where(alert.IDIn(ids...)) neighbors, err := query.All(ctx) if err != nil { @@ -411,41 +432,22 @@ func (dq *DecisionQuery) loadOwner(ctx context.Context, query *AlertQuery, nodes func (dq *DecisionQuery) sqlCount(ctx context.Context) (int, error) { _spec := dq.querySpec() - _spec.Node.Columns = dq.fields - if len(dq.fields) > 0 { - _spec.Unique = dq.unique != nil && *dq.unique + _spec.Node.Columns = dq.ctx.Fields + if len(dq.ctx.Fields) > 0 { + _spec.Unique = dq.ctx.Unique != nil && *dq.ctx.Unique } return sqlgraph.CountNodes(ctx, dq.driver, _spec) } -func (dq *DecisionQuery) sqlExist(ctx context.Context) (bool, error) { - switch _, err := dq.FirstID(ctx); { - case IsNotFound(err): - return false, nil - case err != nil: - return false, fmt.Errorf("ent: check existence: %w", err) - default: - return true, nil - } -} - func (dq *DecisionQuery) querySpec() *sqlgraph.QuerySpec { - _spec := &sqlgraph.QuerySpec{ - Node: &sqlgraph.NodeSpec{ - Table: decision.Table, - Columns: decision.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: decision.FieldID, - }, - }, - From: dq.sql, - Unique: true, - } - if unique := dq.unique; unique != nil { + _spec := sqlgraph.NewQuerySpec(decision.Table, decision.Columns, sqlgraph.NewFieldSpec(decision.FieldID, field.TypeInt)) 
+ _spec.From = dq.sql + if unique := dq.ctx.Unique; unique != nil { _spec.Unique = *unique + } else if dq.path != nil { + _spec.Unique = true } - if fields := dq.fields; len(fields) > 0 { + if fields := dq.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, decision.FieldID) for i := range fields { @@ -453,6 +455,9 @@ func (dq *DecisionQuery) querySpec() *sqlgraph.QuerySpec { _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) } } + if dq.withOwner != nil { + _spec.Node.AddColumnOnce(decision.FieldAlertDecisions) + } } if ps := dq.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { @@ -461,10 +466,10 @@ func (dq *DecisionQuery) querySpec() *sqlgraph.QuerySpec { } } } - if limit := dq.limit; limit != nil { + if limit := dq.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := dq.offset; offset != nil { + if offset := dq.ctx.Offset; offset != nil { _spec.Offset = *offset } if ps := dq.order; len(ps) > 0 { @@ -480,7 +485,7 @@ func (dq *DecisionQuery) querySpec() *sqlgraph.QuerySpec { func (dq *DecisionQuery) sqlQuery(ctx context.Context) *sql.Selector { builder := sql.Dialect(dq.driver.Dialect()) t1 := builder.Table(decision.Table) - columns := dq.fields + columns := dq.ctx.Fields if len(columns) == 0 { columns = decision.Columns } @@ -489,7 +494,7 @@ func (dq *DecisionQuery) sqlQuery(ctx context.Context) *sql.Selector { selector = dq.sql selector.Select(selector.Columns(columns...)...) } - if dq.unique != nil && *dq.unique { + if dq.ctx.Unique != nil && *dq.ctx.Unique { selector.Distinct() } for _, p := range dq.predicates { @@ -498,12 +503,12 @@ func (dq *DecisionQuery) sqlQuery(ctx context.Context) *sql.Selector { for _, p := range dq.order { p(selector) } - if offset := dq.offset; offset != nil { + if offset := dq.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. 
selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := dq.limit; limit != nil { + if limit := dq.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector @@ -511,13 +516,8 @@ func (dq *DecisionQuery) sqlQuery(ctx context.Context) *sql.Selector { // DecisionGroupBy is the group-by builder for Decision entities. type DecisionGroupBy struct { - config selector - fields []string - fns []AggregateFunc - // intermediate query (i.e. traversal path). - sql *sql.Selector - path func(context.Context) (*sql.Selector, error) + build *DecisionQuery } // Aggregate adds the given aggregation functions to the group-by query. @@ -526,74 +526,77 @@ func (dgb *DecisionGroupBy) Aggregate(fns ...AggregateFunc) *DecisionGroupBy { return dgb } -// Scan applies the group-by query and scans the result into the given value. +// Scan applies the selector query and scans the result into the given value. func (dgb *DecisionGroupBy) Scan(ctx context.Context, v any) error { - query, err := dgb.path(ctx) - if err != nil { + ctx = setContextOp(ctx, dgb.build.ctx, "GroupBy") + if err := dgb.build.prepareQuery(ctx); err != nil { return err } - dgb.sql = query - return dgb.sqlScan(ctx, v) + return scanWithInterceptors[*DecisionQuery, *DecisionGroupBy](ctx, dgb.build, dgb, dgb.build.inters, v) } -func (dgb *DecisionGroupBy) sqlScan(ctx context.Context, v any) error { - for _, f := range dgb.fields { - if !decision.ValidColumn(f) { - return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} +func (dgb *DecisionGroupBy) sqlScan(ctx context.Context, root *DecisionQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(dgb.fns)) + for _, fn := range dgb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*dgb.flds)+len(dgb.fns)) + for _, f := range *dgb.flds { + columns = append(columns, selector.C(f)) } + columns = 
append(columns, aggregation...) + selector.Select(columns...) } - selector := dgb.sqlQuery() + selector.GroupBy(selector.Columns(*dgb.flds...)...) if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := dgb.driver.Query(ctx, query, args, rows); err != nil { + if err := dgb.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() return sql.ScanSlice(rows, v) } -func (dgb *DecisionGroupBy) sqlQuery() *sql.Selector { - selector := dgb.sql.Select() - aggregation := make([]string, 0, len(dgb.fns)) - for _, fn := range dgb.fns { - aggregation = append(aggregation, fn(selector)) - } - // If no columns were selected in a custom aggregation function, the default - // selection is the fields used for "group-by", and the aggregation functions. - if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(dgb.fields)+len(dgb.fns)) - for _, f := range dgb.fields { - columns = append(columns, selector.C(f)) - } - columns = append(columns, aggregation...) - selector.Select(columns...) - } - return selector.GroupBy(selector.Columns(dgb.fields...)...) -} - // DecisionSelect is the builder for selecting fields of Decision entities. type DecisionSelect struct { *DecisionQuery selector - // intermediate query (i.e. traversal path). - sql *sql.Selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (ds *DecisionSelect) Aggregate(fns ...AggregateFunc) *DecisionSelect { + ds.fns = append(ds.fns, fns...) + return ds } // Scan applies the selector query and scans the result into the given value. 
func (ds *DecisionSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, ds.ctx, "Select") if err := ds.prepareQuery(ctx); err != nil { return err } - ds.sql = ds.DecisionQuery.sqlQuery(ctx) - return ds.sqlScan(ctx, v) + return scanWithInterceptors[*DecisionQuery, *DecisionSelect](ctx, ds.DecisionQuery, ds, ds.inters, v) } -func (ds *DecisionSelect) sqlScan(ctx context.Context, v any) error { +func (ds *DecisionSelect) sqlScan(ctx context.Context, root *DecisionQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(ds.fns)) + for _, fn := range ds.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*ds.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } rows := &sql.Rows{} - query, args := ds.sql.Query() + query, args := selector.Query() if err := ds.driver.Query(ctx, query, args, rows); err != nil { return err } diff --git a/pkg/database/ent/decision_update.go b/pkg/database/ent/decision_update.go index 64b40871eca..1b62cc54c30 100644 --- a/pkg/database/ent/decision_update.go +++ b/pkg/database/ent/decision_update.go @@ -324,35 +324,8 @@ func (du *DecisionUpdate) ClearOwner() *DecisionUpdate { // Save executes the query and returns the number of nodes affected by the update operation. 
func (du *DecisionUpdate) Save(ctx context.Context) (int, error) { - var ( - err error - affected int - ) du.defaults() - if len(du.hooks) == 0 { - affected, err = du.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*DecisionMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - du.mutation = mutation - affected, err = du.sqlSave(ctx) - mutation.done = true - return affected, err - }) - for i := len(du.hooks) - 1; i >= 0; i-- { - if du.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = du.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, du.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, du.sqlSave, du.mutation, du.hooks) } // SaveX is like Save, but panics if an error occurs. @@ -390,16 +363,7 @@ func (du *DecisionUpdate) defaults() { } func (du *DecisionUpdate) sqlSave(ctx context.Context) (n int, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: decision.Table, - Columns: decision.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: decision.FieldID, - }, - }, - } + _spec := sqlgraph.NewUpdateSpec(decision.Table, decision.Columns, sqlgraph.NewFieldSpec(decision.FieldID, field.TypeInt)) if ps := du.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -408,198 +372,91 @@ func (du *DecisionUpdate) sqlSave(ctx context.Context) (n int, err error) { } } if value, ok := du.mutation.CreatedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: decision.FieldCreatedAt, - }) + _spec.SetField(decision.FieldCreatedAt, field.TypeTime, value) } if du.mutation.CreatedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: 
decision.FieldCreatedAt, - }) + _spec.ClearField(decision.FieldCreatedAt, field.TypeTime) } if value, ok := du.mutation.UpdatedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: decision.FieldUpdatedAt, - }) + _spec.SetField(decision.FieldUpdatedAt, field.TypeTime, value) } if du.mutation.UpdatedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: decision.FieldUpdatedAt, - }) + _spec.ClearField(decision.FieldUpdatedAt, field.TypeTime) } if value, ok := du.mutation.Until(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: decision.FieldUntil, - }) + _spec.SetField(decision.FieldUntil, field.TypeTime, value) } if du.mutation.UntilCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: decision.FieldUntil, - }) + _spec.ClearField(decision.FieldUntil, field.TypeTime) } if value, ok := du.mutation.Scenario(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: decision.FieldScenario, - }) + _spec.SetField(decision.FieldScenario, field.TypeString, value) } if value, ok := du.mutation.GetType(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: decision.FieldType, - }) + _spec.SetField(decision.FieldType, field.TypeString, value) } if value, ok := du.mutation.StartIP(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: decision.FieldStartIP, - }) + _spec.SetField(decision.FieldStartIP, field.TypeInt64, value) } if value, ok := du.mutation.AddedStartIP(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: 
decision.FieldStartIP, - }) + _spec.AddField(decision.FieldStartIP, field.TypeInt64, value) } if du.mutation.StartIPCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Column: decision.FieldStartIP, - }) + _spec.ClearField(decision.FieldStartIP, field.TypeInt64) } if value, ok := du.mutation.EndIP(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: decision.FieldEndIP, - }) + _spec.SetField(decision.FieldEndIP, field.TypeInt64, value) } if value, ok := du.mutation.AddedEndIP(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: decision.FieldEndIP, - }) + _spec.AddField(decision.FieldEndIP, field.TypeInt64, value) } if du.mutation.EndIPCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Column: decision.FieldEndIP, - }) + _spec.ClearField(decision.FieldEndIP, field.TypeInt64) } if value, ok := du.mutation.StartSuffix(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: decision.FieldStartSuffix, - }) + _spec.SetField(decision.FieldStartSuffix, field.TypeInt64, value) } if value, ok := du.mutation.AddedStartSuffix(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: decision.FieldStartSuffix, - }) + _spec.AddField(decision.FieldStartSuffix, field.TypeInt64, value) } if du.mutation.StartSuffixCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Column: decision.FieldStartSuffix, - }) + _spec.ClearField(decision.FieldStartSuffix, field.TypeInt64) } if value, ok := du.mutation.EndSuffix(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: 
decision.FieldEndSuffix, - }) + _spec.SetField(decision.FieldEndSuffix, field.TypeInt64, value) } if value, ok := du.mutation.AddedEndSuffix(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: decision.FieldEndSuffix, - }) + _spec.AddField(decision.FieldEndSuffix, field.TypeInt64, value) } if du.mutation.EndSuffixCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Column: decision.FieldEndSuffix, - }) + _spec.ClearField(decision.FieldEndSuffix, field.TypeInt64) } if value, ok := du.mutation.IPSize(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: decision.FieldIPSize, - }) + _spec.SetField(decision.FieldIPSize, field.TypeInt64, value) } if value, ok := du.mutation.AddedIPSize(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: decision.FieldIPSize, - }) + _spec.AddField(decision.FieldIPSize, field.TypeInt64, value) } if du.mutation.IPSizeCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Column: decision.FieldIPSize, - }) + _spec.ClearField(decision.FieldIPSize, field.TypeInt64) } if value, ok := du.mutation.Scope(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: decision.FieldScope, - }) + _spec.SetField(decision.FieldScope, field.TypeString, value) } if value, ok := du.mutation.Value(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: decision.FieldValue, - }) + _spec.SetField(decision.FieldValue, field.TypeString, value) } if value, ok := du.mutation.Origin(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - 
Column: decision.FieldOrigin, - }) + _spec.SetField(decision.FieldOrigin, field.TypeString, value) } if value, ok := du.mutation.Simulated(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: decision.FieldSimulated, - }) + _spec.SetField(decision.FieldSimulated, field.TypeBool, value) } if value, ok := du.mutation.UUID(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: decision.FieldUUID, - }) + _spec.SetField(decision.FieldUUID, field.TypeString, value) } if du.mutation.UUIDCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: decision.FieldUUID, - }) + _spec.ClearField(decision.FieldUUID, field.TypeString) } if du.mutation.OwnerCleared() { edge := &sqlgraph.EdgeSpec{ @@ -609,10 +466,7 @@ func (du *DecisionUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{decision.OwnerColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: alert.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -625,10 +479,7 @@ func (du *DecisionUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{decision.OwnerColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: alert.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -644,6 +495,7 @@ func (du *DecisionUpdate) sqlSave(ctx context.Context) (n int, err error) { } return 0, err } + du.mutation.done = true return n, nil } @@ -948,6 +800,12 @@ func (duo *DecisionUpdateOne) ClearOwner() *DecisionUpdateOne { return duo } +// Where appends a list predicates to the DecisionUpdate builder. 
+func (duo *DecisionUpdateOne) Where(ps ...predicate.Decision) *DecisionUpdateOne { + duo.mutation.Where(ps...) + return duo +} + // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. func (duo *DecisionUpdateOne) Select(field string, fields ...string) *DecisionUpdateOne { @@ -957,41 +815,8 @@ func (duo *DecisionUpdateOne) Select(field string, fields ...string) *DecisionUp // Save executes the query and returns the updated Decision entity. func (duo *DecisionUpdateOne) Save(ctx context.Context) (*Decision, error) { - var ( - err error - node *Decision - ) duo.defaults() - if len(duo.hooks) == 0 { - node, err = duo.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*DecisionMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - duo.mutation = mutation - node, err = duo.sqlSave(ctx) - mutation.done = true - return node, err - }) - for i := len(duo.hooks) - 1; i >= 0; i-- { - if duo.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = duo.hooks[i](mut) - } - v, err := mut.Mutate(ctx, duo.mutation) - if err != nil { - return nil, err - } - nv, ok := v.(*Decision) - if !ok { - return nil, fmt.Errorf("unexpected node type %T returned from DecisionMutation", v) - } - node = nv - } - return node, err + return withHooks(ctx, duo.sqlSave, duo.mutation, duo.hooks) } // SaveX is like Save, but panics if an error occurs. 
@@ -1029,16 +854,7 @@ func (duo *DecisionUpdateOne) defaults() { } func (duo *DecisionUpdateOne) sqlSave(ctx context.Context) (_node *Decision, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: decision.Table, - Columns: decision.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: decision.FieldID, - }, - }, - } + _spec := sqlgraph.NewUpdateSpec(decision.Table, decision.Columns, sqlgraph.NewFieldSpec(decision.FieldID, field.TypeInt)) id, ok := duo.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Decision.id" for update`)} @@ -1064,198 +880,91 @@ func (duo *DecisionUpdateOne) sqlSave(ctx context.Context) (_node *Decision, err } } if value, ok := duo.mutation.CreatedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: decision.FieldCreatedAt, - }) + _spec.SetField(decision.FieldCreatedAt, field.TypeTime, value) } if duo.mutation.CreatedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: decision.FieldCreatedAt, - }) + _spec.ClearField(decision.FieldCreatedAt, field.TypeTime) } if value, ok := duo.mutation.UpdatedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: decision.FieldUpdatedAt, - }) + _spec.SetField(decision.FieldUpdatedAt, field.TypeTime, value) } if duo.mutation.UpdatedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: decision.FieldUpdatedAt, - }) + _spec.ClearField(decision.FieldUpdatedAt, field.TypeTime) } if value, ok := duo.mutation.Until(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: decision.FieldUntil, - }) + _spec.SetField(decision.FieldUntil, field.TypeTime, value) } if 
duo.mutation.UntilCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: decision.FieldUntil, - }) + _spec.ClearField(decision.FieldUntil, field.TypeTime) } if value, ok := duo.mutation.Scenario(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: decision.FieldScenario, - }) + _spec.SetField(decision.FieldScenario, field.TypeString, value) } if value, ok := duo.mutation.GetType(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: decision.FieldType, - }) + _spec.SetField(decision.FieldType, field.TypeString, value) } if value, ok := duo.mutation.StartIP(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: decision.FieldStartIP, - }) + _spec.SetField(decision.FieldStartIP, field.TypeInt64, value) } if value, ok := duo.mutation.AddedStartIP(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: decision.FieldStartIP, - }) + _spec.AddField(decision.FieldStartIP, field.TypeInt64, value) } if duo.mutation.StartIPCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Column: decision.FieldStartIP, - }) + _spec.ClearField(decision.FieldStartIP, field.TypeInt64) } if value, ok := duo.mutation.EndIP(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: decision.FieldEndIP, - }) + _spec.SetField(decision.FieldEndIP, field.TypeInt64, value) } if value, ok := duo.mutation.AddedEndIP(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: decision.FieldEndIP, - }) + _spec.AddField(decision.FieldEndIP, field.TypeInt64, value) } if 
duo.mutation.EndIPCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Column: decision.FieldEndIP, - }) + _spec.ClearField(decision.FieldEndIP, field.TypeInt64) } if value, ok := duo.mutation.StartSuffix(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: decision.FieldStartSuffix, - }) + _spec.SetField(decision.FieldStartSuffix, field.TypeInt64, value) } if value, ok := duo.mutation.AddedStartSuffix(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: decision.FieldStartSuffix, - }) + _spec.AddField(decision.FieldStartSuffix, field.TypeInt64, value) } if duo.mutation.StartSuffixCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Column: decision.FieldStartSuffix, - }) + _spec.ClearField(decision.FieldStartSuffix, field.TypeInt64) } if value, ok := duo.mutation.EndSuffix(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: decision.FieldEndSuffix, - }) + _spec.SetField(decision.FieldEndSuffix, field.TypeInt64, value) } if value, ok := duo.mutation.AddedEndSuffix(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: decision.FieldEndSuffix, - }) + _spec.AddField(decision.FieldEndSuffix, field.TypeInt64, value) } if duo.mutation.EndSuffixCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Column: decision.FieldEndSuffix, - }) + _spec.ClearField(decision.FieldEndSuffix, field.TypeInt64) } if value, ok := duo.mutation.IPSize(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: decision.FieldIPSize, - }) + 
_spec.SetField(decision.FieldIPSize, field.TypeInt64, value) } if value, ok := duo.mutation.AddedIPSize(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: decision.FieldIPSize, - }) + _spec.AddField(decision.FieldIPSize, field.TypeInt64, value) } if duo.mutation.IPSizeCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Column: decision.FieldIPSize, - }) + _spec.ClearField(decision.FieldIPSize, field.TypeInt64) } if value, ok := duo.mutation.Scope(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: decision.FieldScope, - }) + _spec.SetField(decision.FieldScope, field.TypeString, value) } if value, ok := duo.mutation.Value(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: decision.FieldValue, - }) + _spec.SetField(decision.FieldValue, field.TypeString, value) } if value, ok := duo.mutation.Origin(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: decision.FieldOrigin, - }) + _spec.SetField(decision.FieldOrigin, field.TypeString, value) } if value, ok := duo.mutation.Simulated(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: decision.FieldSimulated, - }) + _spec.SetField(decision.FieldSimulated, field.TypeBool, value) } if value, ok := duo.mutation.UUID(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: decision.FieldUUID, - }) + _spec.SetField(decision.FieldUUID, field.TypeString, value) } if duo.mutation.UUIDCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: decision.FieldUUID, - }) + 
_spec.ClearField(decision.FieldUUID, field.TypeString) } if duo.mutation.OwnerCleared() { edge := &sqlgraph.EdgeSpec{ @@ -1265,10 +974,7 @@ func (duo *DecisionUpdateOne) sqlSave(ctx context.Context) (_node *Decision, err Columns: []string{decision.OwnerColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: alert.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1281,10 +987,7 @@ func (duo *DecisionUpdateOne) sqlSave(ctx context.Context) (_node *Decision, err Columns: []string{decision.OwnerColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: alert.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -1303,5 +1006,6 @@ func (duo *DecisionUpdateOne) sqlSave(ctx context.Context) (_node *Decision, err } return nil, err } + duo.mutation.done = true return _node, nil } diff --git a/pkg/database/ent/ent.go b/pkg/database/ent/ent.go index 0455af444d2..393ce9f1869 100644 --- a/pkg/database/ent/ent.go +++ b/pkg/database/ent/ent.go @@ -6,6 +6,8 @@ import ( "context" "errors" "fmt" + "reflect" + "sync" "entgo.io/ent" "entgo.io/ent/dialect/sql" @@ -21,50 +23,79 @@ import ( // ent aliases to avoid import conflicts in user's code. 
type ( - Op = ent.Op - Hook = ent.Hook - Value = ent.Value - Query = ent.Query - Policy = ent.Policy - Mutator = ent.Mutator - Mutation = ent.Mutation - MutateFunc = ent.MutateFunc + Op = ent.Op + Hook = ent.Hook + Value = ent.Value + Query = ent.Query + QueryContext = ent.QueryContext + Querier = ent.Querier + QuerierFunc = ent.QuerierFunc + Interceptor = ent.Interceptor + InterceptFunc = ent.InterceptFunc + Traverser = ent.Traverser + TraverseFunc = ent.TraverseFunc + Policy = ent.Policy + Mutator = ent.Mutator + Mutation = ent.Mutation + MutateFunc = ent.MutateFunc ) +type clientCtxKey struct{} + +// FromContext returns a Client stored inside a context, or nil if there isn't one. +func FromContext(ctx context.Context) *Client { + c, _ := ctx.Value(clientCtxKey{}).(*Client) + return c +} + +// NewContext returns a new context with the given Client attached. +func NewContext(parent context.Context, c *Client) context.Context { + return context.WithValue(parent, clientCtxKey{}, c) +} + +type txCtxKey struct{} + +// TxFromContext returns a Tx stored inside a context, or nil if there isn't one. +func TxFromContext(ctx context.Context) *Tx { + tx, _ := ctx.Value(txCtxKey{}).(*Tx) + return tx +} + +// NewTxContext returns a new context with the given Tx attached. +func NewTxContext(parent context.Context, tx *Tx) context.Context { + return context.WithValue(parent, txCtxKey{}, tx) +} + // OrderFunc applies an ordering on the sql selector. +// Deprecated: Use Asc/Desc functions or the package builders instead. type OrderFunc func(*sql.Selector) -// columnChecker returns a function indicates if the column exists in the given column. 
-func columnChecker(table string) func(string) error { - checks := map[string]func(string) bool{ - alert.Table: alert.ValidColumn, - bouncer.Table: bouncer.ValidColumn, - configitem.Table: configitem.ValidColumn, - decision.Table: decision.ValidColumn, - event.Table: event.ValidColumn, - machine.Table: machine.ValidColumn, - meta.Table: meta.ValidColumn, - } - check, ok := checks[table] - if !ok { - return func(string) error { - return fmt.Errorf("unknown table %q", table) - } - } - return func(column string) error { - if !check(column) { - return fmt.Errorf("unknown column %q for table %q", column, table) - } - return nil - } +var ( + initCheck sync.Once + columnCheck sql.ColumnCheck +) + +// columnChecker checks if the column exists in the given table. +func checkColumn(table, column string) error { + initCheck.Do(func() { + columnCheck = sql.NewColumnCheck(map[string]func(string) bool{ + alert.Table: alert.ValidColumn, + bouncer.Table: bouncer.ValidColumn, + configitem.Table: configitem.ValidColumn, + decision.Table: decision.ValidColumn, + event.Table: event.ValidColumn, + machine.Table: machine.ValidColumn, + meta.Table: meta.ValidColumn, + }) + }) + return columnCheck(table, column) } // Asc applies the given fields in ASC order. -func Asc(fields ...string) OrderFunc { +func Asc(fields ...string) func(*sql.Selector) { return func(s *sql.Selector) { - check := columnChecker(s.TableName()) for _, f := range fields { - if err := check(f); err != nil { + if err := checkColumn(s.TableName(), f); err != nil { s.AddError(&ValidationError{Name: f, err: fmt.Errorf("ent: %w", err)}) } s.OrderBy(sql.Asc(s.C(f))) @@ -73,11 +104,10 @@ func Asc(fields ...string) OrderFunc { } // Desc applies the given fields in DESC order. 
-func Desc(fields ...string) OrderFunc { +func Desc(fields ...string) func(*sql.Selector) { return func(s *sql.Selector) { - check := columnChecker(s.TableName()) for _, f := range fields { - if err := check(f); err != nil { + if err := checkColumn(s.TableName(), f); err != nil { s.AddError(&ValidationError{Name: f, err: fmt.Errorf("ent: %w", err)}) } s.OrderBy(sql.Desc(s.C(f))) @@ -109,8 +139,7 @@ func Count() AggregateFunc { // Max applies the "max" aggregation function on the given field of each group. func Max(field string) AggregateFunc { return func(s *sql.Selector) string { - check := columnChecker(s.TableName()) - if err := check(field); err != nil { + if err := checkColumn(s.TableName(), field); err != nil { s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)}) return "" } @@ -121,8 +150,7 @@ func Max(field string) AggregateFunc { // Mean applies the "mean" aggregation function on the given field of each group. func Mean(field string) AggregateFunc { return func(s *sql.Selector) string { - check := columnChecker(s.TableName()) - if err := check(field); err != nil { + if err := checkColumn(s.TableName(), field); err != nil { s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)}) return "" } @@ -133,8 +161,7 @@ func Mean(field string) AggregateFunc { // Min applies the "min" aggregation function on the given field of each group. func Min(field string) AggregateFunc { return func(s *sql.Selector) string { - check := columnChecker(s.TableName()) - if err := check(field); err != nil { + if err := checkColumn(s.TableName(), field); err != nil { s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)}) return "" } @@ -145,8 +172,7 @@ func Min(field string) AggregateFunc { // Sum applies the "sum" aggregation function on the given field of each group. 
func Sum(field string) AggregateFunc { return func(s *sql.Selector) string { - check := columnChecker(s.TableName()) - if err := check(field); err != nil { + if err := checkColumn(s.TableName(), field); err != nil { s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)}) return "" } @@ -275,6 +301,7 @@ func IsConstraintError(err error) bool { type selector struct { label string flds *[]string + fns []AggregateFunc scan func(context.Context, any) error } @@ -473,5 +500,121 @@ func (s *selector) BoolX(ctx context.Context) bool { return v } +// withHooks invokes the builder operation with the given hooks, if any. +func withHooks[V Value, M any, PM interface { + *M + Mutation +}](ctx context.Context, exec func(context.Context) (V, error), mutation PM, hooks []Hook) (value V, err error) { + if len(hooks) == 0 { + return exec(ctx) + } + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutationT, ok := any(m).(PM) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + // Set the mutation to the builder. + *mutation = *mutationT + return exec(ctx) + }) + for i := len(hooks) - 1; i >= 0; i-- { + if hooks[i] == nil { + return value, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") + } + mut = hooks[i](mut) + } + v, err := mut.Mutate(ctx, mutation) + if err != nil { + return value, err + } + nv, ok := v.(V) + if !ok { + return value, fmt.Errorf("unexpected node type %T returned from %T", v, mutation) + } + return nv, nil +} + +// setContextOp returns a new context with the given QueryContext attached (including its op) in case it does not exist. 
+func setContextOp(ctx context.Context, qc *QueryContext, op string) context.Context { + if ent.QueryFromContext(ctx) == nil { + qc.Op = op + ctx = ent.NewQueryContext(ctx, qc) + } + return ctx +} + +func querierAll[V Value, Q interface { + sqlAll(context.Context, ...queryHook) (V, error) +}]() Querier { + return QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + query, ok := q.(Q) + if !ok { + return nil, fmt.Errorf("unexpected query type %T", q) + } + return query.sqlAll(ctx) + }) +} + +func querierCount[Q interface { + sqlCount(context.Context) (int, error) +}]() Querier { + return QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + query, ok := q.(Q) + if !ok { + return nil, fmt.Errorf("unexpected query type %T", q) + } + return query.sqlCount(ctx) + }) +} + +func withInterceptors[V Value](ctx context.Context, q Query, qr Querier, inters []Interceptor) (v V, err error) { + for i := len(inters) - 1; i >= 0; i-- { + qr = inters[i].Intercept(qr) + } + rv, err := qr.Query(ctx, q) + if err != nil { + return v, err + } + vt, ok := rv.(V) + if !ok { + return v, fmt.Errorf("unexpected type %T returned from %T. 
expected type: %T", vt, q, v) + } + return vt, nil +} + +func scanWithInterceptors[Q1 ent.Query, Q2 interface { + sqlScan(context.Context, Q1, any) error +}](ctx context.Context, rootQuery Q1, selectOrGroup Q2, inters []Interceptor, v any) error { + rv := reflect.ValueOf(v) + var qr Querier = QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + query, ok := q.(Q1) + if !ok { + return nil, fmt.Errorf("unexpected query type %T", q) + } + if err := selectOrGroup.sqlScan(ctx, query, v); err != nil { + return nil, err + } + if k := rv.Kind(); k == reflect.Pointer && rv.Elem().CanInterface() { + return rv.Elem().Interface(), nil + } + return v, nil + }) + for i := len(inters) - 1; i >= 0; i-- { + qr = inters[i].Intercept(qr) + } + vv, err := qr.Query(ctx, rootQuery) + if err != nil { + return err + } + switch rv2 := reflect.ValueOf(vv); { + case rv.IsNil(), rv2.IsNil(), rv.Kind() != reflect.Pointer: + case rv.Type() == rv2.Type(): + rv.Elem().Set(rv2.Elem()) + case rv.Elem().Type() == rv2.Type(): + rv.Elem().Set(rv2) + } + return nil +} + // queryHook describes an internal hook for the different sqlAll methods. type queryHook func(context.Context, *sqlgraph.QuerySpec) diff --git a/pkg/database/ent/event.go b/pkg/database/ent/event.go index 4754107fddc..df4a2d10c8b 100644 --- a/pkg/database/ent/event.go +++ b/pkg/database/ent/event.go @@ -7,6 +7,7 @@ import ( "strings" "time" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" "github.com/crowdsecurity/crowdsec/pkg/database/ent/event" @@ -29,7 +30,8 @@ type Event struct { AlertEvents int `json:"alert_events,omitempty"` // Edges holds the relations/edges for other nodes in the graph. // The values are being populated by the EventQuery when eager-loading is set. - Edges EventEdges `json:"edges"` + Edges EventEdges `json:"edges"` + selectValues sql.SelectValues } // EventEdges holds the relations/edges for other nodes in the graph. 
@@ -66,7 +68,7 @@ func (*Event) scanValues(columns []string) ([]any, error) { case event.FieldCreatedAt, event.FieldUpdatedAt, event.FieldTime: values[i] = new(sql.NullTime) default: - return nil, fmt.Errorf("unexpected column %q for type Event", columns[i]) + values[i] = new(sql.UnknownType) } } return values, nil @@ -118,21 +120,29 @@ func (e *Event) assignValues(columns []string, values []any) error { } else if value.Valid { e.AlertEvents = int(value.Int64) } + default: + e.selectValues.Set(columns[i], values[i]) } } return nil } +// Value returns the ent.Value that was dynamically selected and assigned to the Event. +// This includes values selected through modifiers, order, etc. +func (e *Event) Value(name string) (ent.Value, error) { + return e.selectValues.Get(name) +} + // QueryOwner queries the "owner" edge of the Event entity. func (e *Event) QueryOwner() *AlertQuery { - return (&EventClient{config: e.config}).QueryOwner(e) + return NewEventClient(e.config).QueryOwner(e) } // Update returns a builder for updating this Event. // Note that you need to call Event.Unwrap() before calling this method if this Event // was returned from a transaction, and the transaction was committed or rolled back. func (e *Event) Update() *EventUpdateOne { - return (&EventClient{config: e.config}).UpdateOne(e) + return NewEventClient(e.config).UpdateOne(e) } // Unwrap unwraps the Event entity that was returned from a transaction after it was closed, @@ -175,9 +185,3 @@ func (e *Event) String() string { // Events is a parsable slice of Event. 
type Events []*Event - -func (e Events) config(cfg config) { - for _i := range e { - e[_i].config = cfg - } -} diff --git a/pkg/database/ent/event/event.go b/pkg/database/ent/event/event.go index 33b9b67f8b9..48f5a355824 100644 --- a/pkg/database/ent/event/event.go +++ b/pkg/database/ent/event/event.go @@ -4,6 +4,9 @@ package event import ( "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" ) const ( @@ -66,3 +69,50 @@ var ( // SerializedValidator is a validator for the "serialized" field. It is called by the builders before save. SerializedValidator func(string) error ) + +// OrderOption defines the ordering options for the Event queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. +func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// ByTime orders the results by the time field. +func ByTime(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldTime, opts...).ToFunc() +} + +// BySerialized orders the results by the serialized field. +func BySerialized(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSerialized, opts...).ToFunc() +} + +// ByAlertEvents orders the results by the alert_events field. +func ByAlertEvents(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldAlertEvents, opts...).ToFunc() +} + +// ByOwnerField orders the results by owner field. 
+func ByOwnerField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newOwnerStep(), sql.OrderByField(field, opts...)) + } +} +func newOwnerStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(OwnerInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn), + ) +} diff --git a/pkg/database/ent/event/where.go b/pkg/database/ent/event/where.go index 7554e59e678..238bea988bd 100644 --- a/pkg/database/ent/event/where.go +++ b/pkg/database/ent/event/where.go @@ -12,477 +12,307 @@ import ( // ID filters vertices based on their ID field. func ID(id int) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.Event(sql.FieldEQ(FieldID, id)) } // IDEQ applies the EQ predicate on the ID field. func IDEQ(id int) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.Event(sql.FieldEQ(FieldID, id)) } // IDNEQ applies the NEQ predicate on the ID field. func IDNEQ(id int) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldID), id)) - }) + return predicate.Event(sql.FieldNEQ(FieldID, id)) } // IDIn applies the In predicate on the ID field. func IDIn(ids ...int) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - v := make([]any, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.In(s.C(FieldID), v...)) - }) + return predicate.Event(sql.FieldIn(FieldID, ids...)) } // IDNotIn applies the NotIn predicate on the ID field. 
func IDNotIn(ids ...int) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - v := make([]any, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.NotIn(s.C(FieldID), v...)) - }) + return predicate.Event(sql.FieldNotIn(FieldID, ids...)) } // IDGT applies the GT predicate on the ID field. func IDGT(id int) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldID), id)) - }) + return predicate.Event(sql.FieldGT(FieldID, id)) } // IDGTE applies the GTE predicate on the ID field. func IDGTE(id int) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldID), id)) - }) + return predicate.Event(sql.FieldGTE(FieldID, id)) } // IDLT applies the LT predicate on the ID field. func IDLT(id int) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldID), id)) - }) + return predicate.Event(sql.FieldLT(FieldID, id)) } // IDLTE applies the LTE predicate on the ID field. func IDLTE(id int) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldID), id)) - }) + return predicate.Event(sql.FieldLTE(FieldID, id)) } // CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. func CreatedAt(v time.Time) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldCreatedAt), v)) - }) + return predicate.Event(sql.FieldEQ(FieldCreatedAt, v)) } // UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. func UpdatedAt(v time.Time) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldUpdatedAt), v)) - }) + return predicate.Event(sql.FieldEQ(FieldUpdatedAt, v)) } // Time applies equality check predicate on the "time" field. It's identical to TimeEQ. 
func Time(v time.Time) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldTime), v)) - }) + return predicate.Event(sql.FieldEQ(FieldTime, v)) } // Serialized applies equality check predicate on the "serialized" field. It's identical to SerializedEQ. func Serialized(v string) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSerialized), v)) - }) + return predicate.Event(sql.FieldEQ(FieldSerialized, v)) } // AlertEvents applies equality check predicate on the "alert_events" field. It's identical to AlertEventsEQ. func AlertEvents(v int) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldAlertEvents), v)) - }) + return predicate.Event(sql.FieldEQ(FieldAlertEvents, v)) } // CreatedAtEQ applies the EQ predicate on the "created_at" field. func CreatedAtEQ(v time.Time) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldCreatedAt), v)) - }) + return predicate.Event(sql.FieldEQ(FieldCreatedAt, v)) } // CreatedAtNEQ applies the NEQ predicate on the "created_at" field. func CreatedAtNEQ(v time.Time) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldCreatedAt), v)) - }) + return predicate.Event(sql.FieldNEQ(FieldCreatedAt, v)) } // CreatedAtIn applies the In predicate on the "created_at" field. func CreatedAtIn(vs ...time.Time) predicate.Event { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldCreatedAt), v...)) - }) + return predicate.Event(sql.FieldIn(FieldCreatedAt, vs...)) } // CreatedAtNotIn applies the NotIn predicate on the "created_at" field. 
func CreatedAtNotIn(vs ...time.Time) predicate.Event { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldCreatedAt), v...)) - }) + return predicate.Event(sql.FieldNotIn(FieldCreatedAt, vs...)) } // CreatedAtGT applies the GT predicate on the "created_at" field. func CreatedAtGT(v time.Time) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldCreatedAt), v)) - }) + return predicate.Event(sql.FieldGT(FieldCreatedAt, v)) } // CreatedAtGTE applies the GTE predicate on the "created_at" field. func CreatedAtGTE(v time.Time) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldCreatedAt), v)) - }) + return predicate.Event(sql.FieldGTE(FieldCreatedAt, v)) } // CreatedAtLT applies the LT predicate on the "created_at" field. func CreatedAtLT(v time.Time) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldCreatedAt), v)) - }) + return predicate.Event(sql.FieldLT(FieldCreatedAt, v)) } // CreatedAtLTE applies the LTE predicate on the "created_at" field. func CreatedAtLTE(v time.Time) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldCreatedAt), v)) - }) + return predicate.Event(sql.FieldLTE(FieldCreatedAt, v)) } // CreatedAtIsNil applies the IsNil predicate on the "created_at" field. func CreatedAtIsNil() predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldCreatedAt))) - }) + return predicate.Event(sql.FieldIsNull(FieldCreatedAt)) } // CreatedAtNotNil applies the NotNil predicate on the "created_at" field. func CreatedAtNotNil() predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldCreatedAt))) - }) + return predicate.Event(sql.FieldNotNull(FieldCreatedAt)) } // UpdatedAtEQ applies the EQ predicate on the "updated_at" field. 
func UpdatedAtEQ(v time.Time) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldUpdatedAt), v)) - }) + return predicate.Event(sql.FieldEQ(FieldUpdatedAt, v)) } // UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. func UpdatedAtNEQ(v time.Time) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldUpdatedAt), v)) - }) + return predicate.Event(sql.FieldNEQ(FieldUpdatedAt, v)) } // UpdatedAtIn applies the In predicate on the "updated_at" field. func UpdatedAtIn(vs ...time.Time) predicate.Event { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldUpdatedAt), v...)) - }) + return predicate.Event(sql.FieldIn(FieldUpdatedAt, vs...)) } // UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. func UpdatedAtNotIn(vs ...time.Time) predicate.Event { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldUpdatedAt), v...)) - }) + return predicate.Event(sql.FieldNotIn(FieldUpdatedAt, vs...)) } // UpdatedAtGT applies the GT predicate on the "updated_at" field. func UpdatedAtGT(v time.Time) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldUpdatedAt), v)) - }) + return predicate.Event(sql.FieldGT(FieldUpdatedAt, v)) } // UpdatedAtGTE applies the GTE predicate on the "updated_at" field. func UpdatedAtGTE(v time.Time) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldUpdatedAt), v)) - }) + return predicate.Event(sql.FieldGTE(FieldUpdatedAt, v)) } // UpdatedAtLT applies the LT predicate on the "updated_at" field. 
func UpdatedAtLT(v time.Time) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldUpdatedAt), v)) - }) + return predicate.Event(sql.FieldLT(FieldUpdatedAt, v)) } // UpdatedAtLTE applies the LTE predicate on the "updated_at" field. func UpdatedAtLTE(v time.Time) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldUpdatedAt), v)) - }) + return predicate.Event(sql.FieldLTE(FieldUpdatedAt, v)) } // UpdatedAtIsNil applies the IsNil predicate on the "updated_at" field. func UpdatedAtIsNil() predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldUpdatedAt))) - }) + return predicate.Event(sql.FieldIsNull(FieldUpdatedAt)) } // UpdatedAtNotNil applies the NotNil predicate on the "updated_at" field. func UpdatedAtNotNil() predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldUpdatedAt))) - }) + return predicate.Event(sql.FieldNotNull(FieldUpdatedAt)) } // TimeEQ applies the EQ predicate on the "time" field. func TimeEQ(v time.Time) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldTime), v)) - }) + return predicate.Event(sql.FieldEQ(FieldTime, v)) } // TimeNEQ applies the NEQ predicate on the "time" field. func TimeNEQ(v time.Time) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldTime), v)) - }) + return predicate.Event(sql.FieldNEQ(FieldTime, v)) } // TimeIn applies the In predicate on the "time" field. func TimeIn(vs ...time.Time) predicate.Event { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldTime), v...)) - }) + return predicate.Event(sql.FieldIn(FieldTime, vs...)) } // TimeNotIn applies the NotIn predicate on the "time" field. 
func TimeNotIn(vs ...time.Time) predicate.Event { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldTime), v...)) - }) + return predicate.Event(sql.FieldNotIn(FieldTime, vs...)) } // TimeGT applies the GT predicate on the "time" field. func TimeGT(v time.Time) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldTime), v)) - }) + return predicate.Event(sql.FieldGT(FieldTime, v)) } // TimeGTE applies the GTE predicate on the "time" field. func TimeGTE(v time.Time) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldTime), v)) - }) + return predicate.Event(sql.FieldGTE(FieldTime, v)) } // TimeLT applies the LT predicate on the "time" field. func TimeLT(v time.Time) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldTime), v)) - }) + return predicate.Event(sql.FieldLT(FieldTime, v)) } // TimeLTE applies the LTE predicate on the "time" field. func TimeLTE(v time.Time) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldTime), v)) - }) + return predicate.Event(sql.FieldLTE(FieldTime, v)) } // SerializedEQ applies the EQ predicate on the "serialized" field. func SerializedEQ(v string) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSerialized), v)) - }) + return predicate.Event(sql.FieldEQ(FieldSerialized, v)) } // SerializedNEQ applies the NEQ predicate on the "serialized" field. func SerializedNEQ(v string) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldSerialized), v)) - }) + return predicate.Event(sql.FieldNEQ(FieldSerialized, v)) } // SerializedIn applies the In predicate on the "serialized" field. 
func SerializedIn(vs ...string) predicate.Event { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldSerialized), v...)) - }) + return predicate.Event(sql.FieldIn(FieldSerialized, vs...)) } // SerializedNotIn applies the NotIn predicate on the "serialized" field. func SerializedNotIn(vs ...string) predicate.Event { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldSerialized), v...)) - }) + return predicate.Event(sql.FieldNotIn(FieldSerialized, vs...)) } // SerializedGT applies the GT predicate on the "serialized" field. func SerializedGT(v string) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldSerialized), v)) - }) + return predicate.Event(sql.FieldGT(FieldSerialized, v)) } // SerializedGTE applies the GTE predicate on the "serialized" field. func SerializedGTE(v string) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldSerialized), v)) - }) + return predicate.Event(sql.FieldGTE(FieldSerialized, v)) } // SerializedLT applies the LT predicate on the "serialized" field. func SerializedLT(v string) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldSerialized), v)) - }) + return predicate.Event(sql.FieldLT(FieldSerialized, v)) } // SerializedLTE applies the LTE predicate on the "serialized" field. func SerializedLTE(v string) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldSerialized), v)) - }) + return predicate.Event(sql.FieldLTE(FieldSerialized, v)) } // SerializedContains applies the Contains predicate on the "serialized" field. 
func SerializedContains(v string) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldSerialized), v)) - }) + return predicate.Event(sql.FieldContains(FieldSerialized, v)) } // SerializedHasPrefix applies the HasPrefix predicate on the "serialized" field. func SerializedHasPrefix(v string) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldSerialized), v)) - }) + return predicate.Event(sql.FieldHasPrefix(FieldSerialized, v)) } // SerializedHasSuffix applies the HasSuffix predicate on the "serialized" field. func SerializedHasSuffix(v string) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldSerialized), v)) - }) + return predicate.Event(sql.FieldHasSuffix(FieldSerialized, v)) } // SerializedEqualFold applies the EqualFold predicate on the "serialized" field. func SerializedEqualFold(v string) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldSerialized), v)) - }) + return predicate.Event(sql.FieldEqualFold(FieldSerialized, v)) } // SerializedContainsFold applies the ContainsFold predicate on the "serialized" field. func SerializedContainsFold(v string) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldSerialized), v)) - }) + return predicate.Event(sql.FieldContainsFold(FieldSerialized, v)) } // AlertEventsEQ applies the EQ predicate on the "alert_events" field. func AlertEventsEQ(v int) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldAlertEvents), v)) - }) + return predicate.Event(sql.FieldEQ(FieldAlertEvents, v)) } // AlertEventsNEQ applies the NEQ predicate on the "alert_events" field. 
func AlertEventsNEQ(v int) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldAlertEvents), v)) - }) + return predicate.Event(sql.FieldNEQ(FieldAlertEvents, v)) } // AlertEventsIn applies the In predicate on the "alert_events" field. func AlertEventsIn(vs ...int) predicate.Event { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldAlertEvents), v...)) - }) + return predicate.Event(sql.FieldIn(FieldAlertEvents, vs...)) } // AlertEventsNotIn applies the NotIn predicate on the "alert_events" field. func AlertEventsNotIn(vs ...int) predicate.Event { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldAlertEvents), v...)) - }) + return predicate.Event(sql.FieldNotIn(FieldAlertEvents, vs...)) } // AlertEventsIsNil applies the IsNil predicate on the "alert_events" field. func AlertEventsIsNil() predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldAlertEvents))) - }) + return predicate.Event(sql.FieldIsNull(FieldAlertEvents)) } // AlertEventsNotNil applies the NotNil predicate on the "alert_events" field. func AlertEventsNotNil() predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldAlertEvents))) - }) + return predicate.Event(sql.FieldNotNull(FieldAlertEvents)) } // HasOwner applies the HasEdge predicate on the "owner" edge. 
@@ -490,7 +320,6 @@ func HasOwner() predicate.Event { return predicate.Event(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(OwnerTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn), ) sqlgraph.HasNeighbors(s, step) @@ -500,11 +329,7 @@ func HasOwner() predicate.Event { // HasOwnerWith applies the HasEdge predicate on the "owner" edge with a given conditions (other predicates). func HasOwnerWith(preds ...predicate.Alert) predicate.Event { return predicate.Event(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(OwnerInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn), - ) + step := newOwnerStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -515,32 +340,15 @@ func HasOwnerWith(preds ...predicate.Alert) predicate.Event { // And groups predicates with the AND operator between them. func And(predicates ...predicate.Event) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for _, p := range predicates { - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Event(sql.AndPredicates(predicates...)) } // Or groups predicates with the OR operator between them. func Or(predicates ...predicate.Event) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for i, p := range predicates { - if i > 0 { - s1.Or() - } - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Event(sql.OrPredicates(predicates...)) } // Not applies the not operator on the given predicate. 
func Not(p predicate.Event) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - p(s.Not()) - }) + return predicate.Event(sql.NotPredicates(p)) } diff --git a/pkg/database/ent/event_create.go b/pkg/database/ent/event_create.go index c5861305130..98194f2fd33 100644 --- a/pkg/database/ent/event_create.go +++ b/pkg/database/ent/event_create.go @@ -101,50 +101,8 @@ func (ec *EventCreate) Mutation() *EventMutation { // Save creates the Event in the database. func (ec *EventCreate) Save(ctx context.Context) (*Event, error) { - var ( - err error - node *Event - ) ec.defaults() - if len(ec.hooks) == 0 { - if err = ec.check(); err != nil { - return nil, err - } - node, err = ec.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*EventMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = ec.check(); err != nil { - return nil, err - } - ec.mutation = mutation - if node, err = ec.sqlSave(ctx); err != nil { - return nil, err - } - mutation.id = &node.ID - mutation.done = true - return node, err - }) - for i := len(ec.hooks) - 1; i >= 0; i-- { - if ec.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = ec.hooks[i](mut) - } - v, err := mut.Mutate(ctx, ec.mutation) - if err != nil { - return nil, err - } - nv, ok := v.(*Event) - if !ok { - return nil, fmt.Errorf("unexpected node type %T returned from EventMutation", v) - } - node = nv - } - return node, err + return withHooks(ctx, ec.sqlSave, ec.mutation, ec.hooks) } // SaveX calls Save and panics if Save returns an error. 
@@ -198,6 +156,9 @@ func (ec *EventCreate) check() error { } func (ec *EventCreate) sqlSave(ctx context.Context) (*Event, error) { + if err := ec.check(); err != nil { + return nil, err + } _node, _spec := ec.createSpec() if err := sqlgraph.CreateNode(ctx, ec.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { @@ -207,50 +168,30 @@ func (ec *EventCreate) sqlSave(ctx context.Context) (*Event, error) { } id := _spec.ID.Value.(int64) _node.ID = int(id) + ec.mutation.id = &_node.ID + ec.mutation.done = true return _node, nil } func (ec *EventCreate) createSpec() (*Event, *sqlgraph.CreateSpec) { var ( _node = &Event{config: ec.config} - _spec = &sqlgraph.CreateSpec{ - Table: event.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: event.FieldID, - }, - } + _spec = sqlgraph.NewCreateSpec(event.Table, sqlgraph.NewFieldSpec(event.FieldID, field.TypeInt)) ) if value, ok := ec.mutation.CreatedAt(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: event.FieldCreatedAt, - }) + _spec.SetField(event.FieldCreatedAt, field.TypeTime, value) _node.CreatedAt = &value } if value, ok := ec.mutation.UpdatedAt(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: event.FieldUpdatedAt, - }) + _spec.SetField(event.FieldUpdatedAt, field.TypeTime, value) _node.UpdatedAt = &value } if value, ok := ec.mutation.Time(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: event.FieldTime, - }) + _spec.SetField(event.FieldTime, field.TypeTime, value) _node.Time = value } if value, ok := ec.mutation.Serialized(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: event.FieldSerialized, - }) + _spec.SetField(event.FieldSerialized, field.TypeString, value) _node.Serialized = value } if nodes := ec.mutation.OwnerIDs(); 
len(nodes) > 0 { @@ -261,10 +202,7 @@ func (ec *EventCreate) createSpec() (*Event, *sqlgraph.CreateSpec) { Columns: []string{event.OwnerColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: alert.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -279,11 +217,15 @@ func (ec *EventCreate) createSpec() (*Event, *sqlgraph.CreateSpec) { // EventCreateBulk is the builder for creating many Event entities in bulk. type EventCreateBulk struct { config + err error builders []*EventCreate } // Save creates the Event entities in the database. func (ecb *EventCreateBulk) Save(ctx context.Context) ([]*Event, error) { + if ecb.err != nil { + return nil, ecb.err + } specs := make([]*sqlgraph.CreateSpec, len(ecb.builders)) nodes := make([]*Event, len(ecb.builders)) mutators := make([]Mutator, len(ecb.builders)) @@ -300,8 +242,8 @@ func (ecb *EventCreateBulk) Save(ctx context.Context) ([]*Event, error) { return nil, err } builder.mutation = mutation - nodes[i], specs[i] = builder.createSpec() var err error + nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { _, err = mutators[i+1].Mutate(root, ecb.builders[i+1].mutation) } else { diff --git a/pkg/database/ent/event_delete.go b/pkg/database/ent/event_delete.go index 0220dc71d31..93dd1246b7e 100644 --- a/pkg/database/ent/event_delete.go +++ b/pkg/database/ent/event_delete.go @@ -4,7 +4,6 @@ package ent import ( "context" - "fmt" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" @@ -28,34 +27,7 @@ func (ed *EventDelete) Where(ps ...predicate.Event) *EventDelete { // Exec executes the deletion query and returns how many vertices were deleted. 
func (ed *EventDelete) Exec(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(ed.hooks) == 0 { - affected, err = ed.sqlExec(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*EventMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - ed.mutation = mutation - affected, err = ed.sqlExec(ctx) - mutation.done = true - return affected, err - }) - for i := len(ed.hooks) - 1; i >= 0; i-- { - if ed.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = ed.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, ed.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, ed.sqlExec, ed.mutation, ed.hooks) } // ExecX is like Exec, but panics if an error occurs. @@ -68,15 +40,7 @@ func (ed *EventDelete) ExecX(ctx context.Context) int { } func (ed *EventDelete) sqlExec(ctx context.Context) (int, error) { - _spec := &sqlgraph.DeleteSpec{ - Node: &sqlgraph.NodeSpec{ - Table: event.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: event.FieldID, - }, - }, - } + _spec := sqlgraph.NewDeleteSpec(event.Table, sqlgraph.NewFieldSpec(event.FieldID, field.TypeInt)) if ps := ed.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -88,6 +52,7 @@ func (ed *EventDelete) sqlExec(ctx context.Context) (int, error) { if err != nil && sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } + ed.mutation.done = true return affected, err } @@ -96,6 +61,12 @@ type EventDeleteOne struct { ed *EventDelete } +// Where appends a list predicates to the EventDelete builder. +func (edo *EventDeleteOne) Where(ps ...predicate.Event) *EventDeleteOne { + edo.ed.mutation.Where(ps...) + return edo +} + // Exec executes the deletion query. 
func (edo *EventDeleteOne) Exec(ctx context.Context) error { n, err := edo.ed.Exec(ctx) @@ -111,5 +82,7 @@ func (edo *EventDeleteOne) Exec(ctx context.Context) error { // ExecX is like Exec, but panics if an error occurs. func (edo *EventDeleteOne) ExecX(ctx context.Context) { - edo.ed.ExecX(ctx) + if err := edo.Exec(ctx); err != nil { + panic(err) + } } diff --git a/pkg/database/ent/event_query.go b/pkg/database/ent/event_query.go index 045d750f818..1493d7bd32c 100644 --- a/pkg/database/ent/event_query.go +++ b/pkg/database/ent/event_query.go @@ -18,11 +18,9 @@ import ( // EventQuery is the builder for querying Event entities. type EventQuery struct { config - limit *int - offset *int - unique *bool - order []OrderFunc - fields []string + ctx *QueryContext + order []event.OrderOption + inters []Interceptor predicates []predicate.Event withOwner *AlertQuery // intermediate query (i.e. traversal path). @@ -36,34 +34,34 @@ func (eq *EventQuery) Where(ps ...predicate.Event) *EventQuery { return eq } -// Limit adds a limit step to the query. +// Limit the number of records to be returned by this query. func (eq *EventQuery) Limit(limit int) *EventQuery { - eq.limit = &limit + eq.ctx.Limit = &limit return eq } -// Offset adds an offset step to the query. +// Offset to start from. func (eq *EventQuery) Offset(offset int) *EventQuery { - eq.offset = &offset + eq.ctx.Offset = &offset return eq } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. func (eq *EventQuery) Unique(unique bool) *EventQuery { - eq.unique = &unique + eq.ctx.Unique = &unique return eq } -// Order adds an order step to the query. -func (eq *EventQuery) Order(o ...OrderFunc) *EventQuery { +// Order specifies how the records should be ordered. +func (eq *EventQuery) Order(o ...event.OrderOption) *EventQuery { eq.order = append(eq.order, o...) 
return eq } // QueryOwner chains the current query on the "owner" edge. func (eq *EventQuery) QueryOwner() *AlertQuery { - query := &AlertQuery{config: eq.config} + query := (&AlertClient{config: eq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := eq.prepareQuery(ctx); err != nil { return nil, err @@ -86,7 +84,7 @@ func (eq *EventQuery) QueryOwner() *AlertQuery { // First returns the first Event entity from the query. // Returns a *NotFoundError when no Event was found. func (eq *EventQuery) First(ctx context.Context) (*Event, error) { - nodes, err := eq.Limit(1).All(ctx) + nodes, err := eq.Limit(1).All(setContextOp(ctx, eq.ctx, "First")) if err != nil { return nil, err } @@ -109,7 +107,7 @@ func (eq *EventQuery) FirstX(ctx context.Context) *Event { // Returns a *NotFoundError when no Event ID was found. func (eq *EventQuery) FirstID(ctx context.Context) (id int, err error) { var ids []int - if ids, err = eq.Limit(1).IDs(ctx); err != nil { + if ids, err = eq.Limit(1).IDs(setContextOp(ctx, eq.ctx, "FirstID")); err != nil { return } if len(ids) == 0 { @@ -132,7 +130,7 @@ func (eq *EventQuery) FirstIDX(ctx context.Context) int { // Returns a *NotSingularError when more than one Event entity is found. // Returns a *NotFoundError when no Event entities are found. func (eq *EventQuery) Only(ctx context.Context) (*Event, error) { - nodes, err := eq.Limit(2).All(ctx) + nodes, err := eq.Limit(2).All(setContextOp(ctx, eq.ctx, "Only")) if err != nil { return nil, err } @@ -160,7 +158,7 @@ func (eq *EventQuery) OnlyX(ctx context.Context) *Event { // Returns a *NotFoundError when no entities are found. 
func (eq *EventQuery) OnlyID(ctx context.Context) (id int, err error) { var ids []int - if ids, err = eq.Limit(2).IDs(ctx); err != nil { + if ids, err = eq.Limit(2).IDs(setContextOp(ctx, eq.ctx, "OnlyID")); err != nil { return } switch len(ids) { @@ -185,10 +183,12 @@ func (eq *EventQuery) OnlyIDX(ctx context.Context) int { // All executes the query and returns a list of Events. func (eq *EventQuery) All(ctx context.Context) ([]*Event, error) { + ctx = setContextOp(ctx, eq.ctx, "All") if err := eq.prepareQuery(ctx); err != nil { return nil, err } - return eq.sqlAll(ctx) + qr := querierAll[[]*Event, *EventQuery]() + return withInterceptors[[]*Event](ctx, eq, qr, eq.inters) } // AllX is like All, but panics if an error occurs. @@ -201,9 +201,12 @@ func (eq *EventQuery) AllX(ctx context.Context) []*Event { } // IDs executes the query and returns a list of Event IDs. -func (eq *EventQuery) IDs(ctx context.Context) ([]int, error) { - var ids []int - if err := eq.Select(event.FieldID).Scan(ctx, &ids); err != nil { +func (eq *EventQuery) IDs(ctx context.Context) (ids []int, err error) { + if eq.ctx.Unique == nil && eq.path != nil { + eq.Unique(true) + } + ctx = setContextOp(ctx, eq.ctx, "IDs") + if err = eq.Select(event.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil @@ -220,10 +223,11 @@ func (eq *EventQuery) IDsX(ctx context.Context) []int { // Count returns the count of the given query. func (eq *EventQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, eq.ctx, "Count") if err := eq.prepareQuery(ctx); err != nil { return 0, err } - return eq.sqlCount(ctx) + return withInterceptors[int](ctx, eq, querierCount[*EventQuery](), eq.inters) } // CountX is like Count, but panics if an error occurs. @@ -237,10 +241,15 @@ func (eq *EventQuery) CountX(ctx context.Context) int { // Exist returns true if the query has elements in the graph. 
func (eq *EventQuery) Exist(ctx context.Context) (bool, error) { - if err := eq.prepareQuery(ctx); err != nil { - return false, err + ctx = setContextOp(ctx, eq.ctx, "Exist") + switch _, err := eq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil } - return eq.sqlExist(ctx) } // ExistX is like Exist, but panics if an error occurs. @@ -260,22 +269,21 @@ func (eq *EventQuery) Clone() *EventQuery { } return &EventQuery{ config: eq.config, - limit: eq.limit, - offset: eq.offset, - order: append([]OrderFunc{}, eq.order...), + ctx: eq.ctx.Clone(), + order: append([]event.OrderOption{}, eq.order...), + inters: append([]Interceptor{}, eq.inters...), predicates: append([]predicate.Event{}, eq.predicates...), withOwner: eq.withOwner.Clone(), // clone intermediate query. - sql: eq.sql.Clone(), - path: eq.path, - unique: eq.unique, + sql: eq.sql.Clone(), + path: eq.path, } } // WithOwner tells the query-builder to eager-load the nodes that are connected to // the "owner" edge. The optional arguments are used to configure the query builder of the edge. func (eq *EventQuery) WithOwner(opts ...func(*AlertQuery)) *EventQuery { - query := &AlertQuery{config: eq.config} + query := (&AlertClient{config: eq.config}).Query() for _, opt := range opts { opt(query) } @@ -298,16 +306,11 @@ func (eq *EventQuery) WithOwner(opts ...func(*AlertQuery)) *EventQuery { // Aggregate(ent.Count()). // Scan(ctx, &v) func (eq *EventQuery) GroupBy(field string, fields ...string) *EventGroupBy { - grbuild := &EventGroupBy{config: eq.config} - grbuild.fields = append([]string{field}, fields...) - grbuild.path = func(ctx context.Context) (prev *sql.Selector, err error) { - if err := eq.prepareQuery(ctx); err != nil { - return nil, err - } - return eq.sqlQuery(ctx), nil - } + eq.ctx.Fields = append([]string{field}, fields...) 
+ grbuild := &EventGroupBy{build: eq} + grbuild.flds = &eq.ctx.Fields grbuild.label = event.Label - grbuild.flds, grbuild.scan = &grbuild.fields, grbuild.Scan + grbuild.scan = grbuild.Scan return grbuild } @@ -324,15 +327,30 @@ func (eq *EventQuery) GroupBy(field string, fields ...string) *EventGroupBy { // Select(event.FieldCreatedAt). // Scan(ctx, &v) func (eq *EventQuery) Select(fields ...string) *EventSelect { - eq.fields = append(eq.fields, fields...) - selbuild := &EventSelect{EventQuery: eq} - selbuild.label = event.Label - selbuild.flds, selbuild.scan = &eq.fields, selbuild.Scan - return selbuild + eq.ctx.Fields = append(eq.ctx.Fields, fields...) + sbuild := &EventSelect{EventQuery: eq} + sbuild.label = event.Label + sbuild.flds, sbuild.scan = &eq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a EventSelect configured with the given aggregations. +func (eq *EventQuery) Aggregate(fns ...AggregateFunc) *EventSelect { + return eq.Select().Aggregate(fns...) } func (eq *EventQuery) prepareQuery(ctx context.Context) error { - for _, f := range eq.fields { + for _, inter := range eq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, eq); err != nil { + return err + } + } + } + for _, f := range eq.ctx.Fields { if !event.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } @@ -392,6 +410,9 @@ func (eq *EventQuery) loadOwner(ctx context.Context, query *AlertQuery, nodes [] } nodeids[fk] = append(nodeids[fk], nodes[i]) } + if len(ids) == 0 { + return nil + } query.Where(alert.IDIn(ids...)) neighbors, err := query.All(ctx) if err != nil { @@ -411,41 +432,22 @@ func (eq *EventQuery) loadOwner(ctx context.Context, query *AlertQuery, nodes [] func (eq *EventQuery) sqlCount(ctx context.Context) (int, error) { _spec := eq.querySpec() - _spec.Node.Columns = 
eq.fields - if len(eq.fields) > 0 { - _spec.Unique = eq.unique != nil && *eq.unique + _spec.Node.Columns = eq.ctx.Fields + if len(eq.ctx.Fields) > 0 { + _spec.Unique = eq.ctx.Unique != nil && *eq.ctx.Unique } return sqlgraph.CountNodes(ctx, eq.driver, _spec) } -func (eq *EventQuery) sqlExist(ctx context.Context) (bool, error) { - switch _, err := eq.FirstID(ctx); { - case IsNotFound(err): - return false, nil - case err != nil: - return false, fmt.Errorf("ent: check existence: %w", err) - default: - return true, nil - } -} - func (eq *EventQuery) querySpec() *sqlgraph.QuerySpec { - _spec := &sqlgraph.QuerySpec{ - Node: &sqlgraph.NodeSpec{ - Table: event.Table, - Columns: event.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: event.FieldID, - }, - }, - From: eq.sql, - Unique: true, - } - if unique := eq.unique; unique != nil { + _spec := sqlgraph.NewQuerySpec(event.Table, event.Columns, sqlgraph.NewFieldSpec(event.FieldID, field.TypeInt)) + _spec.From = eq.sql + if unique := eq.ctx.Unique; unique != nil { _spec.Unique = *unique + } else if eq.path != nil { + _spec.Unique = true } - if fields := eq.fields; len(fields) > 0 { + if fields := eq.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, event.FieldID) for i := range fields { @@ -453,6 +455,9 @@ func (eq *EventQuery) querySpec() *sqlgraph.QuerySpec { _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) } } + if eq.withOwner != nil { + _spec.Node.AddColumnOnce(event.FieldAlertEvents) + } } if ps := eq.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { @@ -461,10 +466,10 @@ func (eq *EventQuery) querySpec() *sqlgraph.QuerySpec { } } } - if limit := eq.limit; limit != nil { + if limit := eq.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := eq.offset; offset != nil { + if offset := eq.ctx.Offset; offset != nil { _spec.Offset = *offset } if ps := eq.order; len(ps) > 0 { @@ 
-480,7 +485,7 @@ func (eq *EventQuery) querySpec() *sqlgraph.QuerySpec { func (eq *EventQuery) sqlQuery(ctx context.Context) *sql.Selector { builder := sql.Dialect(eq.driver.Dialect()) t1 := builder.Table(event.Table) - columns := eq.fields + columns := eq.ctx.Fields if len(columns) == 0 { columns = event.Columns } @@ -489,7 +494,7 @@ func (eq *EventQuery) sqlQuery(ctx context.Context) *sql.Selector { selector = eq.sql selector.Select(selector.Columns(columns...)...) } - if eq.unique != nil && *eq.unique { + if eq.ctx.Unique != nil && *eq.ctx.Unique { selector.Distinct() } for _, p := range eq.predicates { @@ -498,12 +503,12 @@ func (eq *EventQuery) sqlQuery(ctx context.Context) *sql.Selector { for _, p := range eq.order { p(selector) } - if offset := eq.offset; offset != nil { + if offset := eq.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := eq.limit; limit != nil { + if limit := eq.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector @@ -511,13 +516,8 @@ func (eq *EventQuery) sqlQuery(ctx context.Context) *sql.Selector { // EventGroupBy is the group-by builder for Event entities. type EventGroupBy struct { - config selector - fields []string - fns []AggregateFunc - // intermediate query (i.e. traversal path). - sql *sql.Selector - path func(context.Context) (*sql.Selector, error) + build *EventQuery } // Aggregate adds the given aggregation functions to the group-by query. @@ -526,74 +526,77 @@ func (egb *EventGroupBy) Aggregate(fns ...AggregateFunc) *EventGroupBy { return egb } -// Scan applies the group-by query and scans the result into the given value. +// Scan applies the selector query and scans the result into the given value. 
func (egb *EventGroupBy) Scan(ctx context.Context, v any) error { - query, err := egb.path(ctx) - if err != nil { + ctx = setContextOp(ctx, egb.build.ctx, "GroupBy") + if err := egb.build.prepareQuery(ctx); err != nil { return err } - egb.sql = query - return egb.sqlScan(ctx, v) + return scanWithInterceptors[*EventQuery, *EventGroupBy](ctx, egb.build, egb, egb.build.inters, v) } -func (egb *EventGroupBy) sqlScan(ctx context.Context, v any) error { - for _, f := range egb.fields { - if !event.ValidColumn(f) { - return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} +func (egb *EventGroupBy) sqlScan(ctx context.Context, root *EventQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(egb.fns)) + for _, fn := range egb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*egb.flds)+len(egb.fns)) + for _, f := range *egb.flds { + columns = append(columns, selector.C(f)) } + columns = append(columns, aggregation...) + selector.Select(columns...) } - selector := egb.sqlQuery() + selector.GroupBy(selector.Columns(*egb.flds...)...) if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := egb.driver.Query(ctx, query, args, rows); err != nil { + if err := egb.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() return sql.ScanSlice(rows, v) } -func (egb *EventGroupBy) sqlQuery() *sql.Selector { - selector := egb.sql.Select() - aggregation := make([]string, 0, len(egb.fns)) - for _, fn := range egb.fns { - aggregation = append(aggregation, fn(selector)) - } - // If no columns were selected in a custom aggregation function, the default - // selection is the fields used for "group-by", and the aggregation functions. 
- if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(egb.fields)+len(egb.fns)) - for _, f := range egb.fields { - columns = append(columns, selector.C(f)) - } - columns = append(columns, aggregation...) - selector.Select(columns...) - } - return selector.GroupBy(selector.Columns(egb.fields...)...) -} - // EventSelect is the builder for selecting fields of Event entities. type EventSelect struct { *EventQuery selector - // intermediate query (i.e. traversal path). - sql *sql.Selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (es *EventSelect) Aggregate(fns ...AggregateFunc) *EventSelect { + es.fns = append(es.fns, fns...) + return es } // Scan applies the selector query and scans the result into the given value. func (es *EventSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, es.ctx, "Select") if err := es.prepareQuery(ctx); err != nil { return err } - es.sql = es.EventQuery.sqlQuery(ctx) - return es.sqlScan(ctx, v) + return scanWithInterceptors[*EventQuery, *EventSelect](ctx, es.EventQuery, es, es.inters, v) } -func (es *EventSelect) sqlScan(ctx context.Context, v any) error { +func (es *EventSelect) sqlScan(ctx context.Context, root *EventQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(es.fns)) + for _, fn := range es.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*es.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) 
+ } rows := &sql.Rows{} - query, args := es.sql.Query() + query, args := selector.Query() if err := es.driver.Query(ctx, query, args, rows); err != nil { return err } diff --git a/pkg/database/ent/event_update.go b/pkg/database/ent/event_update.go index fcd0cc50c99..db748101519 100644 --- a/pkg/database/ent/event_update.go +++ b/pkg/database/ent/event_update.go @@ -117,41 +117,8 @@ func (eu *EventUpdate) ClearOwner() *EventUpdate { // Save executes the query and returns the number of nodes affected by the update operation. func (eu *EventUpdate) Save(ctx context.Context) (int, error) { - var ( - err error - affected int - ) eu.defaults() - if len(eu.hooks) == 0 { - if err = eu.check(); err != nil { - return 0, err - } - affected, err = eu.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*EventMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = eu.check(); err != nil { - return 0, err - } - eu.mutation = mutation - affected, err = eu.sqlSave(ctx) - mutation.done = true - return affected, err - }) - for i := len(eu.hooks) - 1; i >= 0; i-- { - if eu.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = eu.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, eu.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, eu.sqlSave, eu.mutation, eu.hooks) } // SaveX is like Save, but panics if an error occurs. 
@@ -199,16 +166,10 @@ func (eu *EventUpdate) check() error { } func (eu *EventUpdate) sqlSave(ctx context.Context) (n int, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: event.Table, - Columns: event.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: event.FieldID, - }, - }, + if err := eu.check(); err != nil { + return n, err } + _spec := sqlgraph.NewUpdateSpec(event.Table, event.Columns, sqlgraph.NewFieldSpec(event.FieldID, field.TypeInt)) if ps := eu.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -217,44 +178,22 @@ func (eu *EventUpdate) sqlSave(ctx context.Context) (n int, err error) { } } if value, ok := eu.mutation.CreatedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: event.FieldCreatedAt, - }) + _spec.SetField(event.FieldCreatedAt, field.TypeTime, value) } if eu.mutation.CreatedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: event.FieldCreatedAt, - }) + _spec.ClearField(event.FieldCreatedAt, field.TypeTime) } if value, ok := eu.mutation.UpdatedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: event.FieldUpdatedAt, - }) + _spec.SetField(event.FieldUpdatedAt, field.TypeTime, value) } if eu.mutation.UpdatedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: event.FieldUpdatedAt, - }) + _spec.ClearField(event.FieldUpdatedAt, field.TypeTime) } if value, ok := eu.mutation.Time(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: event.FieldTime, - }) + _spec.SetField(event.FieldTime, field.TypeTime, value) } if value, ok := eu.mutation.Serialized(); ok { - _spec.Fields.Set = 
append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: event.FieldSerialized, - }) + _spec.SetField(event.FieldSerialized, field.TypeString, value) } if eu.mutation.OwnerCleared() { edge := &sqlgraph.EdgeSpec{ @@ -264,10 +203,7 @@ func (eu *EventUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{event.OwnerColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: alert.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -280,10 +216,7 @@ func (eu *EventUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{event.OwnerColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: alert.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -299,6 +232,7 @@ func (eu *EventUpdate) sqlSave(ctx context.Context) (n int, err error) { } return 0, err } + eu.mutation.done = true return n, nil } @@ -396,6 +330,12 @@ func (euo *EventUpdateOne) ClearOwner() *EventUpdateOne { return euo } +// Where appends a list predicates to the EventUpdate builder. +func (euo *EventUpdateOne) Where(ps ...predicate.Event) *EventUpdateOne { + euo.mutation.Where(ps...) + return euo +} + // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. func (euo *EventUpdateOne) Select(field string, fields ...string) *EventUpdateOne { @@ -405,47 +345,8 @@ func (euo *EventUpdateOne) Select(field string, fields ...string) *EventUpdateOn // Save executes the query and returns the updated Event entity. 
func (euo *EventUpdateOne) Save(ctx context.Context) (*Event, error) { - var ( - err error - node *Event - ) euo.defaults() - if len(euo.hooks) == 0 { - if err = euo.check(); err != nil { - return nil, err - } - node, err = euo.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*EventMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = euo.check(); err != nil { - return nil, err - } - euo.mutation = mutation - node, err = euo.sqlSave(ctx) - mutation.done = true - return node, err - }) - for i := len(euo.hooks) - 1; i >= 0; i-- { - if euo.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = euo.hooks[i](mut) - } - v, err := mut.Mutate(ctx, euo.mutation) - if err != nil { - return nil, err - } - nv, ok := v.(*Event) - if !ok { - return nil, fmt.Errorf("unexpected node type %T returned from EventMutation", v) - } - node = nv - } - return node, err + return withHooks(ctx, euo.sqlSave, euo.mutation, euo.hooks) } // SaveX is like Save, but panics if an error occurs. 
@@ -493,16 +394,10 @@ func (euo *EventUpdateOne) check() error { } func (euo *EventUpdateOne) sqlSave(ctx context.Context) (_node *Event, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: event.Table, - Columns: event.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: event.FieldID, - }, - }, + if err := euo.check(); err != nil { + return _node, err } + _spec := sqlgraph.NewUpdateSpec(event.Table, event.Columns, sqlgraph.NewFieldSpec(event.FieldID, field.TypeInt)) id, ok := euo.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Event.id" for update`)} @@ -528,44 +423,22 @@ func (euo *EventUpdateOne) sqlSave(ctx context.Context) (_node *Event, err error } } if value, ok := euo.mutation.CreatedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: event.FieldCreatedAt, - }) + _spec.SetField(event.FieldCreatedAt, field.TypeTime, value) } if euo.mutation.CreatedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: event.FieldCreatedAt, - }) + _spec.ClearField(event.FieldCreatedAt, field.TypeTime) } if value, ok := euo.mutation.UpdatedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: event.FieldUpdatedAt, - }) + _spec.SetField(event.FieldUpdatedAt, field.TypeTime, value) } if euo.mutation.UpdatedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: event.FieldUpdatedAt, - }) + _spec.ClearField(event.FieldUpdatedAt, field.TypeTime) } if value, ok := euo.mutation.Time(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: event.FieldTime, - }) + _spec.SetField(event.FieldTime, field.TypeTime, value) } if value, ok := 
euo.mutation.Serialized(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: event.FieldSerialized, - }) + _spec.SetField(event.FieldSerialized, field.TypeString, value) } if euo.mutation.OwnerCleared() { edge := &sqlgraph.EdgeSpec{ @@ -575,10 +448,7 @@ func (euo *EventUpdateOne) sqlSave(ctx context.Context) (_node *Event, err error Columns: []string{event.OwnerColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: alert.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -591,10 +461,7 @@ func (euo *EventUpdateOne) sqlSave(ctx context.Context) (_node *Event, err error Columns: []string{event.OwnerColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: alert.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -613,5 +480,6 @@ func (euo *EventUpdateOne) sqlSave(ctx context.Context) (_node *Event, err error } return nil, err } + euo.mutation.done = true return _node, nil } diff --git a/pkg/database/ent/hook/hook.go b/pkg/database/ent/hook/hook.go index 85ab00b01fb..7ec9c3ab1d8 100644 --- a/pkg/database/ent/hook/hook.go +++ b/pkg/database/ent/hook/hook.go @@ -15,11 +15,10 @@ type AlertFunc func(context.Context, *ent.AlertMutation) (ent.Value, error) // Mutate calls f(ctx, m). func (f AlertFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { - mv, ok := m.(*ent.AlertMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.AlertMutation", m) + if mv, ok := m.(*ent.AlertMutation); ok { + return f(ctx, mv) } - return f(ctx, mv) + return nil, fmt.Errorf("unexpected mutation type %T. 
expect *ent.AlertMutation", m) } // The BouncerFunc type is an adapter to allow the use of ordinary @@ -28,11 +27,10 @@ type BouncerFunc func(context.Context, *ent.BouncerMutation) (ent.Value, error) // Mutate calls f(ctx, m). func (f BouncerFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { - mv, ok := m.(*ent.BouncerMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.BouncerMutation", m) + if mv, ok := m.(*ent.BouncerMutation); ok { + return f(ctx, mv) } - return f(ctx, mv) + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.BouncerMutation", m) } // The ConfigItemFunc type is an adapter to allow the use of ordinary @@ -41,11 +39,10 @@ type ConfigItemFunc func(context.Context, *ent.ConfigItemMutation) (ent.Value, e // Mutate calls f(ctx, m). func (f ConfigItemFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { - mv, ok := m.(*ent.ConfigItemMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.ConfigItemMutation", m) + if mv, ok := m.(*ent.ConfigItemMutation); ok { + return f(ctx, mv) } - return f(ctx, mv) + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.ConfigItemMutation", m) } // The DecisionFunc type is an adapter to allow the use of ordinary @@ -54,11 +51,10 @@ type DecisionFunc func(context.Context, *ent.DecisionMutation) (ent.Value, error // Mutate calls f(ctx, m). func (f DecisionFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { - mv, ok := m.(*ent.DecisionMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.DecisionMutation", m) + if mv, ok := m.(*ent.DecisionMutation); ok { + return f(ctx, mv) } - return f(ctx, mv) + return nil, fmt.Errorf("unexpected mutation type %T. 
expect *ent.DecisionMutation", m) } // The EventFunc type is an adapter to allow the use of ordinary @@ -67,11 +63,10 @@ type EventFunc func(context.Context, *ent.EventMutation) (ent.Value, error) // Mutate calls f(ctx, m). func (f EventFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { - mv, ok := m.(*ent.EventMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.EventMutation", m) + if mv, ok := m.(*ent.EventMutation); ok { + return f(ctx, mv) } - return f(ctx, mv) + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.EventMutation", m) } // The MachineFunc type is an adapter to allow the use of ordinary @@ -80,11 +75,10 @@ type MachineFunc func(context.Context, *ent.MachineMutation) (ent.Value, error) // Mutate calls f(ctx, m). func (f MachineFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { - mv, ok := m.(*ent.MachineMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.MachineMutation", m) + if mv, ok := m.(*ent.MachineMutation); ok { + return f(ctx, mv) } - return f(ctx, mv) + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.MachineMutation", m) } // The MetaFunc type is an adapter to allow the use of ordinary @@ -93,11 +87,10 @@ type MetaFunc func(context.Context, *ent.MetaMutation) (ent.Value, error) // Mutate calls f(ctx, m). func (f MetaFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { - mv, ok := m.(*ent.MetaMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.MetaMutation", m) + if mv, ok := m.(*ent.MetaMutation); ok { + return f(ctx, mv) } - return f(ctx, mv) + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.MetaMutation", m) } // Condition is a hook condition function. 
diff --git a/pkg/database/ent/machine.go b/pkg/database/ent/machine.go index dc2b18ee81c..346a8d084ba 100644 --- a/pkg/database/ent/machine.go +++ b/pkg/database/ent/machine.go @@ -7,6 +7,7 @@ import ( "strings" "time" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" ) @@ -42,7 +43,8 @@ type Machine struct { AuthType string `json:"auth_type"` // Edges holds the relations/edges for other nodes in the graph. // The values are being populated by the MachineQuery when eager-loading is set. - Edges MachineEdges `json:"edges"` + Edges MachineEdges `json:"edges"` + selectValues sql.SelectValues } // MachineEdges holds the relations/edges for other nodes in the graph. @@ -77,7 +79,7 @@ func (*Machine) scanValues(columns []string) ([]any, error) { case machine.FieldCreatedAt, machine.FieldUpdatedAt, machine.FieldLastPush, machine.FieldLastHeartbeat: values[i] = new(sql.NullTime) default: - return nil, fmt.Errorf("unexpected column %q for type Machine", columns[i]) + values[i] = new(sql.UnknownType) } } return values, nil @@ -173,21 +175,29 @@ func (m *Machine) assignValues(columns []string, values []any) error { } else if value.Valid { m.AuthType = value.String } + default: + m.selectValues.Set(columns[i], values[i]) } } return nil } +// Value returns the ent.Value that was dynamically selected and assigned to the Machine. +// This includes values selected through modifiers, order, etc. +func (m *Machine) Value(name string) (ent.Value, error) { + return m.selectValues.Get(name) +} + // QueryAlerts queries the "alerts" edge of the Machine entity. func (m *Machine) QueryAlerts() *AlertQuery { - return (&MachineClient{config: m.config}).QueryAlerts(m) + return NewMachineClient(m.config).QueryAlerts(m) } // Update returns a builder for updating this Machine. 
// Note that you need to call Machine.Unwrap() before calling this method if this Machine // was returned from a transaction, and the transaction was committed or rolled back. func (m *Machine) Update() *MachineUpdateOne { - return (&MachineClient{config: m.config}).UpdateOne(m) + return NewMachineClient(m.config).UpdateOne(m) } // Unwrap unwraps the Machine entity that was returned from a transaction after it was closed, @@ -254,9 +264,3 @@ func (m *Machine) String() string { // Machines is a parsable slice of Machine. type Machines []*Machine - -func (m Machines) config(cfg config) { - for _i := range m { - m[_i].config = cfg - } -} diff --git a/pkg/database/ent/machine/machine.go b/pkg/database/ent/machine/machine.go index e6900dd21e1..5456935e04c 100644 --- a/pkg/database/ent/machine/machine.go +++ b/pkg/database/ent/machine/machine.go @@ -4,6 +4,9 @@ package machine import ( "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" ) const ( @@ -99,3 +102,92 @@ var ( // DefaultAuthType holds the default value on creation for the "auth_type" field. DefaultAuthType string ) + +// OrderOption defines the ordering options for the Machine queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. +func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// ByLastPush orders the results by the last_push field. 
+func ByLastPush(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldLastPush, opts...).ToFunc() +} + +// ByLastHeartbeat orders the results by the last_heartbeat field. +func ByLastHeartbeat(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldLastHeartbeat, opts...).ToFunc() +} + +// ByMachineId orders the results by the machineId field. +func ByMachineId(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldMachineId, opts...).ToFunc() +} + +// ByPassword orders the results by the password field. +func ByPassword(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldPassword, opts...).ToFunc() +} + +// ByIpAddress orders the results by the ipAddress field. +func ByIpAddress(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldIpAddress, opts...).ToFunc() +} + +// ByScenarios orders the results by the scenarios field. +func ByScenarios(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldScenarios, opts...).ToFunc() +} + +// ByVersion orders the results by the version field. +func ByVersion(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldVersion, opts...).ToFunc() +} + +// ByIsValidated orders the results by the isValidated field. +func ByIsValidated(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldIsValidated, opts...).ToFunc() +} + +// ByStatus orders the results by the status field. +func ByStatus(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldStatus, opts...).ToFunc() +} + +// ByAuthType orders the results by the auth_type field. +func ByAuthType(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldAuthType, opts...).ToFunc() +} + +// ByAlertsCount orders the results by alerts count. +func ByAlertsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newAlertsStep(), opts...) 
+ } +} + +// ByAlerts orders the results by alerts terms. +func ByAlerts(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newAlertsStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} +func newAlertsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(AlertsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, AlertsTable, AlertsColumn), + ) +} diff --git a/pkg/database/ent/machine/where.go b/pkg/database/ent/machine/where.go index 7d0227731cc..e9d00e7e01e 100644 --- a/pkg/database/ent/machine/where.go +++ b/pkg/database/ent/machine/where.go @@ -12,1218 +12,802 @@ import ( // ID filters vertices based on their ID field. func ID(id int) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.Machine(sql.FieldEQ(FieldID, id)) } // IDEQ applies the EQ predicate on the ID field. func IDEQ(id int) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.Machine(sql.FieldEQ(FieldID, id)) } // IDNEQ applies the NEQ predicate on the ID field. func IDNEQ(id int) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldID), id)) - }) + return predicate.Machine(sql.FieldNEQ(FieldID, id)) } // IDIn applies the In predicate on the ID field. func IDIn(ids ...int) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - v := make([]any, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.In(s.C(FieldID), v...)) - }) + return predicate.Machine(sql.FieldIn(FieldID, ids...)) } // IDNotIn applies the NotIn predicate on the ID field. 
func IDNotIn(ids ...int) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - v := make([]any, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.NotIn(s.C(FieldID), v...)) - }) + return predicate.Machine(sql.FieldNotIn(FieldID, ids...)) } // IDGT applies the GT predicate on the ID field. func IDGT(id int) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldID), id)) - }) + return predicate.Machine(sql.FieldGT(FieldID, id)) } // IDGTE applies the GTE predicate on the ID field. func IDGTE(id int) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldID), id)) - }) + return predicate.Machine(sql.FieldGTE(FieldID, id)) } // IDLT applies the LT predicate on the ID field. func IDLT(id int) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldID), id)) - }) + return predicate.Machine(sql.FieldLT(FieldID, id)) } // IDLTE applies the LTE predicate on the ID field. func IDLTE(id int) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldID), id)) - }) + return predicate.Machine(sql.FieldLTE(FieldID, id)) } // CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. func CreatedAt(v time.Time) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldCreatedAt), v)) - }) + return predicate.Machine(sql.FieldEQ(FieldCreatedAt, v)) } // UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. func UpdatedAt(v time.Time) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldUpdatedAt), v)) - }) + return predicate.Machine(sql.FieldEQ(FieldUpdatedAt, v)) } // LastPush applies equality check predicate on the "last_push" field. It's identical to LastPushEQ. 
func LastPush(v time.Time) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldLastPush), v)) - }) + return predicate.Machine(sql.FieldEQ(FieldLastPush, v)) } // LastHeartbeat applies equality check predicate on the "last_heartbeat" field. It's identical to LastHeartbeatEQ. func LastHeartbeat(v time.Time) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldLastHeartbeat), v)) - }) + return predicate.Machine(sql.FieldEQ(FieldLastHeartbeat, v)) } // MachineId applies equality check predicate on the "machineId" field. It's identical to MachineIdEQ. func MachineId(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldMachineId), v)) - }) + return predicate.Machine(sql.FieldEQ(FieldMachineId, v)) } // Password applies equality check predicate on the "password" field. It's identical to PasswordEQ. func Password(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldPassword), v)) - }) + return predicate.Machine(sql.FieldEQ(FieldPassword, v)) } // IpAddress applies equality check predicate on the "ipAddress" field. It's identical to IpAddressEQ. func IpAddress(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldIpAddress), v)) - }) + return predicate.Machine(sql.FieldEQ(FieldIpAddress, v)) } // Scenarios applies equality check predicate on the "scenarios" field. It's identical to ScenariosEQ. func Scenarios(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldScenarios), v)) - }) + return predicate.Machine(sql.FieldEQ(FieldScenarios, v)) } // Version applies equality check predicate on the "version" field. It's identical to VersionEQ. 
func Version(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldVersion), v)) - }) + return predicate.Machine(sql.FieldEQ(FieldVersion, v)) } // IsValidated applies equality check predicate on the "isValidated" field. It's identical to IsValidatedEQ. func IsValidated(v bool) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldIsValidated), v)) - }) + return predicate.Machine(sql.FieldEQ(FieldIsValidated, v)) } // Status applies equality check predicate on the "status" field. It's identical to StatusEQ. func Status(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldStatus), v)) - }) + return predicate.Machine(sql.FieldEQ(FieldStatus, v)) } // AuthType applies equality check predicate on the "auth_type" field. It's identical to AuthTypeEQ. func AuthType(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldAuthType), v)) - }) + return predicate.Machine(sql.FieldEQ(FieldAuthType, v)) } // CreatedAtEQ applies the EQ predicate on the "created_at" field. func CreatedAtEQ(v time.Time) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldCreatedAt), v)) - }) + return predicate.Machine(sql.FieldEQ(FieldCreatedAt, v)) } // CreatedAtNEQ applies the NEQ predicate on the "created_at" field. func CreatedAtNEQ(v time.Time) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldCreatedAt), v)) - }) + return predicate.Machine(sql.FieldNEQ(FieldCreatedAt, v)) } // CreatedAtIn applies the In predicate on the "created_at" field. 
func CreatedAtIn(vs ...time.Time) predicate.Machine { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldCreatedAt), v...)) - }) + return predicate.Machine(sql.FieldIn(FieldCreatedAt, vs...)) } // CreatedAtNotIn applies the NotIn predicate on the "created_at" field. func CreatedAtNotIn(vs ...time.Time) predicate.Machine { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldCreatedAt), v...)) - }) + return predicate.Machine(sql.FieldNotIn(FieldCreatedAt, vs...)) } // CreatedAtGT applies the GT predicate on the "created_at" field. func CreatedAtGT(v time.Time) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldCreatedAt), v)) - }) + return predicate.Machine(sql.FieldGT(FieldCreatedAt, v)) } // CreatedAtGTE applies the GTE predicate on the "created_at" field. func CreatedAtGTE(v time.Time) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldCreatedAt), v)) - }) + return predicate.Machine(sql.FieldGTE(FieldCreatedAt, v)) } // CreatedAtLT applies the LT predicate on the "created_at" field. func CreatedAtLT(v time.Time) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldCreatedAt), v)) - }) + return predicate.Machine(sql.FieldLT(FieldCreatedAt, v)) } // CreatedAtLTE applies the LTE predicate on the "created_at" field. func CreatedAtLTE(v time.Time) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldCreatedAt), v)) - }) + return predicate.Machine(sql.FieldLTE(FieldCreatedAt, v)) } // CreatedAtIsNil applies the IsNil predicate on the "created_at" field. 
func CreatedAtIsNil() predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldCreatedAt))) - }) + return predicate.Machine(sql.FieldIsNull(FieldCreatedAt)) } // CreatedAtNotNil applies the NotNil predicate on the "created_at" field. func CreatedAtNotNil() predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldCreatedAt))) - }) + return predicate.Machine(sql.FieldNotNull(FieldCreatedAt)) } // UpdatedAtEQ applies the EQ predicate on the "updated_at" field. func UpdatedAtEQ(v time.Time) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldUpdatedAt), v)) - }) + return predicate.Machine(sql.FieldEQ(FieldUpdatedAt, v)) } // UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. func UpdatedAtNEQ(v time.Time) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldUpdatedAt), v)) - }) + return predicate.Machine(sql.FieldNEQ(FieldUpdatedAt, v)) } // UpdatedAtIn applies the In predicate on the "updated_at" field. func UpdatedAtIn(vs ...time.Time) predicate.Machine { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldUpdatedAt), v...)) - }) + return predicate.Machine(sql.FieldIn(FieldUpdatedAt, vs...)) } // UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. func UpdatedAtNotIn(vs ...time.Time) predicate.Machine { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldUpdatedAt), v...)) - }) + return predicate.Machine(sql.FieldNotIn(FieldUpdatedAt, vs...)) } // UpdatedAtGT applies the GT predicate on the "updated_at" field. 
func UpdatedAtGT(v time.Time) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldUpdatedAt), v)) - }) + return predicate.Machine(sql.FieldGT(FieldUpdatedAt, v)) } // UpdatedAtGTE applies the GTE predicate on the "updated_at" field. func UpdatedAtGTE(v time.Time) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldUpdatedAt), v)) - }) + return predicate.Machine(sql.FieldGTE(FieldUpdatedAt, v)) } // UpdatedAtLT applies the LT predicate on the "updated_at" field. func UpdatedAtLT(v time.Time) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldUpdatedAt), v)) - }) + return predicate.Machine(sql.FieldLT(FieldUpdatedAt, v)) } // UpdatedAtLTE applies the LTE predicate on the "updated_at" field. func UpdatedAtLTE(v time.Time) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldUpdatedAt), v)) - }) + return predicate.Machine(sql.FieldLTE(FieldUpdatedAt, v)) } // UpdatedAtIsNil applies the IsNil predicate on the "updated_at" field. func UpdatedAtIsNil() predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldUpdatedAt))) - }) + return predicate.Machine(sql.FieldIsNull(FieldUpdatedAt)) } // UpdatedAtNotNil applies the NotNil predicate on the "updated_at" field. func UpdatedAtNotNil() predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldUpdatedAt))) - }) + return predicate.Machine(sql.FieldNotNull(FieldUpdatedAt)) } // LastPushEQ applies the EQ predicate on the "last_push" field. func LastPushEQ(v time.Time) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldLastPush), v)) - }) + return predicate.Machine(sql.FieldEQ(FieldLastPush, v)) } // LastPushNEQ applies the NEQ predicate on the "last_push" field. 
func LastPushNEQ(v time.Time) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldLastPush), v)) - }) + return predicate.Machine(sql.FieldNEQ(FieldLastPush, v)) } // LastPushIn applies the In predicate on the "last_push" field. func LastPushIn(vs ...time.Time) predicate.Machine { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldLastPush), v...)) - }) + return predicate.Machine(sql.FieldIn(FieldLastPush, vs...)) } // LastPushNotIn applies the NotIn predicate on the "last_push" field. func LastPushNotIn(vs ...time.Time) predicate.Machine { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldLastPush), v...)) - }) + return predicate.Machine(sql.FieldNotIn(FieldLastPush, vs...)) } // LastPushGT applies the GT predicate on the "last_push" field. func LastPushGT(v time.Time) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldLastPush), v)) - }) + return predicate.Machine(sql.FieldGT(FieldLastPush, v)) } // LastPushGTE applies the GTE predicate on the "last_push" field. func LastPushGTE(v time.Time) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldLastPush), v)) - }) + return predicate.Machine(sql.FieldGTE(FieldLastPush, v)) } // LastPushLT applies the LT predicate on the "last_push" field. func LastPushLT(v time.Time) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldLastPush), v)) - }) + return predicate.Machine(sql.FieldLT(FieldLastPush, v)) } // LastPushLTE applies the LTE predicate on the "last_push" field. 
func LastPushLTE(v time.Time) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldLastPush), v)) - }) + return predicate.Machine(sql.FieldLTE(FieldLastPush, v)) } // LastPushIsNil applies the IsNil predicate on the "last_push" field. func LastPushIsNil() predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldLastPush))) - }) + return predicate.Machine(sql.FieldIsNull(FieldLastPush)) } // LastPushNotNil applies the NotNil predicate on the "last_push" field. func LastPushNotNil() predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldLastPush))) - }) + return predicate.Machine(sql.FieldNotNull(FieldLastPush)) } // LastHeartbeatEQ applies the EQ predicate on the "last_heartbeat" field. func LastHeartbeatEQ(v time.Time) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldLastHeartbeat), v)) - }) + return predicate.Machine(sql.FieldEQ(FieldLastHeartbeat, v)) } // LastHeartbeatNEQ applies the NEQ predicate on the "last_heartbeat" field. func LastHeartbeatNEQ(v time.Time) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldLastHeartbeat), v)) - }) + return predicate.Machine(sql.FieldNEQ(FieldLastHeartbeat, v)) } // LastHeartbeatIn applies the In predicate on the "last_heartbeat" field. func LastHeartbeatIn(vs ...time.Time) predicate.Machine { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldLastHeartbeat), v...)) - }) + return predicate.Machine(sql.FieldIn(FieldLastHeartbeat, vs...)) } // LastHeartbeatNotIn applies the NotIn predicate on the "last_heartbeat" field. 
func LastHeartbeatNotIn(vs ...time.Time) predicate.Machine { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldLastHeartbeat), v...)) - }) + return predicate.Machine(sql.FieldNotIn(FieldLastHeartbeat, vs...)) } // LastHeartbeatGT applies the GT predicate on the "last_heartbeat" field. func LastHeartbeatGT(v time.Time) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldLastHeartbeat), v)) - }) + return predicate.Machine(sql.FieldGT(FieldLastHeartbeat, v)) } // LastHeartbeatGTE applies the GTE predicate on the "last_heartbeat" field. func LastHeartbeatGTE(v time.Time) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldLastHeartbeat), v)) - }) + return predicate.Machine(sql.FieldGTE(FieldLastHeartbeat, v)) } // LastHeartbeatLT applies the LT predicate on the "last_heartbeat" field. func LastHeartbeatLT(v time.Time) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldLastHeartbeat), v)) - }) + return predicate.Machine(sql.FieldLT(FieldLastHeartbeat, v)) } // LastHeartbeatLTE applies the LTE predicate on the "last_heartbeat" field. func LastHeartbeatLTE(v time.Time) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldLastHeartbeat), v)) - }) + return predicate.Machine(sql.FieldLTE(FieldLastHeartbeat, v)) } // LastHeartbeatIsNil applies the IsNil predicate on the "last_heartbeat" field. func LastHeartbeatIsNil() predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldLastHeartbeat))) - }) + return predicate.Machine(sql.FieldIsNull(FieldLastHeartbeat)) } // LastHeartbeatNotNil applies the NotNil predicate on the "last_heartbeat" field. 
func LastHeartbeatNotNil() predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldLastHeartbeat))) - }) + return predicate.Machine(sql.FieldNotNull(FieldLastHeartbeat)) } // MachineIdEQ applies the EQ predicate on the "machineId" field. func MachineIdEQ(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldMachineId), v)) - }) + return predicate.Machine(sql.FieldEQ(FieldMachineId, v)) } // MachineIdNEQ applies the NEQ predicate on the "machineId" field. func MachineIdNEQ(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldMachineId), v)) - }) + return predicate.Machine(sql.FieldNEQ(FieldMachineId, v)) } // MachineIdIn applies the In predicate on the "machineId" field. func MachineIdIn(vs ...string) predicate.Machine { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldMachineId), v...)) - }) + return predicate.Machine(sql.FieldIn(FieldMachineId, vs...)) } // MachineIdNotIn applies the NotIn predicate on the "machineId" field. func MachineIdNotIn(vs ...string) predicate.Machine { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldMachineId), v...)) - }) + return predicate.Machine(sql.FieldNotIn(FieldMachineId, vs...)) } // MachineIdGT applies the GT predicate on the "machineId" field. func MachineIdGT(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldMachineId), v)) - }) + return predicate.Machine(sql.FieldGT(FieldMachineId, v)) } // MachineIdGTE applies the GTE predicate on the "machineId" field. 
func MachineIdGTE(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldMachineId), v)) - }) + return predicate.Machine(sql.FieldGTE(FieldMachineId, v)) } // MachineIdLT applies the LT predicate on the "machineId" field. func MachineIdLT(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldMachineId), v)) - }) + return predicate.Machine(sql.FieldLT(FieldMachineId, v)) } // MachineIdLTE applies the LTE predicate on the "machineId" field. func MachineIdLTE(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldMachineId), v)) - }) + return predicate.Machine(sql.FieldLTE(FieldMachineId, v)) } // MachineIdContains applies the Contains predicate on the "machineId" field. func MachineIdContains(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldMachineId), v)) - }) + return predicate.Machine(sql.FieldContains(FieldMachineId, v)) } // MachineIdHasPrefix applies the HasPrefix predicate on the "machineId" field. func MachineIdHasPrefix(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldMachineId), v)) - }) + return predicate.Machine(sql.FieldHasPrefix(FieldMachineId, v)) } // MachineIdHasSuffix applies the HasSuffix predicate on the "machineId" field. func MachineIdHasSuffix(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldMachineId), v)) - }) + return predicate.Machine(sql.FieldHasSuffix(FieldMachineId, v)) } // MachineIdEqualFold applies the EqualFold predicate on the "machineId" field. 
func MachineIdEqualFold(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldMachineId), v)) - }) + return predicate.Machine(sql.FieldEqualFold(FieldMachineId, v)) } // MachineIdContainsFold applies the ContainsFold predicate on the "machineId" field. func MachineIdContainsFold(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldMachineId), v)) - }) + return predicate.Machine(sql.FieldContainsFold(FieldMachineId, v)) } // PasswordEQ applies the EQ predicate on the "password" field. func PasswordEQ(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldPassword), v)) - }) + return predicate.Machine(sql.FieldEQ(FieldPassword, v)) } // PasswordNEQ applies the NEQ predicate on the "password" field. func PasswordNEQ(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldPassword), v)) - }) + return predicate.Machine(sql.FieldNEQ(FieldPassword, v)) } // PasswordIn applies the In predicate on the "password" field. func PasswordIn(vs ...string) predicate.Machine { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldPassword), v...)) - }) + return predicate.Machine(sql.FieldIn(FieldPassword, vs...)) } // PasswordNotIn applies the NotIn predicate on the "password" field. func PasswordNotIn(vs ...string) predicate.Machine { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldPassword), v...)) - }) + return predicate.Machine(sql.FieldNotIn(FieldPassword, vs...)) } // PasswordGT applies the GT predicate on the "password" field. 
func PasswordGT(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldPassword), v)) - }) + return predicate.Machine(sql.FieldGT(FieldPassword, v)) } // PasswordGTE applies the GTE predicate on the "password" field. func PasswordGTE(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldPassword), v)) - }) + return predicate.Machine(sql.FieldGTE(FieldPassword, v)) } // PasswordLT applies the LT predicate on the "password" field. func PasswordLT(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldPassword), v)) - }) + return predicate.Machine(sql.FieldLT(FieldPassword, v)) } // PasswordLTE applies the LTE predicate on the "password" field. func PasswordLTE(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldPassword), v)) - }) + return predicate.Machine(sql.FieldLTE(FieldPassword, v)) } // PasswordContains applies the Contains predicate on the "password" field. func PasswordContains(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldPassword), v)) - }) + return predicate.Machine(sql.FieldContains(FieldPassword, v)) } // PasswordHasPrefix applies the HasPrefix predicate on the "password" field. func PasswordHasPrefix(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldPassword), v)) - }) + return predicate.Machine(sql.FieldHasPrefix(FieldPassword, v)) } // PasswordHasSuffix applies the HasSuffix predicate on the "password" field. func PasswordHasSuffix(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldPassword), v)) - }) + return predicate.Machine(sql.FieldHasSuffix(FieldPassword, v)) } // PasswordEqualFold applies the EqualFold predicate on the "password" field. 
func PasswordEqualFold(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldPassword), v)) - }) + return predicate.Machine(sql.FieldEqualFold(FieldPassword, v)) } // PasswordContainsFold applies the ContainsFold predicate on the "password" field. func PasswordContainsFold(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldPassword), v)) - }) + return predicate.Machine(sql.FieldContainsFold(FieldPassword, v)) } // IpAddressEQ applies the EQ predicate on the "ipAddress" field. func IpAddressEQ(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldIpAddress), v)) - }) + return predicate.Machine(sql.FieldEQ(FieldIpAddress, v)) } // IpAddressNEQ applies the NEQ predicate on the "ipAddress" field. func IpAddressNEQ(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldIpAddress), v)) - }) + return predicate.Machine(sql.FieldNEQ(FieldIpAddress, v)) } // IpAddressIn applies the In predicate on the "ipAddress" field. func IpAddressIn(vs ...string) predicate.Machine { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldIpAddress), v...)) - }) + return predicate.Machine(sql.FieldIn(FieldIpAddress, vs...)) } // IpAddressNotIn applies the NotIn predicate on the "ipAddress" field. func IpAddressNotIn(vs ...string) predicate.Machine { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldIpAddress), v...)) - }) + return predicate.Machine(sql.FieldNotIn(FieldIpAddress, vs...)) } // IpAddressGT applies the GT predicate on the "ipAddress" field. 
func IpAddressGT(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldIpAddress), v)) - }) + return predicate.Machine(sql.FieldGT(FieldIpAddress, v)) } // IpAddressGTE applies the GTE predicate on the "ipAddress" field. func IpAddressGTE(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldIpAddress), v)) - }) + return predicate.Machine(sql.FieldGTE(FieldIpAddress, v)) } // IpAddressLT applies the LT predicate on the "ipAddress" field. func IpAddressLT(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldIpAddress), v)) - }) + return predicate.Machine(sql.FieldLT(FieldIpAddress, v)) } // IpAddressLTE applies the LTE predicate on the "ipAddress" field. func IpAddressLTE(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldIpAddress), v)) - }) + return predicate.Machine(sql.FieldLTE(FieldIpAddress, v)) } // IpAddressContains applies the Contains predicate on the "ipAddress" field. func IpAddressContains(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldIpAddress), v)) - }) + return predicate.Machine(sql.FieldContains(FieldIpAddress, v)) } // IpAddressHasPrefix applies the HasPrefix predicate on the "ipAddress" field. func IpAddressHasPrefix(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldIpAddress), v)) - }) + return predicate.Machine(sql.FieldHasPrefix(FieldIpAddress, v)) } // IpAddressHasSuffix applies the HasSuffix predicate on the "ipAddress" field. 
func IpAddressHasSuffix(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldIpAddress), v)) - }) + return predicate.Machine(sql.FieldHasSuffix(FieldIpAddress, v)) } // IpAddressEqualFold applies the EqualFold predicate on the "ipAddress" field. func IpAddressEqualFold(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldIpAddress), v)) - }) + return predicate.Machine(sql.FieldEqualFold(FieldIpAddress, v)) } // IpAddressContainsFold applies the ContainsFold predicate on the "ipAddress" field. func IpAddressContainsFold(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldIpAddress), v)) - }) + return predicate.Machine(sql.FieldContainsFold(FieldIpAddress, v)) } // ScenariosEQ applies the EQ predicate on the "scenarios" field. func ScenariosEQ(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldScenarios), v)) - }) + return predicate.Machine(sql.FieldEQ(FieldScenarios, v)) } // ScenariosNEQ applies the NEQ predicate on the "scenarios" field. func ScenariosNEQ(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldScenarios), v)) - }) + return predicate.Machine(sql.FieldNEQ(FieldScenarios, v)) } // ScenariosIn applies the In predicate on the "scenarios" field. func ScenariosIn(vs ...string) predicate.Machine { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldScenarios), v...)) - }) + return predicate.Machine(sql.FieldIn(FieldScenarios, vs...)) } // ScenariosNotIn applies the NotIn predicate on the "scenarios" field. 
func ScenariosNotIn(vs ...string) predicate.Machine { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldScenarios), v...)) - }) + return predicate.Machine(sql.FieldNotIn(FieldScenarios, vs...)) } // ScenariosGT applies the GT predicate on the "scenarios" field. func ScenariosGT(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldScenarios), v)) - }) + return predicate.Machine(sql.FieldGT(FieldScenarios, v)) } // ScenariosGTE applies the GTE predicate on the "scenarios" field. func ScenariosGTE(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldScenarios), v)) - }) + return predicate.Machine(sql.FieldGTE(FieldScenarios, v)) } // ScenariosLT applies the LT predicate on the "scenarios" field. func ScenariosLT(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldScenarios), v)) - }) + return predicate.Machine(sql.FieldLT(FieldScenarios, v)) } // ScenariosLTE applies the LTE predicate on the "scenarios" field. func ScenariosLTE(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldScenarios), v)) - }) + return predicate.Machine(sql.FieldLTE(FieldScenarios, v)) } // ScenariosContains applies the Contains predicate on the "scenarios" field. func ScenariosContains(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldScenarios), v)) - }) + return predicate.Machine(sql.FieldContains(FieldScenarios, v)) } // ScenariosHasPrefix applies the HasPrefix predicate on the "scenarios" field. 
func ScenariosHasPrefix(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldScenarios), v)) - }) + return predicate.Machine(sql.FieldHasPrefix(FieldScenarios, v)) } // ScenariosHasSuffix applies the HasSuffix predicate on the "scenarios" field. func ScenariosHasSuffix(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldScenarios), v)) - }) + return predicate.Machine(sql.FieldHasSuffix(FieldScenarios, v)) } // ScenariosIsNil applies the IsNil predicate on the "scenarios" field. func ScenariosIsNil() predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldScenarios))) - }) + return predicate.Machine(sql.FieldIsNull(FieldScenarios)) } // ScenariosNotNil applies the NotNil predicate on the "scenarios" field. func ScenariosNotNil() predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldScenarios))) - }) + return predicate.Machine(sql.FieldNotNull(FieldScenarios)) } // ScenariosEqualFold applies the EqualFold predicate on the "scenarios" field. func ScenariosEqualFold(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldScenarios), v)) - }) + return predicate.Machine(sql.FieldEqualFold(FieldScenarios, v)) } // ScenariosContainsFold applies the ContainsFold predicate on the "scenarios" field. func ScenariosContainsFold(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldScenarios), v)) - }) + return predicate.Machine(sql.FieldContainsFold(FieldScenarios, v)) } // VersionEQ applies the EQ predicate on the "version" field. 
func VersionEQ(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldVersion), v)) - }) + return predicate.Machine(sql.FieldEQ(FieldVersion, v)) } // VersionNEQ applies the NEQ predicate on the "version" field. func VersionNEQ(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldVersion), v)) - }) + return predicate.Machine(sql.FieldNEQ(FieldVersion, v)) } // VersionIn applies the In predicate on the "version" field. func VersionIn(vs ...string) predicate.Machine { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldVersion), v...)) - }) + return predicate.Machine(sql.FieldIn(FieldVersion, vs...)) } // VersionNotIn applies the NotIn predicate on the "version" field. func VersionNotIn(vs ...string) predicate.Machine { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldVersion), v...)) - }) + return predicate.Machine(sql.FieldNotIn(FieldVersion, vs...)) } // VersionGT applies the GT predicate on the "version" field. func VersionGT(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldVersion), v)) - }) + return predicate.Machine(sql.FieldGT(FieldVersion, v)) } // VersionGTE applies the GTE predicate on the "version" field. func VersionGTE(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldVersion), v)) - }) + return predicate.Machine(sql.FieldGTE(FieldVersion, v)) } // VersionLT applies the LT predicate on the "version" field. 
func VersionLT(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldVersion), v)) - }) + return predicate.Machine(sql.FieldLT(FieldVersion, v)) } // VersionLTE applies the LTE predicate on the "version" field. func VersionLTE(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldVersion), v)) - }) + return predicate.Machine(sql.FieldLTE(FieldVersion, v)) } // VersionContains applies the Contains predicate on the "version" field. func VersionContains(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldVersion), v)) - }) + return predicate.Machine(sql.FieldContains(FieldVersion, v)) } // VersionHasPrefix applies the HasPrefix predicate on the "version" field. func VersionHasPrefix(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldVersion), v)) - }) + return predicate.Machine(sql.FieldHasPrefix(FieldVersion, v)) } // VersionHasSuffix applies the HasSuffix predicate on the "version" field. func VersionHasSuffix(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldVersion), v)) - }) + return predicate.Machine(sql.FieldHasSuffix(FieldVersion, v)) } // VersionIsNil applies the IsNil predicate on the "version" field. func VersionIsNil() predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldVersion))) - }) + return predicate.Machine(sql.FieldIsNull(FieldVersion)) } // VersionNotNil applies the NotNil predicate on the "version" field. func VersionNotNil() predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldVersion))) - }) + return predicate.Machine(sql.FieldNotNull(FieldVersion)) } // VersionEqualFold applies the EqualFold predicate on the "version" field. 
func VersionEqualFold(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldVersion), v)) - }) + return predicate.Machine(sql.FieldEqualFold(FieldVersion, v)) } // VersionContainsFold applies the ContainsFold predicate on the "version" field. func VersionContainsFold(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldVersion), v)) - }) + return predicate.Machine(sql.FieldContainsFold(FieldVersion, v)) } // IsValidatedEQ applies the EQ predicate on the "isValidated" field. func IsValidatedEQ(v bool) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldIsValidated), v)) - }) + return predicate.Machine(sql.FieldEQ(FieldIsValidated, v)) } // IsValidatedNEQ applies the NEQ predicate on the "isValidated" field. func IsValidatedNEQ(v bool) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldIsValidated), v)) - }) + return predicate.Machine(sql.FieldNEQ(FieldIsValidated, v)) } // StatusEQ applies the EQ predicate on the "status" field. func StatusEQ(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldStatus), v)) - }) + return predicate.Machine(sql.FieldEQ(FieldStatus, v)) } // StatusNEQ applies the NEQ predicate on the "status" field. func StatusNEQ(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldStatus), v)) - }) + return predicate.Machine(sql.FieldNEQ(FieldStatus, v)) } // StatusIn applies the In predicate on the "status" field. 
func StatusIn(vs ...string) predicate.Machine { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldStatus), v...)) - }) + return predicate.Machine(sql.FieldIn(FieldStatus, vs...)) } // StatusNotIn applies the NotIn predicate on the "status" field. func StatusNotIn(vs ...string) predicate.Machine { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldStatus), v...)) - }) + return predicate.Machine(sql.FieldNotIn(FieldStatus, vs...)) } // StatusGT applies the GT predicate on the "status" field. func StatusGT(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldStatus), v)) - }) + return predicate.Machine(sql.FieldGT(FieldStatus, v)) } // StatusGTE applies the GTE predicate on the "status" field. func StatusGTE(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldStatus), v)) - }) + return predicate.Machine(sql.FieldGTE(FieldStatus, v)) } // StatusLT applies the LT predicate on the "status" field. func StatusLT(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldStatus), v)) - }) + return predicate.Machine(sql.FieldLT(FieldStatus, v)) } // StatusLTE applies the LTE predicate on the "status" field. func StatusLTE(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldStatus), v)) - }) + return predicate.Machine(sql.FieldLTE(FieldStatus, v)) } // StatusContains applies the Contains predicate on the "status" field. 
func StatusContains(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldStatus), v)) - }) + return predicate.Machine(sql.FieldContains(FieldStatus, v)) } // StatusHasPrefix applies the HasPrefix predicate on the "status" field. func StatusHasPrefix(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldStatus), v)) - }) + return predicate.Machine(sql.FieldHasPrefix(FieldStatus, v)) } // StatusHasSuffix applies the HasSuffix predicate on the "status" field. func StatusHasSuffix(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldStatus), v)) - }) + return predicate.Machine(sql.FieldHasSuffix(FieldStatus, v)) } // StatusIsNil applies the IsNil predicate on the "status" field. func StatusIsNil() predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldStatus))) - }) + return predicate.Machine(sql.FieldIsNull(FieldStatus)) } // StatusNotNil applies the NotNil predicate on the "status" field. func StatusNotNil() predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldStatus))) - }) + return predicate.Machine(sql.FieldNotNull(FieldStatus)) } // StatusEqualFold applies the EqualFold predicate on the "status" field. func StatusEqualFold(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldStatus), v)) - }) + return predicate.Machine(sql.FieldEqualFold(FieldStatus, v)) } // StatusContainsFold applies the ContainsFold predicate on the "status" field. func StatusContainsFold(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldStatus), v)) - }) + return predicate.Machine(sql.FieldContainsFold(FieldStatus, v)) } // AuthTypeEQ applies the EQ predicate on the "auth_type" field. 
func AuthTypeEQ(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldAuthType), v)) - }) + return predicate.Machine(sql.FieldEQ(FieldAuthType, v)) } // AuthTypeNEQ applies the NEQ predicate on the "auth_type" field. func AuthTypeNEQ(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldAuthType), v)) - }) + return predicate.Machine(sql.FieldNEQ(FieldAuthType, v)) } // AuthTypeIn applies the In predicate on the "auth_type" field. func AuthTypeIn(vs ...string) predicate.Machine { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldAuthType), v...)) - }) + return predicate.Machine(sql.FieldIn(FieldAuthType, vs...)) } // AuthTypeNotIn applies the NotIn predicate on the "auth_type" field. func AuthTypeNotIn(vs ...string) predicate.Machine { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldAuthType), v...)) - }) + return predicate.Machine(sql.FieldNotIn(FieldAuthType, vs...)) } // AuthTypeGT applies the GT predicate on the "auth_type" field. func AuthTypeGT(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldAuthType), v)) - }) + return predicate.Machine(sql.FieldGT(FieldAuthType, v)) } // AuthTypeGTE applies the GTE predicate on the "auth_type" field. func AuthTypeGTE(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldAuthType), v)) - }) + return predicate.Machine(sql.FieldGTE(FieldAuthType, v)) } // AuthTypeLT applies the LT predicate on the "auth_type" field. 
func AuthTypeLT(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldAuthType), v)) - }) + return predicate.Machine(sql.FieldLT(FieldAuthType, v)) } // AuthTypeLTE applies the LTE predicate on the "auth_type" field. func AuthTypeLTE(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldAuthType), v)) - }) + return predicate.Machine(sql.FieldLTE(FieldAuthType, v)) } // AuthTypeContains applies the Contains predicate on the "auth_type" field. func AuthTypeContains(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldAuthType), v)) - }) + return predicate.Machine(sql.FieldContains(FieldAuthType, v)) } // AuthTypeHasPrefix applies the HasPrefix predicate on the "auth_type" field. func AuthTypeHasPrefix(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldAuthType), v)) - }) + return predicate.Machine(sql.FieldHasPrefix(FieldAuthType, v)) } // AuthTypeHasSuffix applies the HasSuffix predicate on the "auth_type" field. func AuthTypeHasSuffix(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldAuthType), v)) - }) + return predicate.Machine(sql.FieldHasSuffix(FieldAuthType, v)) } // AuthTypeEqualFold applies the EqualFold predicate on the "auth_type" field. func AuthTypeEqualFold(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldAuthType), v)) - }) + return predicate.Machine(sql.FieldEqualFold(FieldAuthType, v)) } // AuthTypeContainsFold applies the ContainsFold predicate on the "auth_type" field. 
func AuthTypeContainsFold(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldAuthType), v)) - }) + return predicate.Machine(sql.FieldContainsFold(FieldAuthType, v)) } // HasAlerts applies the HasEdge predicate on the "alerts" edge. @@ -1231,7 +815,6 @@ func HasAlerts() predicate.Machine { return predicate.Machine(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(AlertsTable, FieldID), sqlgraph.Edge(sqlgraph.O2M, false, AlertsTable, AlertsColumn), ) sqlgraph.HasNeighbors(s, step) @@ -1241,11 +824,7 @@ func HasAlerts() predicate.Machine { // HasAlertsWith applies the HasEdge predicate on the "alerts" edge with a given conditions (other predicates). func HasAlertsWith(preds ...predicate.Alert) predicate.Machine { return predicate.Machine(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(AlertsInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, false, AlertsTable, AlertsColumn), - ) + step := newAlertsStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -1256,32 +835,15 @@ func HasAlertsWith(preds ...predicate.Alert) predicate.Machine { // And groups predicates with the AND operator between them. func And(predicates ...predicate.Machine) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for _, p := range predicates { - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Machine(sql.AndPredicates(predicates...)) } // Or groups predicates with the OR operator between them. 
func Or(predicates ...predicate.Machine) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for i, p := range predicates { - if i > 0 { - s1.Or() - } - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Machine(sql.OrPredicates(predicates...)) } // Not applies the not operator on the given predicate. func Not(p predicate.Machine) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - p(s.Not()) - }) + return predicate.Machine(sql.NotPredicates(p)) } diff --git a/pkg/database/ent/machine_create.go b/pkg/database/ent/machine_create.go index efe02782f6b..ff704e6ab74 100644 --- a/pkg/database/ent/machine_create.go +++ b/pkg/database/ent/machine_create.go @@ -187,50 +187,8 @@ func (mc *MachineCreate) Mutation() *MachineMutation { // Save creates the Machine in the database. func (mc *MachineCreate) Save(ctx context.Context) (*Machine, error) { - var ( - err error - node *Machine - ) mc.defaults() - if len(mc.hooks) == 0 { - if err = mc.check(); err != nil { - return nil, err - } - node, err = mc.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*MachineMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = mc.check(); err != nil { - return nil, err - } - mc.mutation = mutation - if node, err = mc.sqlSave(ctx); err != nil { - return nil, err - } - mutation.id = &node.ID - mutation.done = true - return node, err - }) - for i := len(mc.hooks) - 1; i >= 0; i-- { - if mc.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = mc.hooks[i](mut) - } - v, err := mut.Mutate(ctx, mc.mutation) - if err != nil { - return nil, err - } - nv, ok := v.(*Machine) - if !ok { - return nil, fmt.Errorf("unexpected node type %T returned from MachineMutation", v) - } - node = nv - } - return node, err + return withHooks(ctx, mc.sqlSave, mc.mutation, 
mc.hooks) } // SaveX calls Save and panics if Save returns an error. @@ -309,6 +267,9 @@ func (mc *MachineCreate) check() error { } func (mc *MachineCreate) sqlSave(ctx context.Context) (*Machine, error) { + if err := mc.check(); err != nil { + return nil, err + } _node, _spec := mc.createSpec() if err := sqlgraph.CreateNode(ctx, mc.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { @@ -318,114 +279,62 @@ func (mc *MachineCreate) sqlSave(ctx context.Context) (*Machine, error) { } id := _spec.ID.Value.(int64) _node.ID = int(id) + mc.mutation.id = &_node.ID + mc.mutation.done = true return _node, nil } func (mc *MachineCreate) createSpec() (*Machine, *sqlgraph.CreateSpec) { var ( _node = &Machine{config: mc.config} - _spec = &sqlgraph.CreateSpec{ - Table: machine.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: machine.FieldID, - }, - } + _spec = sqlgraph.NewCreateSpec(machine.Table, sqlgraph.NewFieldSpec(machine.FieldID, field.TypeInt)) ) if value, ok := mc.mutation.CreatedAt(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: machine.FieldCreatedAt, - }) + _spec.SetField(machine.FieldCreatedAt, field.TypeTime, value) _node.CreatedAt = &value } if value, ok := mc.mutation.UpdatedAt(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: machine.FieldUpdatedAt, - }) + _spec.SetField(machine.FieldUpdatedAt, field.TypeTime, value) _node.UpdatedAt = &value } if value, ok := mc.mutation.LastPush(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: machine.FieldLastPush, - }) + _spec.SetField(machine.FieldLastPush, field.TypeTime, value) _node.LastPush = &value } if value, ok := mc.mutation.LastHeartbeat(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: machine.FieldLastHeartbeat, - }) + 
_spec.SetField(machine.FieldLastHeartbeat, field.TypeTime, value) _node.LastHeartbeat = &value } if value, ok := mc.mutation.MachineId(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: machine.FieldMachineId, - }) + _spec.SetField(machine.FieldMachineId, field.TypeString, value) _node.MachineId = value } if value, ok := mc.mutation.Password(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: machine.FieldPassword, - }) + _spec.SetField(machine.FieldPassword, field.TypeString, value) _node.Password = value } if value, ok := mc.mutation.IpAddress(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: machine.FieldIpAddress, - }) + _spec.SetField(machine.FieldIpAddress, field.TypeString, value) _node.IpAddress = value } if value, ok := mc.mutation.Scenarios(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: machine.FieldScenarios, - }) + _spec.SetField(machine.FieldScenarios, field.TypeString, value) _node.Scenarios = value } if value, ok := mc.mutation.Version(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: machine.FieldVersion, - }) + _spec.SetField(machine.FieldVersion, field.TypeString, value) _node.Version = value } if value, ok := mc.mutation.IsValidated(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: machine.FieldIsValidated, - }) + _spec.SetField(machine.FieldIsValidated, field.TypeBool, value) _node.IsValidated = value } if value, ok := mc.mutation.Status(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: machine.FieldStatus, - }) + _spec.SetField(machine.FieldStatus, field.TypeString, value) 
_node.Status = value } if value, ok := mc.mutation.AuthType(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: machine.FieldAuthType, - }) + _spec.SetField(machine.FieldAuthType, field.TypeString, value) _node.AuthType = value } if nodes := mc.mutation.AlertsIDs(); len(nodes) > 0 { @@ -436,10 +345,7 @@ func (mc *MachineCreate) createSpec() (*Machine, *sqlgraph.CreateSpec) { Columns: []string{machine.AlertsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: alert.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -453,11 +359,15 @@ func (mc *MachineCreate) createSpec() (*Machine, *sqlgraph.CreateSpec) { // MachineCreateBulk is the builder for creating many Machine entities in bulk. type MachineCreateBulk struct { config + err error builders []*MachineCreate } // Save creates the Machine entities in the database. 
func (mcb *MachineCreateBulk) Save(ctx context.Context) ([]*Machine, error) { + if mcb.err != nil { + return nil, mcb.err + } specs := make([]*sqlgraph.CreateSpec, len(mcb.builders)) nodes := make([]*Machine, len(mcb.builders)) mutators := make([]Mutator, len(mcb.builders)) @@ -474,8 +384,8 @@ func (mcb *MachineCreateBulk) Save(ctx context.Context) ([]*Machine, error) { return nil, err } builder.mutation = mutation - nodes[i], specs[i] = builder.createSpec() var err error + nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { _, err = mutators[i+1].Mutate(root, mcb.builders[i+1].mutation) } else { diff --git a/pkg/database/ent/machine_delete.go b/pkg/database/ent/machine_delete.go index bead8acb46d..ac3aa751d5e 100644 --- a/pkg/database/ent/machine_delete.go +++ b/pkg/database/ent/machine_delete.go @@ -4,7 +4,6 @@ package ent import ( "context" - "fmt" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" @@ -28,34 +27,7 @@ func (md *MachineDelete) Where(ps ...predicate.Machine) *MachineDelete { // Exec executes the deletion query and returns how many vertices were deleted. 
func (md *MachineDelete) Exec(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(md.hooks) == 0 { - affected, err = md.sqlExec(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*MachineMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - md.mutation = mutation - affected, err = md.sqlExec(ctx) - mutation.done = true - return affected, err - }) - for i := len(md.hooks) - 1; i >= 0; i-- { - if md.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = md.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, md.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, md.sqlExec, md.mutation, md.hooks) } // ExecX is like Exec, but panics if an error occurs. @@ -68,15 +40,7 @@ func (md *MachineDelete) ExecX(ctx context.Context) int { } func (md *MachineDelete) sqlExec(ctx context.Context) (int, error) { - _spec := &sqlgraph.DeleteSpec{ - Node: &sqlgraph.NodeSpec{ - Table: machine.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: machine.FieldID, - }, - }, - } + _spec := sqlgraph.NewDeleteSpec(machine.Table, sqlgraph.NewFieldSpec(machine.FieldID, field.TypeInt)) if ps := md.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -88,6 +52,7 @@ func (md *MachineDelete) sqlExec(ctx context.Context) (int, error) { if err != nil && sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } + md.mutation.done = true return affected, err } @@ -96,6 +61,12 @@ type MachineDeleteOne struct { md *MachineDelete } +// Where appends a list predicates to the MachineDelete builder. +func (mdo *MachineDeleteOne) Where(ps ...predicate.Machine) *MachineDeleteOne { + mdo.md.mutation.Where(ps...) + return mdo +} + // Exec executes the deletion query. 
func (mdo *MachineDeleteOne) Exec(ctx context.Context) error { n, err := mdo.md.Exec(ctx) @@ -111,5 +82,7 @@ func (mdo *MachineDeleteOne) Exec(ctx context.Context) error { // ExecX is like Exec, but panics if an error occurs. func (mdo *MachineDeleteOne) ExecX(ctx context.Context) { - mdo.md.ExecX(ctx) + if err := mdo.Exec(ctx); err != nil { + panic(err) + } } diff --git a/pkg/database/ent/machine_query.go b/pkg/database/ent/machine_query.go index 2839142196b..462c2cf35b1 100644 --- a/pkg/database/ent/machine_query.go +++ b/pkg/database/ent/machine_query.go @@ -19,11 +19,9 @@ import ( // MachineQuery is the builder for querying Machine entities. type MachineQuery struct { config - limit *int - offset *int - unique *bool - order []OrderFunc - fields []string + ctx *QueryContext + order []machine.OrderOption + inters []Interceptor predicates []predicate.Machine withAlerts *AlertQuery // intermediate query (i.e. traversal path). @@ -37,34 +35,34 @@ func (mq *MachineQuery) Where(ps ...predicate.Machine) *MachineQuery { return mq } -// Limit adds a limit step to the query. +// Limit the number of records to be returned by this query. func (mq *MachineQuery) Limit(limit int) *MachineQuery { - mq.limit = &limit + mq.ctx.Limit = &limit return mq } -// Offset adds an offset step to the query. +// Offset to start from. func (mq *MachineQuery) Offset(offset int) *MachineQuery { - mq.offset = &offset + mq.ctx.Offset = &offset return mq } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. func (mq *MachineQuery) Unique(unique bool) *MachineQuery { - mq.unique = &unique + mq.ctx.Unique = &unique return mq } -// Order adds an order step to the query. -func (mq *MachineQuery) Order(o ...OrderFunc) *MachineQuery { +// Order specifies how the records should be ordered. +func (mq *MachineQuery) Order(o ...machine.OrderOption) *MachineQuery { mq.order = append(mq.order, o...) 
return mq } // QueryAlerts chains the current query on the "alerts" edge. func (mq *MachineQuery) QueryAlerts() *AlertQuery { - query := &AlertQuery{config: mq.config} + query := (&AlertClient{config: mq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := mq.prepareQuery(ctx); err != nil { return nil, err @@ -87,7 +85,7 @@ func (mq *MachineQuery) QueryAlerts() *AlertQuery { // First returns the first Machine entity from the query. // Returns a *NotFoundError when no Machine was found. func (mq *MachineQuery) First(ctx context.Context) (*Machine, error) { - nodes, err := mq.Limit(1).All(ctx) + nodes, err := mq.Limit(1).All(setContextOp(ctx, mq.ctx, "First")) if err != nil { return nil, err } @@ -110,7 +108,7 @@ func (mq *MachineQuery) FirstX(ctx context.Context) *Machine { // Returns a *NotFoundError when no Machine ID was found. func (mq *MachineQuery) FirstID(ctx context.Context) (id int, err error) { var ids []int - if ids, err = mq.Limit(1).IDs(ctx); err != nil { + if ids, err = mq.Limit(1).IDs(setContextOp(ctx, mq.ctx, "FirstID")); err != nil { return } if len(ids) == 0 { @@ -133,7 +131,7 @@ func (mq *MachineQuery) FirstIDX(ctx context.Context) int { // Returns a *NotSingularError when more than one Machine entity is found. // Returns a *NotFoundError when no Machine entities are found. func (mq *MachineQuery) Only(ctx context.Context) (*Machine, error) { - nodes, err := mq.Limit(2).All(ctx) + nodes, err := mq.Limit(2).All(setContextOp(ctx, mq.ctx, "Only")) if err != nil { return nil, err } @@ -161,7 +159,7 @@ func (mq *MachineQuery) OnlyX(ctx context.Context) *Machine { // Returns a *NotFoundError when no entities are found. 
func (mq *MachineQuery) OnlyID(ctx context.Context) (id int, err error) { var ids []int - if ids, err = mq.Limit(2).IDs(ctx); err != nil { + if ids, err = mq.Limit(2).IDs(setContextOp(ctx, mq.ctx, "OnlyID")); err != nil { return } switch len(ids) { @@ -186,10 +184,12 @@ func (mq *MachineQuery) OnlyIDX(ctx context.Context) int { // All executes the query and returns a list of Machines. func (mq *MachineQuery) All(ctx context.Context) ([]*Machine, error) { + ctx = setContextOp(ctx, mq.ctx, "All") if err := mq.prepareQuery(ctx); err != nil { return nil, err } - return mq.sqlAll(ctx) + qr := querierAll[[]*Machine, *MachineQuery]() + return withInterceptors[[]*Machine](ctx, mq, qr, mq.inters) } // AllX is like All, but panics if an error occurs. @@ -202,9 +202,12 @@ func (mq *MachineQuery) AllX(ctx context.Context) []*Machine { } // IDs executes the query and returns a list of Machine IDs. -func (mq *MachineQuery) IDs(ctx context.Context) ([]int, error) { - var ids []int - if err := mq.Select(machine.FieldID).Scan(ctx, &ids); err != nil { +func (mq *MachineQuery) IDs(ctx context.Context) (ids []int, err error) { + if mq.ctx.Unique == nil && mq.path != nil { + mq.Unique(true) + } + ctx = setContextOp(ctx, mq.ctx, "IDs") + if err = mq.Select(machine.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil @@ -221,10 +224,11 @@ func (mq *MachineQuery) IDsX(ctx context.Context) []int { // Count returns the count of the given query. func (mq *MachineQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, mq.ctx, "Count") if err := mq.prepareQuery(ctx); err != nil { return 0, err } - return mq.sqlCount(ctx) + return withInterceptors[int](ctx, mq, querierCount[*MachineQuery](), mq.inters) } // CountX is like Count, but panics if an error occurs. @@ -238,10 +242,15 @@ func (mq *MachineQuery) CountX(ctx context.Context) int { // Exist returns true if the query has elements in the graph. 
func (mq *MachineQuery) Exist(ctx context.Context) (bool, error) { - if err := mq.prepareQuery(ctx); err != nil { - return false, err + ctx = setContextOp(ctx, mq.ctx, "Exist") + switch _, err := mq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil } - return mq.sqlExist(ctx) } // ExistX is like Exist, but panics if an error occurs. @@ -261,22 +270,21 @@ func (mq *MachineQuery) Clone() *MachineQuery { } return &MachineQuery{ config: mq.config, - limit: mq.limit, - offset: mq.offset, - order: append([]OrderFunc{}, mq.order...), + ctx: mq.ctx.Clone(), + order: append([]machine.OrderOption{}, mq.order...), + inters: append([]Interceptor{}, mq.inters...), predicates: append([]predicate.Machine{}, mq.predicates...), withAlerts: mq.withAlerts.Clone(), // clone intermediate query. - sql: mq.sql.Clone(), - path: mq.path, - unique: mq.unique, + sql: mq.sql.Clone(), + path: mq.path, } } // WithAlerts tells the query-builder to eager-load the nodes that are connected to // the "alerts" edge. The optional arguments are used to configure the query builder of the edge. func (mq *MachineQuery) WithAlerts(opts ...func(*AlertQuery)) *MachineQuery { - query := &AlertQuery{config: mq.config} + query := (&AlertClient{config: mq.config}).Query() for _, opt := range opts { opt(query) } @@ -299,16 +307,11 @@ func (mq *MachineQuery) WithAlerts(opts ...func(*AlertQuery)) *MachineQuery { // Aggregate(ent.Count()). // Scan(ctx, &v) func (mq *MachineQuery) GroupBy(field string, fields ...string) *MachineGroupBy { - grbuild := &MachineGroupBy{config: mq.config} - grbuild.fields = append([]string{field}, fields...) - grbuild.path = func(ctx context.Context) (prev *sql.Selector, err error) { - if err := mq.prepareQuery(ctx); err != nil { - return nil, err - } - return mq.sqlQuery(ctx), nil - } + mq.ctx.Fields = append([]string{field}, fields...) 
+ grbuild := &MachineGroupBy{build: mq} + grbuild.flds = &mq.ctx.Fields grbuild.label = machine.Label - grbuild.flds, grbuild.scan = &grbuild.fields, grbuild.Scan + grbuild.scan = grbuild.Scan return grbuild } @@ -325,15 +328,30 @@ func (mq *MachineQuery) GroupBy(field string, fields ...string) *MachineGroupBy // Select(machine.FieldCreatedAt). // Scan(ctx, &v) func (mq *MachineQuery) Select(fields ...string) *MachineSelect { - mq.fields = append(mq.fields, fields...) - selbuild := &MachineSelect{MachineQuery: mq} - selbuild.label = machine.Label - selbuild.flds, selbuild.scan = &mq.fields, selbuild.Scan - return selbuild + mq.ctx.Fields = append(mq.ctx.Fields, fields...) + sbuild := &MachineSelect{MachineQuery: mq} + sbuild.label = machine.Label + sbuild.flds, sbuild.scan = &mq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a MachineSelect configured with the given aggregations. +func (mq *MachineQuery) Aggregate(fns ...AggregateFunc) *MachineSelect { + return mq.Select().Aggregate(fns...) 
} func (mq *MachineQuery) prepareQuery(ctx context.Context) error { - for _, f := range mq.fields { + for _, inter := range mq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, mq); err != nil { + return err + } + } + } + for _, f := range mq.ctx.Fields { if !machine.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } @@ -396,7 +414,7 @@ func (mq *MachineQuery) loadAlerts(ctx context.Context, query *AlertQuery, nodes } query.withFKs = true query.Where(predicate.Alert(func(s *sql.Selector) { - s.Where(sql.InValues(machine.AlertsColumn, fks...)) + s.Where(sql.InValues(s.C(machine.AlertsColumn), fks...)) })) neighbors, err := query.All(ctx) if err != nil { @@ -409,7 +427,7 @@ func (mq *MachineQuery) loadAlerts(ctx context.Context, query *AlertQuery, nodes } node, ok := nodeids[*fk] if !ok { - return fmt.Errorf(`unexpected foreign-key "machine_alerts" returned %v for node %v`, *fk, n.ID) + return fmt.Errorf(`unexpected referenced foreign-key "machine_alerts" returned %v for node %v`, *fk, n.ID) } assign(node, n) } @@ -418,41 +436,22 @@ func (mq *MachineQuery) loadAlerts(ctx context.Context, query *AlertQuery, nodes func (mq *MachineQuery) sqlCount(ctx context.Context) (int, error) { _spec := mq.querySpec() - _spec.Node.Columns = mq.fields - if len(mq.fields) > 0 { - _spec.Unique = mq.unique != nil && *mq.unique + _spec.Node.Columns = mq.ctx.Fields + if len(mq.ctx.Fields) > 0 { + _spec.Unique = mq.ctx.Unique != nil && *mq.ctx.Unique } return sqlgraph.CountNodes(ctx, mq.driver, _spec) } -func (mq *MachineQuery) sqlExist(ctx context.Context) (bool, error) { - switch _, err := mq.FirstID(ctx); { - case IsNotFound(err): - return false, nil - case err != nil: - return false, fmt.Errorf("ent: check existence: %w", err) - default: - return true, nil - } -} - func (mq *MachineQuery) 
querySpec() *sqlgraph.QuerySpec { - _spec := &sqlgraph.QuerySpec{ - Node: &sqlgraph.NodeSpec{ - Table: machine.Table, - Columns: machine.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: machine.FieldID, - }, - }, - From: mq.sql, - Unique: true, - } - if unique := mq.unique; unique != nil { + _spec := sqlgraph.NewQuerySpec(machine.Table, machine.Columns, sqlgraph.NewFieldSpec(machine.FieldID, field.TypeInt)) + _spec.From = mq.sql + if unique := mq.ctx.Unique; unique != nil { _spec.Unique = *unique + } else if mq.path != nil { + _spec.Unique = true } - if fields := mq.fields; len(fields) > 0 { + if fields := mq.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, machine.FieldID) for i := range fields { @@ -468,10 +467,10 @@ func (mq *MachineQuery) querySpec() *sqlgraph.QuerySpec { } } } - if limit := mq.limit; limit != nil { + if limit := mq.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := mq.offset; offset != nil { + if offset := mq.ctx.Offset; offset != nil { _spec.Offset = *offset } if ps := mq.order; len(ps) > 0 { @@ -487,7 +486,7 @@ func (mq *MachineQuery) querySpec() *sqlgraph.QuerySpec { func (mq *MachineQuery) sqlQuery(ctx context.Context) *sql.Selector { builder := sql.Dialect(mq.driver.Dialect()) t1 := builder.Table(machine.Table) - columns := mq.fields + columns := mq.ctx.Fields if len(columns) == 0 { columns = machine.Columns } @@ -496,7 +495,7 @@ func (mq *MachineQuery) sqlQuery(ctx context.Context) *sql.Selector { selector = mq.sql selector.Select(selector.Columns(columns...)...) 
} - if mq.unique != nil && *mq.unique { + if mq.ctx.Unique != nil && *mq.ctx.Unique { selector.Distinct() } for _, p := range mq.predicates { @@ -505,12 +504,12 @@ func (mq *MachineQuery) sqlQuery(ctx context.Context) *sql.Selector { for _, p := range mq.order { p(selector) } - if offset := mq.offset; offset != nil { + if offset := mq.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := mq.limit; limit != nil { + if limit := mq.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector @@ -518,13 +517,8 @@ func (mq *MachineQuery) sqlQuery(ctx context.Context) *sql.Selector { // MachineGroupBy is the group-by builder for Machine entities. type MachineGroupBy struct { - config selector - fields []string - fns []AggregateFunc - // intermediate query (i.e. traversal path). - sql *sql.Selector - path func(context.Context) (*sql.Selector, error) + build *MachineQuery } // Aggregate adds the given aggregation functions to the group-by query. @@ -533,74 +527,77 @@ func (mgb *MachineGroupBy) Aggregate(fns ...AggregateFunc) *MachineGroupBy { return mgb } -// Scan applies the group-by query and scans the result into the given value. +// Scan applies the selector query and scans the result into the given value. 
func (mgb *MachineGroupBy) Scan(ctx context.Context, v any) error { - query, err := mgb.path(ctx) - if err != nil { + ctx = setContextOp(ctx, mgb.build.ctx, "GroupBy") + if err := mgb.build.prepareQuery(ctx); err != nil { return err } - mgb.sql = query - return mgb.sqlScan(ctx, v) + return scanWithInterceptors[*MachineQuery, *MachineGroupBy](ctx, mgb.build, mgb, mgb.build.inters, v) } -func (mgb *MachineGroupBy) sqlScan(ctx context.Context, v any) error { - for _, f := range mgb.fields { - if !machine.ValidColumn(f) { - return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} +func (mgb *MachineGroupBy) sqlScan(ctx context.Context, root *MachineQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(mgb.fns)) + for _, fn := range mgb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*mgb.flds)+len(mgb.fns)) + for _, f := range *mgb.flds { + columns = append(columns, selector.C(f)) } + columns = append(columns, aggregation...) + selector.Select(columns...) } - selector := mgb.sqlQuery() + selector.GroupBy(selector.Columns(*mgb.flds...)...) if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := mgb.driver.Query(ctx, query, args, rows); err != nil { + if err := mgb.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() return sql.ScanSlice(rows, v) } -func (mgb *MachineGroupBy) sqlQuery() *sql.Selector { - selector := mgb.sql.Select() - aggregation := make([]string, 0, len(mgb.fns)) - for _, fn := range mgb.fns { - aggregation = append(aggregation, fn(selector)) - } - // If no columns were selected in a custom aggregation function, the default - // selection is the fields used for "group-by", and the aggregation functions. 
- if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(mgb.fields)+len(mgb.fns)) - for _, f := range mgb.fields { - columns = append(columns, selector.C(f)) - } - columns = append(columns, aggregation...) - selector.Select(columns...) - } - return selector.GroupBy(selector.Columns(mgb.fields...)...) -} - // MachineSelect is the builder for selecting fields of Machine entities. type MachineSelect struct { *MachineQuery selector - // intermediate query (i.e. traversal path). - sql *sql.Selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (ms *MachineSelect) Aggregate(fns ...AggregateFunc) *MachineSelect { + ms.fns = append(ms.fns, fns...) + return ms } // Scan applies the selector query and scans the result into the given value. func (ms *MachineSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, ms.ctx, "Select") if err := ms.prepareQuery(ctx); err != nil { return err } - ms.sql = ms.MachineQuery.sqlQuery(ctx) - return ms.sqlScan(ctx, v) + return scanWithInterceptors[*MachineQuery, *MachineSelect](ctx, ms.MachineQuery, ms, ms.inters, v) } -func (ms *MachineSelect) sqlScan(ctx context.Context, v any) error { +func (ms *MachineSelect) sqlScan(ctx context.Context, root *MachineQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(ms.fns)) + for _, fn := range ms.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*ms.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) 
+ } rows := &sql.Rows{} - query, args := ms.sql.Query() + query, args := selector.Query() if err := ms.driver.Query(ctx, query, args, rows); err != nil { return err } diff --git a/pkg/database/ent/machine_update.go b/pkg/database/ent/machine_update.go index de9f8d12460..eb517081174 100644 --- a/pkg/database/ent/machine_update.go +++ b/pkg/database/ent/machine_update.go @@ -226,41 +226,8 @@ func (mu *MachineUpdate) RemoveAlerts(a ...*Alert) *MachineUpdate { // Save executes the query and returns the number of nodes affected by the update operation. func (mu *MachineUpdate) Save(ctx context.Context) (int, error) { - var ( - err error - affected int - ) mu.defaults() - if len(mu.hooks) == 0 { - if err = mu.check(); err != nil { - return 0, err - } - affected, err = mu.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*MachineMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = mu.check(); err != nil { - return 0, err - } - mu.mutation = mutation - affected, err = mu.sqlSave(ctx) - mutation.done = true - return affected, err - }) - for i := len(mu.hooks) - 1; i >= 0; i-- { - if mu.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = mu.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, mu.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, mu.sqlSave, mu.mutation, mu.hooks) } // SaveX is like Save, but panics if an error occurs. 
@@ -316,16 +283,10 @@ func (mu *MachineUpdate) check() error { } func (mu *MachineUpdate) sqlSave(ctx context.Context) (n int, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: machine.Table, - Columns: machine.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: machine.FieldID, - }, - }, + if err := mu.check(); err != nil { + return n, err } + _spec := sqlgraph.NewUpdateSpec(machine.Table, machine.Columns, sqlgraph.NewFieldSpec(machine.FieldID, field.TypeInt)) if ps := mu.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -334,130 +295,61 @@ func (mu *MachineUpdate) sqlSave(ctx context.Context) (n int, err error) { } } if value, ok := mu.mutation.CreatedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: machine.FieldCreatedAt, - }) + _spec.SetField(machine.FieldCreatedAt, field.TypeTime, value) } if mu.mutation.CreatedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: machine.FieldCreatedAt, - }) + _spec.ClearField(machine.FieldCreatedAt, field.TypeTime) } if value, ok := mu.mutation.UpdatedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: machine.FieldUpdatedAt, - }) + _spec.SetField(machine.FieldUpdatedAt, field.TypeTime, value) } if mu.mutation.UpdatedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: machine.FieldUpdatedAt, - }) + _spec.ClearField(machine.FieldUpdatedAt, field.TypeTime) } if value, ok := mu.mutation.LastPush(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: machine.FieldLastPush, - }) + _spec.SetField(machine.FieldLastPush, field.TypeTime, value) } if mu.mutation.LastPushCleared() 
{ - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: machine.FieldLastPush, - }) + _spec.ClearField(machine.FieldLastPush, field.TypeTime) } if value, ok := mu.mutation.LastHeartbeat(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: machine.FieldLastHeartbeat, - }) + _spec.SetField(machine.FieldLastHeartbeat, field.TypeTime, value) } if mu.mutation.LastHeartbeatCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: machine.FieldLastHeartbeat, - }) + _spec.ClearField(machine.FieldLastHeartbeat, field.TypeTime) } if value, ok := mu.mutation.MachineId(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: machine.FieldMachineId, - }) + _spec.SetField(machine.FieldMachineId, field.TypeString, value) } if value, ok := mu.mutation.Password(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: machine.FieldPassword, - }) + _spec.SetField(machine.FieldPassword, field.TypeString, value) } if value, ok := mu.mutation.IpAddress(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: machine.FieldIpAddress, - }) + _spec.SetField(machine.FieldIpAddress, field.TypeString, value) } if value, ok := mu.mutation.Scenarios(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: machine.FieldScenarios, - }) + _spec.SetField(machine.FieldScenarios, field.TypeString, value) } if mu.mutation.ScenariosCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: machine.FieldScenarios, - }) + _spec.ClearField(machine.FieldScenarios, field.TypeString) } if value, 
ok := mu.mutation.Version(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: machine.FieldVersion, - }) + _spec.SetField(machine.FieldVersion, field.TypeString, value) } if mu.mutation.VersionCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: machine.FieldVersion, - }) + _spec.ClearField(machine.FieldVersion, field.TypeString) } if value, ok := mu.mutation.IsValidated(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: machine.FieldIsValidated, - }) + _spec.SetField(machine.FieldIsValidated, field.TypeBool, value) } if value, ok := mu.mutation.Status(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: machine.FieldStatus, - }) + _spec.SetField(machine.FieldStatus, field.TypeString, value) } if mu.mutation.StatusCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: machine.FieldStatus, - }) + _spec.ClearField(machine.FieldStatus, field.TypeString) } if value, ok := mu.mutation.AuthType(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: machine.FieldAuthType, - }) + _spec.SetField(machine.FieldAuthType, field.TypeString, value) } if mu.mutation.AlertsCleared() { edge := &sqlgraph.EdgeSpec{ @@ -467,10 +359,7 @@ func (mu *MachineUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{machine.AlertsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: alert.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -483,10 +372,7 @@ func (mu *MachineUpdate) sqlSave(ctx context.Context) (n 
int, err error) { Columns: []string{machine.AlertsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: alert.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -502,10 +388,7 @@ func (mu *MachineUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{machine.AlertsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: alert.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -521,6 +404,7 @@ func (mu *MachineUpdate) sqlSave(ctx context.Context) (n int, err error) { } return 0, err } + mu.mutation.done = true return n, nil } @@ -727,6 +611,12 @@ func (muo *MachineUpdateOne) RemoveAlerts(a ...*Alert) *MachineUpdateOne { return muo.RemoveAlertIDs(ids...) } +// Where appends a list predicates to the MachineUpdate builder. +func (muo *MachineUpdateOne) Where(ps ...predicate.Machine) *MachineUpdateOne { + muo.mutation.Where(ps...) + return muo +} + // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. func (muo *MachineUpdateOne) Select(field string, fields ...string) *MachineUpdateOne { @@ -736,47 +626,8 @@ func (muo *MachineUpdateOne) Select(field string, fields ...string) *MachineUpda // Save executes the query and returns the updated Machine entity. 
func (muo *MachineUpdateOne) Save(ctx context.Context) (*Machine, error) { - var ( - err error - node *Machine - ) muo.defaults() - if len(muo.hooks) == 0 { - if err = muo.check(); err != nil { - return nil, err - } - node, err = muo.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*MachineMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = muo.check(); err != nil { - return nil, err - } - muo.mutation = mutation - node, err = muo.sqlSave(ctx) - mutation.done = true - return node, err - }) - for i := len(muo.hooks) - 1; i >= 0; i-- { - if muo.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = muo.hooks[i](mut) - } - v, err := mut.Mutate(ctx, muo.mutation) - if err != nil { - return nil, err - } - nv, ok := v.(*Machine) - if !ok { - return nil, fmt.Errorf("unexpected node type %T returned from MachineMutation", v) - } - node = nv - } - return node, err + return withHooks(ctx, muo.sqlSave, muo.mutation, muo.hooks) } // SaveX is like Save, but panics if an error occurs. 
@@ -832,16 +683,10 @@ func (muo *MachineUpdateOne) check() error { } func (muo *MachineUpdateOne) sqlSave(ctx context.Context) (_node *Machine, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: machine.Table, - Columns: machine.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: machine.FieldID, - }, - }, + if err := muo.check(); err != nil { + return _node, err } + _spec := sqlgraph.NewUpdateSpec(machine.Table, machine.Columns, sqlgraph.NewFieldSpec(machine.FieldID, field.TypeInt)) id, ok := muo.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Machine.id" for update`)} @@ -867,130 +712,61 @@ func (muo *MachineUpdateOne) sqlSave(ctx context.Context) (_node *Machine, err e } } if value, ok := muo.mutation.CreatedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: machine.FieldCreatedAt, - }) + _spec.SetField(machine.FieldCreatedAt, field.TypeTime, value) } if muo.mutation.CreatedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: machine.FieldCreatedAt, - }) + _spec.ClearField(machine.FieldCreatedAt, field.TypeTime) } if value, ok := muo.mutation.UpdatedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: machine.FieldUpdatedAt, - }) + _spec.SetField(machine.FieldUpdatedAt, field.TypeTime, value) } if muo.mutation.UpdatedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: machine.FieldUpdatedAt, - }) + _spec.ClearField(machine.FieldUpdatedAt, field.TypeTime) } if value, ok := muo.mutation.LastPush(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: machine.FieldLastPush, - }) + _spec.SetField(machine.FieldLastPush, 
field.TypeTime, value) } if muo.mutation.LastPushCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: machine.FieldLastPush, - }) + _spec.ClearField(machine.FieldLastPush, field.TypeTime) } if value, ok := muo.mutation.LastHeartbeat(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: machine.FieldLastHeartbeat, - }) + _spec.SetField(machine.FieldLastHeartbeat, field.TypeTime, value) } if muo.mutation.LastHeartbeatCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: machine.FieldLastHeartbeat, - }) + _spec.ClearField(machine.FieldLastHeartbeat, field.TypeTime) } if value, ok := muo.mutation.MachineId(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: machine.FieldMachineId, - }) + _spec.SetField(machine.FieldMachineId, field.TypeString, value) } if value, ok := muo.mutation.Password(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: machine.FieldPassword, - }) + _spec.SetField(machine.FieldPassword, field.TypeString, value) } if value, ok := muo.mutation.IpAddress(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: machine.FieldIpAddress, - }) + _spec.SetField(machine.FieldIpAddress, field.TypeString, value) } if value, ok := muo.mutation.Scenarios(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: machine.FieldScenarios, - }) + _spec.SetField(machine.FieldScenarios, field.TypeString, value) } if muo.mutation.ScenariosCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: machine.FieldScenarios, - }) + 
_spec.ClearField(machine.FieldScenarios, field.TypeString) } if value, ok := muo.mutation.Version(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: machine.FieldVersion, - }) + _spec.SetField(machine.FieldVersion, field.TypeString, value) } if muo.mutation.VersionCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: machine.FieldVersion, - }) + _spec.ClearField(machine.FieldVersion, field.TypeString) } if value, ok := muo.mutation.IsValidated(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: machine.FieldIsValidated, - }) + _spec.SetField(machine.FieldIsValidated, field.TypeBool, value) } if value, ok := muo.mutation.Status(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: machine.FieldStatus, - }) + _spec.SetField(machine.FieldStatus, field.TypeString, value) } if muo.mutation.StatusCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: machine.FieldStatus, - }) + _spec.ClearField(machine.FieldStatus, field.TypeString) } if value, ok := muo.mutation.AuthType(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: machine.FieldAuthType, - }) + _spec.SetField(machine.FieldAuthType, field.TypeString, value) } if muo.mutation.AlertsCleared() { edge := &sqlgraph.EdgeSpec{ @@ -1000,10 +776,7 @@ func (muo *MachineUpdateOne) sqlSave(ctx context.Context) (_node *Machine, err e Columns: []string{machine.AlertsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: alert.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt), }, } _spec.Edges.Clear = 
append(_spec.Edges.Clear, edge) @@ -1016,10 +789,7 @@ func (muo *MachineUpdateOne) sqlSave(ctx context.Context) (_node *Machine, err e Columns: []string{machine.AlertsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: alert.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -1035,10 +805,7 @@ func (muo *MachineUpdateOne) sqlSave(ctx context.Context) (_node *Machine, err e Columns: []string{machine.AlertsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: alert.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -1057,5 +824,6 @@ func (muo *MachineUpdateOne) sqlSave(ctx context.Context) (_node *Machine, err e } return nil, err } + muo.mutation.done = true return _node, nil } diff --git a/pkg/database/ent/meta.go b/pkg/database/ent/meta.go index 660f1a4db73..cadc210937e 100644 --- a/pkg/database/ent/meta.go +++ b/pkg/database/ent/meta.go @@ -7,6 +7,7 @@ import ( "strings" "time" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" "github.com/crowdsecurity/crowdsec/pkg/database/ent/meta" @@ -29,7 +30,8 @@ type Meta struct { AlertMetas int `json:"alert_metas,omitempty"` // Edges holds the relations/edges for other nodes in the graph. // The values are being populated by the MetaQuery when eager-loading is set. - Edges MetaEdges `json:"edges"` + Edges MetaEdges `json:"edges"` + selectValues sql.SelectValues } // MetaEdges holds the relations/edges for other nodes in the graph. 
@@ -66,7 +68,7 @@ func (*Meta) scanValues(columns []string) ([]any, error) { case meta.FieldCreatedAt, meta.FieldUpdatedAt: values[i] = new(sql.NullTime) default: - return nil, fmt.Errorf("unexpected column %q for type Meta", columns[i]) + values[i] = new(sql.UnknownType) } } return values, nil @@ -118,21 +120,29 @@ func (m *Meta) assignValues(columns []string, values []any) error { } else if value.Valid { m.AlertMetas = int(value.Int64) } + default: + m.selectValues.Set(columns[i], values[i]) } } return nil } +// GetValue returns the ent.Value that was dynamically selected and assigned to the Meta. +// This includes values selected through modifiers, order, etc. +func (m *Meta) GetValue(name string) (ent.Value, error) { + return m.selectValues.Get(name) +} + // QueryOwner queries the "owner" edge of the Meta entity. func (m *Meta) QueryOwner() *AlertQuery { - return (&MetaClient{config: m.config}).QueryOwner(m) + return NewMetaClient(m.config).QueryOwner(m) } // Update returns a builder for updating this Meta. // Note that you need to call Meta.Unwrap() before calling this method if this Meta // was returned from a transaction, and the transaction was committed or rolled back. func (m *Meta) Update() *MetaUpdateOne { - return (&MetaClient{config: m.config}).UpdateOne(m) + return NewMetaClient(m.config).UpdateOne(m) } // Unwrap unwraps the Meta entity that was returned from a transaction after it was closed, @@ -175,9 +185,3 @@ func (m *Meta) String() string { // MetaSlice is a parsable slice of Meta. 
type MetaSlice []*Meta - -func (m MetaSlice) config(cfg config) { - for _i := range m { - m[_i].config = cfg - } -} diff --git a/pkg/database/ent/meta/meta.go b/pkg/database/ent/meta/meta.go index 6d10f258919..583496fb710 100644 --- a/pkg/database/ent/meta/meta.go +++ b/pkg/database/ent/meta/meta.go @@ -4,6 +4,9 @@ package meta import ( "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" ) const ( @@ -66,3 +69,50 @@ var ( // ValueValidator is a validator for the "value" field. It is called by the builders before save. ValueValidator func(string) error ) + +// OrderOption defines the ordering options for the Meta queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. +func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// ByKey orders the results by the key field. +func ByKey(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldKey, opts...).ToFunc() +} + +// ByValue orders the results by the value field. +func ByValue(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldValue, opts...).ToFunc() +} + +// ByAlertMetas orders the results by the alert_metas field. +func ByAlertMetas(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldAlertMetas, opts...).ToFunc() +} + +// ByOwnerField orders the results by owner field. 
+func ByOwnerField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newOwnerStep(), sql.OrderByField(field, opts...)) + } +} +func newOwnerStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(OwnerInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn), + ) +} diff --git a/pkg/database/ent/meta/where.go b/pkg/database/ent/meta/where.go index 479792fd4a6..7fc99136972 100644 --- a/pkg/database/ent/meta/where.go +++ b/pkg/database/ent/meta/where.go @@ -12,512 +12,332 @@ import ( // ID filters vertices based on their ID field. func ID(id int) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.Meta(sql.FieldEQ(FieldID, id)) } // IDEQ applies the EQ predicate on the ID field. func IDEQ(id int) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.Meta(sql.FieldEQ(FieldID, id)) } // IDNEQ applies the NEQ predicate on the ID field. func IDNEQ(id int) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldID), id)) - }) + return predicate.Meta(sql.FieldNEQ(FieldID, id)) } // IDIn applies the In predicate on the ID field. func IDIn(ids ...int) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - v := make([]any, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.In(s.C(FieldID), v...)) - }) + return predicate.Meta(sql.FieldIn(FieldID, ids...)) } // IDNotIn applies the NotIn predicate on the ID field. func IDNotIn(ids ...int) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - v := make([]any, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.NotIn(s.C(FieldID), v...)) - }) + return predicate.Meta(sql.FieldNotIn(FieldID, ids...)) } // IDGT applies the GT predicate on the ID field. 
func IDGT(id int) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldID), id)) - }) + return predicate.Meta(sql.FieldGT(FieldID, id)) } // IDGTE applies the GTE predicate on the ID field. func IDGTE(id int) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldID), id)) - }) + return predicate.Meta(sql.FieldGTE(FieldID, id)) } // IDLT applies the LT predicate on the ID field. func IDLT(id int) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldID), id)) - }) + return predicate.Meta(sql.FieldLT(FieldID, id)) } // IDLTE applies the LTE predicate on the ID field. func IDLTE(id int) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldID), id)) - }) + return predicate.Meta(sql.FieldLTE(FieldID, id)) } // CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. func CreatedAt(v time.Time) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldCreatedAt), v)) - }) + return predicate.Meta(sql.FieldEQ(FieldCreatedAt, v)) } // UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. func UpdatedAt(v time.Time) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldUpdatedAt), v)) - }) + return predicate.Meta(sql.FieldEQ(FieldUpdatedAt, v)) } // Key applies equality check predicate on the "key" field. It's identical to KeyEQ. func Key(v string) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldKey), v)) - }) + return predicate.Meta(sql.FieldEQ(FieldKey, v)) } // Value applies equality check predicate on the "value" field. It's identical to ValueEQ. 
func Value(v string) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldValue), v)) - }) + return predicate.Meta(sql.FieldEQ(FieldValue, v)) } // AlertMetas applies equality check predicate on the "alert_metas" field. It's identical to AlertMetasEQ. func AlertMetas(v int) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldAlertMetas), v)) - }) + return predicate.Meta(sql.FieldEQ(FieldAlertMetas, v)) } // CreatedAtEQ applies the EQ predicate on the "created_at" field. func CreatedAtEQ(v time.Time) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldCreatedAt), v)) - }) + return predicate.Meta(sql.FieldEQ(FieldCreatedAt, v)) } // CreatedAtNEQ applies the NEQ predicate on the "created_at" field. func CreatedAtNEQ(v time.Time) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldCreatedAt), v)) - }) + return predicate.Meta(sql.FieldNEQ(FieldCreatedAt, v)) } // CreatedAtIn applies the In predicate on the "created_at" field. func CreatedAtIn(vs ...time.Time) predicate.Meta { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldCreatedAt), v...)) - }) + return predicate.Meta(sql.FieldIn(FieldCreatedAt, vs...)) } // CreatedAtNotIn applies the NotIn predicate on the "created_at" field. func CreatedAtNotIn(vs ...time.Time) predicate.Meta { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldCreatedAt), v...)) - }) + return predicate.Meta(sql.FieldNotIn(FieldCreatedAt, vs...)) } // CreatedAtGT applies the GT predicate on the "created_at" field. 
func CreatedAtGT(v time.Time) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldCreatedAt), v)) - }) + return predicate.Meta(sql.FieldGT(FieldCreatedAt, v)) } // CreatedAtGTE applies the GTE predicate on the "created_at" field. func CreatedAtGTE(v time.Time) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldCreatedAt), v)) - }) + return predicate.Meta(sql.FieldGTE(FieldCreatedAt, v)) } // CreatedAtLT applies the LT predicate on the "created_at" field. func CreatedAtLT(v time.Time) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldCreatedAt), v)) - }) + return predicate.Meta(sql.FieldLT(FieldCreatedAt, v)) } // CreatedAtLTE applies the LTE predicate on the "created_at" field. func CreatedAtLTE(v time.Time) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldCreatedAt), v)) - }) + return predicate.Meta(sql.FieldLTE(FieldCreatedAt, v)) } // CreatedAtIsNil applies the IsNil predicate on the "created_at" field. func CreatedAtIsNil() predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldCreatedAt))) - }) + return predicate.Meta(sql.FieldIsNull(FieldCreatedAt)) } // CreatedAtNotNil applies the NotNil predicate on the "created_at" field. func CreatedAtNotNil() predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldCreatedAt))) - }) + return predicate.Meta(sql.FieldNotNull(FieldCreatedAt)) } // UpdatedAtEQ applies the EQ predicate on the "updated_at" field. func UpdatedAtEQ(v time.Time) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldUpdatedAt), v)) - }) + return predicate.Meta(sql.FieldEQ(FieldUpdatedAt, v)) } // UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. 
func UpdatedAtNEQ(v time.Time) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldUpdatedAt), v)) - }) + return predicate.Meta(sql.FieldNEQ(FieldUpdatedAt, v)) } // UpdatedAtIn applies the In predicate on the "updated_at" field. func UpdatedAtIn(vs ...time.Time) predicate.Meta { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldUpdatedAt), v...)) - }) + return predicate.Meta(sql.FieldIn(FieldUpdatedAt, vs...)) } // UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. func UpdatedAtNotIn(vs ...time.Time) predicate.Meta { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldUpdatedAt), v...)) - }) + return predicate.Meta(sql.FieldNotIn(FieldUpdatedAt, vs...)) } // UpdatedAtGT applies the GT predicate on the "updated_at" field. func UpdatedAtGT(v time.Time) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldUpdatedAt), v)) - }) + return predicate.Meta(sql.FieldGT(FieldUpdatedAt, v)) } // UpdatedAtGTE applies the GTE predicate on the "updated_at" field. func UpdatedAtGTE(v time.Time) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldUpdatedAt), v)) - }) + return predicate.Meta(sql.FieldGTE(FieldUpdatedAt, v)) } // UpdatedAtLT applies the LT predicate on the "updated_at" field. func UpdatedAtLT(v time.Time) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldUpdatedAt), v)) - }) + return predicate.Meta(sql.FieldLT(FieldUpdatedAt, v)) } // UpdatedAtLTE applies the LTE predicate on the "updated_at" field. 
func UpdatedAtLTE(v time.Time) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldUpdatedAt), v)) - }) + return predicate.Meta(sql.FieldLTE(FieldUpdatedAt, v)) } // UpdatedAtIsNil applies the IsNil predicate on the "updated_at" field. func UpdatedAtIsNil() predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldUpdatedAt))) - }) + return predicate.Meta(sql.FieldIsNull(FieldUpdatedAt)) } // UpdatedAtNotNil applies the NotNil predicate on the "updated_at" field. func UpdatedAtNotNil() predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldUpdatedAt))) - }) + return predicate.Meta(sql.FieldNotNull(FieldUpdatedAt)) } // KeyEQ applies the EQ predicate on the "key" field. func KeyEQ(v string) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldKey), v)) - }) + return predicate.Meta(sql.FieldEQ(FieldKey, v)) } // KeyNEQ applies the NEQ predicate on the "key" field. func KeyNEQ(v string) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldKey), v)) - }) + return predicate.Meta(sql.FieldNEQ(FieldKey, v)) } // KeyIn applies the In predicate on the "key" field. func KeyIn(vs ...string) predicate.Meta { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldKey), v...)) - }) + return predicate.Meta(sql.FieldIn(FieldKey, vs...)) } // KeyNotIn applies the NotIn predicate on the "key" field. func KeyNotIn(vs ...string) predicate.Meta { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldKey), v...)) - }) + return predicate.Meta(sql.FieldNotIn(FieldKey, vs...)) } // KeyGT applies the GT predicate on the "key" field. 
func KeyGT(v string) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldKey), v)) - }) + return predicate.Meta(sql.FieldGT(FieldKey, v)) } // KeyGTE applies the GTE predicate on the "key" field. func KeyGTE(v string) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldKey), v)) - }) + return predicate.Meta(sql.FieldGTE(FieldKey, v)) } // KeyLT applies the LT predicate on the "key" field. func KeyLT(v string) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldKey), v)) - }) + return predicate.Meta(sql.FieldLT(FieldKey, v)) } // KeyLTE applies the LTE predicate on the "key" field. func KeyLTE(v string) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldKey), v)) - }) + return predicate.Meta(sql.FieldLTE(FieldKey, v)) } // KeyContains applies the Contains predicate on the "key" field. func KeyContains(v string) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldKey), v)) - }) + return predicate.Meta(sql.FieldContains(FieldKey, v)) } // KeyHasPrefix applies the HasPrefix predicate on the "key" field. func KeyHasPrefix(v string) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldKey), v)) - }) + return predicate.Meta(sql.FieldHasPrefix(FieldKey, v)) } // KeyHasSuffix applies the HasSuffix predicate on the "key" field. func KeyHasSuffix(v string) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldKey), v)) - }) + return predicate.Meta(sql.FieldHasSuffix(FieldKey, v)) } // KeyEqualFold applies the EqualFold predicate on the "key" field. 
func KeyEqualFold(v string) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldKey), v)) - }) + return predicate.Meta(sql.FieldEqualFold(FieldKey, v)) } // KeyContainsFold applies the ContainsFold predicate on the "key" field. func KeyContainsFold(v string) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldKey), v)) - }) + return predicate.Meta(sql.FieldContainsFold(FieldKey, v)) } // ValueEQ applies the EQ predicate on the "value" field. func ValueEQ(v string) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldValue), v)) - }) + return predicate.Meta(sql.FieldEQ(FieldValue, v)) } // ValueNEQ applies the NEQ predicate on the "value" field. func ValueNEQ(v string) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldValue), v)) - }) + return predicate.Meta(sql.FieldNEQ(FieldValue, v)) } // ValueIn applies the In predicate on the "value" field. func ValueIn(vs ...string) predicate.Meta { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldValue), v...)) - }) + return predicate.Meta(sql.FieldIn(FieldValue, vs...)) } // ValueNotIn applies the NotIn predicate on the "value" field. func ValueNotIn(vs ...string) predicate.Meta { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldValue), v...)) - }) + return predicate.Meta(sql.FieldNotIn(FieldValue, vs...)) } // ValueGT applies the GT predicate on the "value" field. func ValueGT(v string) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldValue), v)) - }) + return predicate.Meta(sql.FieldGT(FieldValue, v)) } // ValueGTE applies the GTE predicate on the "value" field. 
func ValueGTE(v string) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldValue), v)) - }) + return predicate.Meta(sql.FieldGTE(FieldValue, v)) } // ValueLT applies the LT predicate on the "value" field. func ValueLT(v string) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldValue), v)) - }) + return predicate.Meta(sql.FieldLT(FieldValue, v)) } // ValueLTE applies the LTE predicate on the "value" field. func ValueLTE(v string) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldValue), v)) - }) + return predicate.Meta(sql.FieldLTE(FieldValue, v)) } // ValueContains applies the Contains predicate on the "value" field. func ValueContains(v string) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldValue), v)) - }) + return predicate.Meta(sql.FieldContains(FieldValue, v)) } // ValueHasPrefix applies the HasPrefix predicate on the "value" field. func ValueHasPrefix(v string) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldValue), v)) - }) + return predicate.Meta(sql.FieldHasPrefix(FieldValue, v)) } // ValueHasSuffix applies the HasSuffix predicate on the "value" field. func ValueHasSuffix(v string) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldValue), v)) - }) + return predicate.Meta(sql.FieldHasSuffix(FieldValue, v)) } // ValueEqualFold applies the EqualFold predicate on the "value" field. func ValueEqualFold(v string) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldValue), v)) - }) + return predicate.Meta(sql.FieldEqualFold(FieldValue, v)) } // ValueContainsFold applies the ContainsFold predicate on the "value" field. 
func ValueContainsFold(v string) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldValue), v)) - }) + return predicate.Meta(sql.FieldContainsFold(FieldValue, v)) } // AlertMetasEQ applies the EQ predicate on the "alert_metas" field. func AlertMetasEQ(v int) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldAlertMetas), v)) - }) + return predicate.Meta(sql.FieldEQ(FieldAlertMetas, v)) } // AlertMetasNEQ applies the NEQ predicate on the "alert_metas" field. func AlertMetasNEQ(v int) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldAlertMetas), v)) - }) + return predicate.Meta(sql.FieldNEQ(FieldAlertMetas, v)) } // AlertMetasIn applies the In predicate on the "alert_metas" field. func AlertMetasIn(vs ...int) predicate.Meta { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldAlertMetas), v...)) - }) + return predicate.Meta(sql.FieldIn(FieldAlertMetas, vs...)) } // AlertMetasNotIn applies the NotIn predicate on the "alert_metas" field. func AlertMetasNotIn(vs ...int) predicate.Meta { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldAlertMetas), v...)) - }) + return predicate.Meta(sql.FieldNotIn(FieldAlertMetas, vs...)) } // AlertMetasIsNil applies the IsNil predicate on the "alert_metas" field. func AlertMetasIsNil() predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldAlertMetas))) - }) + return predicate.Meta(sql.FieldIsNull(FieldAlertMetas)) } // AlertMetasNotNil applies the NotNil predicate on the "alert_metas" field. 
func AlertMetasNotNil() predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldAlertMetas))) - }) + return predicate.Meta(sql.FieldNotNull(FieldAlertMetas)) } // HasOwner applies the HasEdge predicate on the "owner" edge. @@ -525,7 +345,6 @@ func HasOwner() predicate.Meta { return predicate.Meta(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(OwnerTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn), ) sqlgraph.HasNeighbors(s, step) @@ -535,11 +354,7 @@ func HasOwner() predicate.Meta { // HasOwnerWith applies the HasEdge predicate on the "owner" edge with a given conditions (other predicates). func HasOwnerWith(preds ...predicate.Alert) predicate.Meta { return predicate.Meta(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(OwnerInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn), - ) + step := newOwnerStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -550,32 +365,15 @@ func HasOwnerWith(preds ...predicate.Alert) predicate.Meta { // And groups predicates with the AND operator between them. func And(predicates ...predicate.Meta) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for _, p := range predicates { - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Meta(sql.AndPredicates(predicates...)) } // Or groups predicates with the OR operator between them. func Or(predicates ...predicate.Meta) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for i, p := range predicates { - if i > 0 { - s1.Or() - } - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Meta(sql.OrPredicates(predicates...)) } // Not applies the not operator on the given predicate. 
func Not(p predicate.Meta) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - p(s.Not()) - }) + return predicate.Meta(sql.NotPredicates(p)) } diff --git a/pkg/database/ent/meta_create.go b/pkg/database/ent/meta_create.go index df4f6315911..3bf30f0def9 100644 --- a/pkg/database/ent/meta_create.go +++ b/pkg/database/ent/meta_create.go @@ -101,50 +101,8 @@ func (mc *MetaCreate) Mutation() *MetaMutation { // Save creates the Meta in the database. func (mc *MetaCreate) Save(ctx context.Context) (*Meta, error) { - var ( - err error - node *Meta - ) mc.defaults() - if len(mc.hooks) == 0 { - if err = mc.check(); err != nil { - return nil, err - } - node, err = mc.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*MetaMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = mc.check(); err != nil { - return nil, err - } - mc.mutation = mutation - if node, err = mc.sqlSave(ctx); err != nil { - return nil, err - } - mutation.id = &node.ID - mutation.done = true - return node, err - }) - for i := len(mc.hooks) - 1; i >= 0; i-- { - if mc.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = mc.hooks[i](mut) - } - v, err := mut.Mutate(ctx, mc.mutation) - if err != nil { - return nil, err - } - nv, ok := v.(*Meta) - if !ok { - return nil, fmt.Errorf("unexpected node type %T returned from MetaMutation", v) - } - node = nv - } - return node, err + return withHooks(ctx, mc.sqlSave, mc.mutation, mc.hooks) } // SaveX calls Save and panics if Save returns an error. 
@@ -198,6 +156,9 @@ func (mc *MetaCreate) check() error { } func (mc *MetaCreate) sqlSave(ctx context.Context) (*Meta, error) { + if err := mc.check(); err != nil { + return nil, err + } _node, _spec := mc.createSpec() if err := sqlgraph.CreateNode(ctx, mc.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { @@ -207,50 +168,30 @@ func (mc *MetaCreate) sqlSave(ctx context.Context) (*Meta, error) { } id := _spec.ID.Value.(int64) _node.ID = int(id) + mc.mutation.id = &_node.ID + mc.mutation.done = true return _node, nil } func (mc *MetaCreate) createSpec() (*Meta, *sqlgraph.CreateSpec) { var ( _node = &Meta{config: mc.config} - _spec = &sqlgraph.CreateSpec{ - Table: meta.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: meta.FieldID, - }, - } + _spec = sqlgraph.NewCreateSpec(meta.Table, sqlgraph.NewFieldSpec(meta.FieldID, field.TypeInt)) ) if value, ok := mc.mutation.CreatedAt(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: meta.FieldCreatedAt, - }) + _spec.SetField(meta.FieldCreatedAt, field.TypeTime, value) _node.CreatedAt = &value } if value, ok := mc.mutation.UpdatedAt(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: meta.FieldUpdatedAt, - }) + _spec.SetField(meta.FieldUpdatedAt, field.TypeTime, value) _node.UpdatedAt = &value } if value, ok := mc.mutation.Key(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: meta.FieldKey, - }) + _spec.SetField(meta.FieldKey, field.TypeString, value) _node.Key = value } if value, ok := mc.mutation.Value(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: meta.FieldValue, - }) + _spec.SetField(meta.FieldValue, field.TypeString, value) _node.Value = value } if nodes := mc.mutation.OwnerIDs(); len(nodes) > 0 { @@ -261,10 +202,7 @@ func 
(mc *MetaCreate) createSpec() (*Meta, *sqlgraph.CreateSpec) { Columns: []string{meta.OwnerColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: alert.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -279,11 +217,15 @@ func (mc *MetaCreate) createSpec() (*Meta, *sqlgraph.CreateSpec) { // MetaCreateBulk is the builder for creating many Meta entities in bulk. type MetaCreateBulk struct { config + err error builders []*MetaCreate } // Save creates the Meta entities in the database. func (mcb *MetaCreateBulk) Save(ctx context.Context) ([]*Meta, error) { + if mcb.err != nil { + return nil, mcb.err + } specs := make([]*sqlgraph.CreateSpec, len(mcb.builders)) nodes := make([]*Meta, len(mcb.builders)) mutators := make([]Mutator, len(mcb.builders)) @@ -300,8 +242,8 @@ func (mcb *MetaCreateBulk) Save(ctx context.Context) ([]*Meta, error) { return nil, err } builder.mutation = mutation - nodes[i], specs[i] = builder.createSpec() var err error + nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { _, err = mutators[i+1].Mutate(root, mcb.builders[i+1].mutation) } else { diff --git a/pkg/database/ent/meta_delete.go b/pkg/database/ent/meta_delete.go index e1e49d2acdc..ee25dd07eb9 100644 --- a/pkg/database/ent/meta_delete.go +++ b/pkg/database/ent/meta_delete.go @@ -4,7 +4,6 @@ package ent import ( "context" - "fmt" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" @@ -28,34 +27,7 @@ func (md *MetaDelete) Where(ps ...predicate.Meta) *MetaDelete { // Exec executes the deletion query and returns how many vertices were deleted. 
func (md *MetaDelete) Exec(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(md.hooks) == 0 { - affected, err = md.sqlExec(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*MetaMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - md.mutation = mutation - affected, err = md.sqlExec(ctx) - mutation.done = true - return affected, err - }) - for i := len(md.hooks) - 1; i >= 0; i-- { - if md.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = md.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, md.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, md.sqlExec, md.mutation, md.hooks) } // ExecX is like Exec, but panics if an error occurs. @@ -68,15 +40,7 @@ func (md *MetaDelete) ExecX(ctx context.Context) int { } func (md *MetaDelete) sqlExec(ctx context.Context) (int, error) { - _spec := &sqlgraph.DeleteSpec{ - Node: &sqlgraph.NodeSpec{ - Table: meta.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: meta.FieldID, - }, - }, - } + _spec := sqlgraph.NewDeleteSpec(meta.Table, sqlgraph.NewFieldSpec(meta.FieldID, field.TypeInt)) if ps := md.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -88,6 +52,7 @@ func (md *MetaDelete) sqlExec(ctx context.Context) (int, error) { if err != nil && sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } + md.mutation.done = true return affected, err } @@ -96,6 +61,12 @@ type MetaDeleteOne struct { md *MetaDelete } +// Where appends a list predicates to the MetaDelete builder. +func (mdo *MetaDeleteOne) Where(ps ...predicate.Meta) *MetaDeleteOne { + mdo.md.mutation.Where(ps...) + return mdo +} + // Exec executes the deletion query. 
func (mdo *MetaDeleteOne) Exec(ctx context.Context) error { n, err := mdo.md.Exec(ctx) @@ -111,5 +82,7 @@ func (mdo *MetaDeleteOne) Exec(ctx context.Context) error { // ExecX is like Exec, but panics if an error occurs. func (mdo *MetaDeleteOne) ExecX(ctx context.Context) { - mdo.md.ExecX(ctx) + if err := mdo.Exec(ctx); err != nil { + panic(err) + } } diff --git a/pkg/database/ent/meta_query.go b/pkg/database/ent/meta_query.go index d6fd4f3d522..87d91d09e0e 100644 --- a/pkg/database/ent/meta_query.go +++ b/pkg/database/ent/meta_query.go @@ -18,11 +18,9 @@ import ( // MetaQuery is the builder for querying Meta entities. type MetaQuery struct { config - limit *int - offset *int - unique *bool - order []OrderFunc - fields []string + ctx *QueryContext + order []meta.OrderOption + inters []Interceptor predicates []predicate.Meta withOwner *AlertQuery // intermediate query (i.e. traversal path). @@ -36,34 +34,34 @@ func (mq *MetaQuery) Where(ps ...predicate.Meta) *MetaQuery { return mq } -// Limit adds a limit step to the query. +// Limit the number of records to be returned by this query. func (mq *MetaQuery) Limit(limit int) *MetaQuery { - mq.limit = &limit + mq.ctx.Limit = &limit return mq } -// Offset adds an offset step to the query. +// Offset to start from. func (mq *MetaQuery) Offset(offset int) *MetaQuery { - mq.offset = &offset + mq.ctx.Offset = &offset return mq } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. func (mq *MetaQuery) Unique(unique bool) *MetaQuery { - mq.unique = &unique + mq.ctx.Unique = &unique return mq } -// Order adds an order step to the query. -func (mq *MetaQuery) Order(o ...OrderFunc) *MetaQuery { +// Order specifies how the records should be ordered. +func (mq *MetaQuery) Order(o ...meta.OrderOption) *MetaQuery { mq.order = append(mq.order, o...) return mq } // QueryOwner chains the current query on the "owner" edge. 
func (mq *MetaQuery) QueryOwner() *AlertQuery { - query := &AlertQuery{config: mq.config} + query := (&AlertClient{config: mq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := mq.prepareQuery(ctx); err != nil { return nil, err @@ -86,7 +84,7 @@ func (mq *MetaQuery) QueryOwner() *AlertQuery { // First returns the first Meta entity from the query. // Returns a *NotFoundError when no Meta was found. func (mq *MetaQuery) First(ctx context.Context) (*Meta, error) { - nodes, err := mq.Limit(1).All(ctx) + nodes, err := mq.Limit(1).All(setContextOp(ctx, mq.ctx, "First")) if err != nil { return nil, err } @@ -109,7 +107,7 @@ func (mq *MetaQuery) FirstX(ctx context.Context) *Meta { // Returns a *NotFoundError when no Meta ID was found. func (mq *MetaQuery) FirstID(ctx context.Context) (id int, err error) { var ids []int - if ids, err = mq.Limit(1).IDs(ctx); err != nil { + if ids, err = mq.Limit(1).IDs(setContextOp(ctx, mq.ctx, "FirstID")); err != nil { return } if len(ids) == 0 { @@ -132,7 +130,7 @@ func (mq *MetaQuery) FirstIDX(ctx context.Context) int { // Returns a *NotSingularError when more than one Meta entity is found. // Returns a *NotFoundError when no Meta entities are found. func (mq *MetaQuery) Only(ctx context.Context) (*Meta, error) { - nodes, err := mq.Limit(2).All(ctx) + nodes, err := mq.Limit(2).All(setContextOp(ctx, mq.ctx, "Only")) if err != nil { return nil, err } @@ -160,7 +158,7 @@ func (mq *MetaQuery) OnlyX(ctx context.Context) *Meta { // Returns a *NotFoundError when no entities are found. func (mq *MetaQuery) OnlyID(ctx context.Context) (id int, err error) { var ids []int - if ids, err = mq.Limit(2).IDs(ctx); err != nil { + if ids, err = mq.Limit(2).IDs(setContextOp(ctx, mq.ctx, "OnlyID")); err != nil { return } switch len(ids) { @@ -185,10 +183,12 @@ func (mq *MetaQuery) OnlyIDX(ctx context.Context) int { // All executes the query and returns a list of MetaSlice. 
func (mq *MetaQuery) All(ctx context.Context) ([]*Meta, error) { + ctx = setContextOp(ctx, mq.ctx, "All") if err := mq.prepareQuery(ctx); err != nil { return nil, err } - return mq.sqlAll(ctx) + qr := querierAll[[]*Meta, *MetaQuery]() + return withInterceptors[[]*Meta](ctx, mq, qr, mq.inters) } // AllX is like All, but panics if an error occurs. @@ -201,9 +201,12 @@ func (mq *MetaQuery) AllX(ctx context.Context) []*Meta { } // IDs executes the query and returns a list of Meta IDs. -func (mq *MetaQuery) IDs(ctx context.Context) ([]int, error) { - var ids []int - if err := mq.Select(meta.FieldID).Scan(ctx, &ids); err != nil { +func (mq *MetaQuery) IDs(ctx context.Context) (ids []int, err error) { + if mq.ctx.Unique == nil && mq.path != nil { + mq.Unique(true) + } + ctx = setContextOp(ctx, mq.ctx, "IDs") + if err = mq.Select(meta.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil @@ -220,10 +223,11 @@ func (mq *MetaQuery) IDsX(ctx context.Context) []int { // Count returns the count of the given query. func (mq *MetaQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, mq.ctx, "Count") if err := mq.prepareQuery(ctx); err != nil { return 0, err } - return mq.sqlCount(ctx) + return withInterceptors[int](ctx, mq, querierCount[*MetaQuery](), mq.inters) } // CountX is like Count, but panics if an error occurs. @@ -237,10 +241,15 @@ func (mq *MetaQuery) CountX(ctx context.Context) int { // Exist returns true if the query has elements in the graph. func (mq *MetaQuery) Exist(ctx context.Context) (bool, error) { - if err := mq.prepareQuery(ctx); err != nil { - return false, err + ctx = setContextOp(ctx, mq.ctx, "Exist") + switch _, err := mq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil } - return mq.sqlExist(ctx) } // ExistX is like Exist, but panics if an error occurs. 
@@ -260,22 +269,21 @@ func (mq *MetaQuery) Clone() *MetaQuery { } return &MetaQuery{ config: mq.config, - limit: mq.limit, - offset: mq.offset, - order: append([]OrderFunc{}, mq.order...), + ctx: mq.ctx.Clone(), + order: append([]meta.OrderOption{}, mq.order...), + inters: append([]Interceptor{}, mq.inters...), predicates: append([]predicate.Meta{}, mq.predicates...), withOwner: mq.withOwner.Clone(), // clone intermediate query. - sql: mq.sql.Clone(), - path: mq.path, - unique: mq.unique, + sql: mq.sql.Clone(), + path: mq.path, } } // WithOwner tells the query-builder to eager-load the nodes that are connected to // the "owner" edge. The optional arguments are used to configure the query builder of the edge. func (mq *MetaQuery) WithOwner(opts ...func(*AlertQuery)) *MetaQuery { - query := &AlertQuery{config: mq.config} + query := (&AlertClient{config: mq.config}).Query() for _, opt := range opts { opt(query) } @@ -298,16 +306,11 @@ func (mq *MetaQuery) WithOwner(opts ...func(*AlertQuery)) *MetaQuery { // Aggregate(ent.Count()). // Scan(ctx, &v) func (mq *MetaQuery) GroupBy(field string, fields ...string) *MetaGroupBy { - grbuild := &MetaGroupBy{config: mq.config} - grbuild.fields = append([]string{field}, fields...) - grbuild.path = func(ctx context.Context) (prev *sql.Selector, err error) { - if err := mq.prepareQuery(ctx); err != nil { - return nil, err - } - return mq.sqlQuery(ctx), nil - } + mq.ctx.Fields = append([]string{field}, fields...) + grbuild := &MetaGroupBy{build: mq} + grbuild.flds = &mq.ctx.Fields grbuild.label = meta.Label - grbuild.flds, grbuild.scan = &grbuild.fields, grbuild.Scan + grbuild.scan = grbuild.Scan return grbuild } @@ -324,15 +327,30 @@ func (mq *MetaQuery) GroupBy(field string, fields ...string) *MetaGroupBy { // Select(meta.FieldCreatedAt). // Scan(ctx, &v) func (mq *MetaQuery) Select(fields ...string) *MetaSelect { - mq.fields = append(mq.fields, fields...) 
- selbuild := &MetaSelect{MetaQuery: mq} - selbuild.label = meta.Label - selbuild.flds, selbuild.scan = &mq.fields, selbuild.Scan - return selbuild + mq.ctx.Fields = append(mq.ctx.Fields, fields...) + sbuild := &MetaSelect{MetaQuery: mq} + sbuild.label = meta.Label + sbuild.flds, sbuild.scan = &mq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a MetaSelect configured with the given aggregations. +func (mq *MetaQuery) Aggregate(fns ...AggregateFunc) *MetaSelect { + return mq.Select().Aggregate(fns...) } func (mq *MetaQuery) prepareQuery(ctx context.Context) error { - for _, f := range mq.fields { + for _, inter := range mq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, mq); err != nil { + return err + } + } + } + for _, f := range mq.ctx.Fields { if !meta.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } @@ -392,6 +410,9 @@ func (mq *MetaQuery) loadOwner(ctx context.Context, query *AlertQuery, nodes []* } nodeids[fk] = append(nodeids[fk], nodes[i]) } + if len(ids) == 0 { + return nil + } query.Where(alert.IDIn(ids...)) neighbors, err := query.All(ctx) if err != nil { @@ -411,41 +432,22 @@ func (mq *MetaQuery) loadOwner(ctx context.Context, query *AlertQuery, nodes []* func (mq *MetaQuery) sqlCount(ctx context.Context) (int, error) { _spec := mq.querySpec() - _spec.Node.Columns = mq.fields - if len(mq.fields) > 0 { - _spec.Unique = mq.unique != nil && *mq.unique + _spec.Node.Columns = mq.ctx.Fields + if len(mq.ctx.Fields) > 0 { + _spec.Unique = mq.ctx.Unique != nil && *mq.ctx.Unique } return sqlgraph.CountNodes(ctx, mq.driver, _spec) } -func (mq *MetaQuery) sqlExist(ctx context.Context) (bool, error) { - switch _, err := mq.FirstID(ctx); { - case IsNotFound(err): - return false, nil - case err != nil: - return false, fmt.Errorf("ent: check existence: 
%w", err) - default: - return true, nil - } -} - func (mq *MetaQuery) querySpec() *sqlgraph.QuerySpec { - _spec := &sqlgraph.QuerySpec{ - Node: &sqlgraph.NodeSpec{ - Table: meta.Table, - Columns: meta.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: meta.FieldID, - }, - }, - From: mq.sql, - Unique: true, - } - if unique := mq.unique; unique != nil { + _spec := sqlgraph.NewQuerySpec(meta.Table, meta.Columns, sqlgraph.NewFieldSpec(meta.FieldID, field.TypeInt)) + _spec.From = mq.sql + if unique := mq.ctx.Unique; unique != nil { _spec.Unique = *unique + } else if mq.path != nil { + _spec.Unique = true } - if fields := mq.fields; len(fields) > 0 { + if fields := mq.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, meta.FieldID) for i := range fields { @@ -453,6 +455,9 @@ func (mq *MetaQuery) querySpec() *sqlgraph.QuerySpec { _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) } } + if mq.withOwner != nil { + _spec.Node.AddColumnOnce(meta.FieldAlertMetas) + } } if ps := mq.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { @@ -461,10 +466,10 @@ func (mq *MetaQuery) querySpec() *sqlgraph.QuerySpec { } } } - if limit := mq.limit; limit != nil { + if limit := mq.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := mq.offset; offset != nil { + if offset := mq.ctx.Offset; offset != nil { _spec.Offset = *offset } if ps := mq.order; len(ps) > 0 { @@ -480,7 +485,7 @@ func (mq *MetaQuery) querySpec() *sqlgraph.QuerySpec { func (mq *MetaQuery) sqlQuery(ctx context.Context) *sql.Selector { builder := sql.Dialect(mq.driver.Dialect()) t1 := builder.Table(meta.Table) - columns := mq.fields + columns := mq.ctx.Fields if len(columns) == 0 { columns = meta.Columns } @@ -489,7 +494,7 @@ func (mq *MetaQuery) sqlQuery(ctx context.Context) *sql.Selector { selector = mq.sql selector.Select(selector.Columns(columns...)...) 
} - if mq.unique != nil && *mq.unique { + if mq.ctx.Unique != nil && *mq.ctx.Unique { selector.Distinct() } for _, p := range mq.predicates { @@ -498,12 +503,12 @@ func (mq *MetaQuery) sqlQuery(ctx context.Context) *sql.Selector { for _, p := range mq.order { p(selector) } - if offset := mq.offset; offset != nil { + if offset := mq.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := mq.limit; limit != nil { + if limit := mq.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector @@ -511,13 +516,8 @@ func (mq *MetaQuery) sqlQuery(ctx context.Context) *sql.Selector { // MetaGroupBy is the group-by builder for Meta entities. type MetaGroupBy struct { - config selector - fields []string - fns []AggregateFunc - // intermediate query (i.e. traversal path). - sql *sql.Selector - path func(context.Context) (*sql.Selector, error) + build *MetaQuery } // Aggregate adds the given aggregation functions to the group-by query. @@ -526,74 +526,77 @@ func (mgb *MetaGroupBy) Aggregate(fns ...AggregateFunc) *MetaGroupBy { return mgb } -// Scan applies the group-by query and scans the result into the given value. +// Scan applies the selector query and scans the result into the given value. 
func (mgb *MetaGroupBy) Scan(ctx context.Context, v any) error { - query, err := mgb.path(ctx) - if err != nil { + ctx = setContextOp(ctx, mgb.build.ctx, "GroupBy") + if err := mgb.build.prepareQuery(ctx); err != nil { return err } - mgb.sql = query - return mgb.sqlScan(ctx, v) + return scanWithInterceptors[*MetaQuery, *MetaGroupBy](ctx, mgb.build, mgb, mgb.build.inters, v) } -func (mgb *MetaGroupBy) sqlScan(ctx context.Context, v any) error { - for _, f := range mgb.fields { - if !meta.ValidColumn(f) { - return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} +func (mgb *MetaGroupBy) sqlScan(ctx context.Context, root *MetaQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(mgb.fns)) + for _, fn := range mgb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*mgb.flds)+len(mgb.fns)) + for _, f := range *mgb.flds { + columns = append(columns, selector.C(f)) } + columns = append(columns, aggregation...) + selector.Select(columns...) } - selector := mgb.sqlQuery() + selector.GroupBy(selector.Columns(*mgb.flds...)...) if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := mgb.driver.Query(ctx, query, args, rows); err != nil { + if err := mgb.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() return sql.ScanSlice(rows, v) } -func (mgb *MetaGroupBy) sqlQuery() *sql.Selector { - selector := mgb.sql.Select() - aggregation := make([]string, 0, len(mgb.fns)) - for _, fn := range mgb.fns { - aggregation = append(aggregation, fn(selector)) - } - // If no columns were selected in a custom aggregation function, the default - // selection is the fields used for "group-by", and the aggregation functions. 
- if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(mgb.fields)+len(mgb.fns)) - for _, f := range mgb.fields { - columns = append(columns, selector.C(f)) - } - columns = append(columns, aggregation...) - selector.Select(columns...) - } - return selector.GroupBy(selector.Columns(mgb.fields...)...) -} - // MetaSelect is the builder for selecting fields of Meta entities. type MetaSelect struct { *MetaQuery selector - // intermediate query (i.e. traversal path). - sql *sql.Selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (ms *MetaSelect) Aggregate(fns ...AggregateFunc) *MetaSelect { + ms.fns = append(ms.fns, fns...) + return ms } // Scan applies the selector query and scans the result into the given value. func (ms *MetaSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, ms.ctx, "Select") if err := ms.prepareQuery(ctx); err != nil { return err } - ms.sql = ms.MetaQuery.sqlQuery(ctx) - return ms.sqlScan(ctx, v) + return scanWithInterceptors[*MetaQuery, *MetaSelect](ctx, ms.MetaQuery, ms, ms.inters, v) } -func (ms *MetaSelect) sqlScan(ctx context.Context, v any) error { +func (ms *MetaSelect) sqlScan(ctx context.Context, root *MetaQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(ms.fns)) + for _, fn := range ms.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*ms.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) 
+ } rows := &sql.Rows{} - query, args := ms.sql.Query() + query, args := selector.Query() if err := ms.driver.Query(ctx, query, args, rows); err != nil { return err } diff --git a/pkg/database/ent/meta_update.go b/pkg/database/ent/meta_update.go index 67a198dddfa..8071c4f0df5 100644 --- a/pkg/database/ent/meta_update.go +++ b/pkg/database/ent/meta_update.go @@ -117,41 +117,8 @@ func (mu *MetaUpdate) ClearOwner() *MetaUpdate { // Save executes the query and returns the number of nodes affected by the update operation. func (mu *MetaUpdate) Save(ctx context.Context) (int, error) { - var ( - err error - affected int - ) mu.defaults() - if len(mu.hooks) == 0 { - if err = mu.check(); err != nil { - return 0, err - } - affected, err = mu.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*MetaMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = mu.check(); err != nil { - return 0, err - } - mu.mutation = mutation - affected, err = mu.sqlSave(ctx) - mutation.done = true - return affected, err - }) - for i := len(mu.hooks) - 1; i >= 0; i-- { - if mu.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = mu.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, mu.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, mu.sqlSave, mu.mutation, mu.hooks) } // SaveX is like Save, but panics if an error occurs. 
@@ -199,16 +166,10 @@ func (mu *MetaUpdate) check() error { } func (mu *MetaUpdate) sqlSave(ctx context.Context) (n int, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: meta.Table, - Columns: meta.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: meta.FieldID, - }, - }, + if err := mu.check(); err != nil { + return n, err } + _spec := sqlgraph.NewUpdateSpec(meta.Table, meta.Columns, sqlgraph.NewFieldSpec(meta.FieldID, field.TypeInt)) if ps := mu.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -217,44 +178,22 @@ func (mu *MetaUpdate) sqlSave(ctx context.Context) (n int, err error) { } } if value, ok := mu.mutation.CreatedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: meta.FieldCreatedAt, - }) + _spec.SetField(meta.FieldCreatedAt, field.TypeTime, value) } if mu.mutation.CreatedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: meta.FieldCreatedAt, - }) + _spec.ClearField(meta.FieldCreatedAt, field.TypeTime) } if value, ok := mu.mutation.UpdatedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: meta.FieldUpdatedAt, - }) + _spec.SetField(meta.FieldUpdatedAt, field.TypeTime, value) } if mu.mutation.UpdatedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: meta.FieldUpdatedAt, - }) + _spec.ClearField(meta.FieldUpdatedAt, field.TypeTime) } if value, ok := mu.mutation.Key(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: meta.FieldKey, - }) + _spec.SetField(meta.FieldKey, field.TypeString, value) } if value, ok := mu.mutation.Value(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, 
&sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: meta.FieldValue, - }) + _spec.SetField(meta.FieldValue, field.TypeString, value) } if mu.mutation.OwnerCleared() { edge := &sqlgraph.EdgeSpec{ @@ -264,10 +203,7 @@ func (mu *MetaUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{meta.OwnerColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: alert.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -280,10 +216,7 @@ func (mu *MetaUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{meta.OwnerColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: alert.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -299,6 +232,7 @@ func (mu *MetaUpdate) sqlSave(ctx context.Context) (n int, err error) { } return 0, err } + mu.mutation.done = true return n, nil } @@ -396,6 +330,12 @@ func (muo *MetaUpdateOne) ClearOwner() *MetaUpdateOne { return muo } +// Where appends a list predicates to the MetaUpdate builder. +func (muo *MetaUpdateOne) Where(ps ...predicate.Meta) *MetaUpdateOne { + muo.mutation.Where(ps...) + return muo +} + // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. func (muo *MetaUpdateOne) Select(field string, fields ...string) *MetaUpdateOne { @@ -405,47 +345,8 @@ func (muo *MetaUpdateOne) Select(field string, fields ...string) *MetaUpdateOne // Save executes the query and returns the updated Meta entity. 
func (muo *MetaUpdateOne) Save(ctx context.Context) (*Meta, error) { - var ( - err error - node *Meta - ) muo.defaults() - if len(muo.hooks) == 0 { - if err = muo.check(); err != nil { - return nil, err - } - node, err = muo.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*MetaMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = muo.check(); err != nil { - return nil, err - } - muo.mutation = mutation - node, err = muo.sqlSave(ctx) - mutation.done = true - return node, err - }) - for i := len(muo.hooks) - 1; i >= 0; i-- { - if muo.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = muo.hooks[i](mut) - } - v, err := mut.Mutate(ctx, muo.mutation) - if err != nil { - return nil, err - } - nv, ok := v.(*Meta) - if !ok { - return nil, fmt.Errorf("unexpected node type %T returned from MetaMutation", v) - } - node = nv - } - return node, err + return withHooks(ctx, muo.sqlSave, muo.mutation, muo.hooks) } // SaveX is like Save, but panics if an error occurs. 
@@ -493,16 +394,10 @@ func (muo *MetaUpdateOne) check() error { } func (muo *MetaUpdateOne) sqlSave(ctx context.Context) (_node *Meta, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: meta.Table, - Columns: meta.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: meta.FieldID, - }, - }, + if err := muo.check(); err != nil { + return _node, err } + _spec := sqlgraph.NewUpdateSpec(meta.Table, meta.Columns, sqlgraph.NewFieldSpec(meta.FieldID, field.TypeInt)) id, ok := muo.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Meta.id" for update`)} @@ -528,44 +423,22 @@ func (muo *MetaUpdateOne) sqlSave(ctx context.Context) (_node *Meta, err error) } } if value, ok := muo.mutation.CreatedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: meta.FieldCreatedAt, - }) + _spec.SetField(meta.FieldCreatedAt, field.TypeTime, value) } if muo.mutation.CreatedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: meta.FieldCreatedAt, - }) + _spec.ClearField(meta.FieldCreatedAt, field.TypeTime) } if value, ok := muo.mutation.UpdatedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: meta.FieldUpdatedAt, - }) + _spec.SetField(meta.FieldUpdatedAt, field.TypeTime, value) } if muo.mutation.UpdatedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: meta.FieldUpdatedAt, - }) + _spec.ClearField(meta.FieldUpdatedAt, field.TypeTime) } if value, ok := muo.mutation.Key(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: meta.FieldKey, - }) + _spec.SetField(meta.FieldKey, field.TypeString, value) } if value, ok := muo.mutation.Value(); ok { - 
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: meta.FieldValue, - }) + _spec.SetField(meta.FieldValue, field.TypeString, value) } if muo.mutation.OwnerCleared() { edge := &sqlgraph.EdgeSpec{ @@ -575,10 +448,7 @@ func (muo *MetaUpdateOne) sqlSave(ctx context.Context) (_node *Meta, err error) Columns: []string{meta.OwnerColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: alert.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -591,10 +461,7 @@ func (muo *MetaUpdateOne) sqlSave(ctx context.Context) (_node *Meta, err error) Columns: []string{meta.OwnerColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: alert.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -613,5 +480,6 @@ func (muo *MetaUpdateOne) sqlSave(ctx context.Context) (_node *Meta, err error) } return nil, err } + muo.mutation.done = true return _node, nil } diff --git a/pkg/database/ent/mutation.go b/pkg/database/ent/mutation.go index 907c1ef015e..c5808d0d9b8 100644 --- a/pkg/database/ent/mutation.go +++ b/pkg/database/ent/mutation.go @@ -9,6 +9,8 @@ import ( "sync" "time" + "entgo.io/ent" + "entgo.io/ent/dialect/sql" "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" "github.com/crowdsecurity/crowdsec/pkg/database/ent/bouncer" "github.com/crowdsecurity/crowdsec/pkg/database/ent/configitem" @@ -17,8 +19,6 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" "github.com/crowdsecurity/crowdsec/pkg/database/ent/meta" "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" - - "entgo.io/ent" ) const ( @@ -1578,11 +1578,26 @@ func (m *AlertMutation) Where(ps ...predicate.Alert) { m.predicates = append(m.predicates, ps...) 
} +// WhereP appends storage-level predicates to the AlertMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *AlertMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.Alert, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + // Op returns the operation name. func (m *AlertMutation) Op() Op { return m.op } +// SetOp allows setting the mutation operation. +func (m *AlertMutation) SetOp(op Op) { + m.op = op +} + // Type returns the node type of this mutation (Alert). func (m *AlertMutation) Type() string { return m.typ @@ -2997,11 +3012,26 @@ func (m *BouncerMutation) Where(ps ...predicate.Bouncer) { m.predicates = append(m.predicates, ps...) } +// WhereP appends storage-level predicates to the BouncerMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *BouncerMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.Bouncer, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + // Op returns the operation name. func (m *BouncerMutation) Op() Op { return m.op } +// SetOp allows setting the mutation operation. +func (m *BouncerMutation) SetOp(op Op) { + m.op = op +} + // Type returns the node type of this mutation (Bouncer). func (m *BouncerMutation) Type() string { return m.typ @@ -3654,11 +3684,26 @@ func (m *ConfigItemMutation) Where(ps ...predicate.ConfigItem) { m.predicates = append(m.predicates, ps...) } +// WhereP appends storage-level predicates to the ConfigItemMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *ConfigItemMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.ConfigItem, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + // Op returns the operation name. 
func (m *ConfigItemMutation) Op() Op { return m.op } +// SetOp allows setting the mutation operation. +func (m *ConfigItemMutation) SetOp(op Op) { + m.op = op +} + // Type returns the node type of this mutation (ConfigItem). func (m *ConfigItemMutation) Type() string { return m.typ @@ -4830,6 +4875,7 @@ func (m *DecisionMutation) SetOwnerID(id int) { // ClearOwner clears the "owner" edge to the Alert entity. func (m *DecisionMutation) ClearOwner() { m.clearedowner = true + m.clearedFields[decision.FieldAlertDecisions] = struct{}{} } // OwnerCleared reports if the "owner" edge to the Alert entity was cleared. @@ -4866,11 +4912,26 @@ func (m *DecisionMutation) Where(ps ...predicate.Decision) { m.predicates = append(m.predicates, ps...) } +// WhereP appends storage-level predicates to the DecisionMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *DecisionMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.Decision, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + // Op returns the operation name. func (m *DecisionMutation) Op() Op { return m.op } +// SetOp allows setting the mutation operation. +func (m *DecisionMutation) SetOp(op Op) { + m.op = op +} + // Type returns the node type of this mutation (Decision). func (m *DecisionMutation) Type() string { return m.typ @@ -5775,6 +5836,7 @@ func (m *EventMutation) SetOwnerID(id int) { // ClearOwner clears the "owner" edge to the Alert entity. func (m *EventMutation) ClearOwner() { m.clearedowner = true + m.clearedFields[event.FieldAlertEvents] = struct{}{} } // OwnerCleared reports if the "owner" edge to the Alert entity was cleared. @@ -5811,11 +5873,26 @@ func (m *EventMutation) Where(ps ...predicate.Event) { m.predicates = append(m.predicates, ps...) } +// WhereP appends storage-level predicates to the EventMutation builder. 
Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *EventMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.Event, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + // Op returns the operation name. func (m *EventMutation) Op() Op { return m.op } +// SetOp allows setting the mutation operation. +func (m *EventMutation) SetOp(op Op) { + m.op = op +} + // Type returns the node type of this mutation (Event). func (m *EventMutation) Type() string { return m.typ @@ -6795,11 +6872,26 @@ func (m *MachineMutation) Where(ps ...predicate.Machine) { m.predicates = append(m.predicates, ps...) } +// WhereP appends storage-level predicates to the MachineMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *MachineMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.Machine, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + // Op returns the operation name. func (m *MachineMutation) Op() Op { return m.op } +// SetOp allows setting the mutation operation. +func (m *MachineMutation) SetOp(op Op) { + m.op = op +} + // Type returns the node type of this mutation (Machine). func (m *MachineMutation) Type() string { return m.typ @@ -7565,6 +7657,7 @@ func (m *MetaMutation) SetOwnerID(id int) { // ClearOwner clears the "owner" edge to the Alert entity. func (m *MetaMutation) ClearOwner() { m.clearedowner = true + m.clearedFields[meta.FieldAlertMetas] = struct{}{} } // OwnerCleared reports if the "owner" edge to the Alert entity was cleared. @@ -7601,11 +7694,26 @@ func (m *MetaMutation) Where(ps ...predicate.Meta) { m.predicates = append(m.predicates, ps...) } +// WhereP appends storage-level predicates to the MetaMutation builder. 
Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *MetaMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.Meta, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + // Op returns the operation name. func (m *MetaMutation) Op() Op { return m.op } +// SetOp allows setting the mutation operation. +func (m *MetaMutation) SetOp(op Op) { + m.op = op +} + // Type returns the node type of this mutation (Meta). func (m *MetaMutation) Type() string { return m.typ diff --git a/pkg/database/ent/runtime/runtime.go b/pkg/database/ent/runtime/runtime.go index e64f7bd7554..2a645f624d7 100644 --- a/pkg/database/ent/runtime/runtime.go +++ b/pkg/database/ent/runtime/runtime.go @@ -5,6 +5,6 @@ package runtime // The schema-stitching logic is generated in github.com/crowdsecurity/crowdsec/pkg/database/ent/runtime.go const ( - Version = "v0.11.3" // Version of ent codegen. - Sum = "h1:F5FBGAWiDCGder7YT+lqMnyzXl6d0xU3xMBM/SO3CMc=" // Sum of ent codegen. + Version = "v0.12.4" // Version of ent codegen. + Sum = "h1:LddPnAyxls/O7DTXZvUGDj0NZIdGSu317+aoNLJWbD8=" // Sum of ent codegen. ) diff --git a/pkg/database/ent/tx.go b/pkg/database/ent/tx.go index 2a1efd152a0..65c2ed00a44 100644 --- a/pkg/database/ent/tx.go +++ b/pkg/database/ent/tx.go @@ -30,12 +30,6 @@ type Tx struct { // lazily loaded. client *Client clientOnce sync.Once - - // completion callbacks. - mu sync.Mutex - onCommit []CommitHook - onRollback []RollbackHook - // ctx lives for the life of the transaction. It is // the same context used by the underlying connection. ctx context.Context @@ -80,9 +74,9 @@ func (tx *Tx) Commit() error { var fn Committer = CommitFunc(func(context.Context, *Tx) error { return txDriver.tx.Commit() }) - tx.mu.Lock() - hooks := append([]CommitHook(nil), tx.onCommit...) - tx.mu.Unlock() + txDriver.mu.Lock() + hooks := append([]CommitHook(nil), txDriver.onCommit...) 
+ txDriver.mu.Unlock() for i := len(hooks) - 1; i >= 0; i-- { fn = hooks[i](fn) } @@ -91,9 +85,10 @@ func (tx *Tx) Commit() error { // OnCommit adds a hook to call on commit. func (tx *Tx) OnCommit(f CommitHook) { - tx.mu.Lock() - defer tx.mu.Unlock() - tx.onCommit = append(tx.onCommit, f) + txDriver := tx.config.driver.(*txDriver) + txDriver.mu.Lock() + txDriver.onCommit = append(txDriver.onCommit, f) + txDriver.mu.Unlock() } type ( @@ -135,9 +130,9 @@ func (tx *Tx) Rollback() error { var fn Rollbacker = RollbackFunc(func(context.Context, *Tx) error { return txDriver.tx.Rollback() }) - tx.mu.Lock() - hooks := append([]RollbackHook(nil), tx.onRollback...) - tx.mu.Unlock() + txDriver.mu.Lock() + hooks := append([]RollbackHook(nil), txDriver.onRollback...) + txDriver.mu.Unlock() for i := len(hooks) - 1; i >= 0; i-- { fn = hooks[i](fn) } @@ -146,9 +141,10 @@ func (tx *Tx) Rollback() error { // OnRollback adds a hook to call on rollback. func (tx *Tx) OnRollback(f RollbackHook) { - tx.mu.Lock() - defer tx.mu.Unlock() - tx.onRollback = append(tx.onRollback, f) + txDriver := tx.config.driver.(*txDriver) + txDriver.mu.Lock() + txDriver.onRollback = append(txDriver.onRollback, f) + txDriver.mu.Unlock() } // Client returns a Client that binds to current transaction. @@ -186,6 +182,10 @@ type txDriver struct { drv dialect.Driver // tx is the underlying transaction. tx dialect.Tx + // completion hooks. + mu sync.Mutex + onCommit []CommitHook + onRollback []RollbackHook } // newTx creates a new transactional driver. 
From 8de8bf0e0653beb80d847741b416ad51edd00272 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 14 Feb 2024 11:53:12 +0100 Subject: [PATCH 028/318] pkg/hubtest: extract methods + consistent error handling (#2756) * pkg/hubtest: extract methods + consistent error handling * lint * rename variables for further refactor --- pkg/hubtest/appsecrule.go | 80 ++++++++++++++++-------------- pkg/hubtest/parser.go | 99 +++++++++++++++++++------------------ pkg/hubtest/postoverflow.go | 98 ++++++++++++++++++------------------ pkg/hubtest/scenario.go | 78 +++++++++++++++-------------- 4 files changed, 185 insertions(+), 170 deletions(-) diff --git a/pkg/hubtest/appsecrule.go b/pkg/hubtest/appsecrule.go index 9b70e1441ac..fb4ad78cc18 100644 --- a/pkg/hubtest/appsecrule.go +++ b/pkg/hubtest/appsecrule.go @@ -11,75 +11,81 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -func (t *HubTestItem) installAppsecRuleItem(hubAppsecRule *cwhub.Item) error { - appsecRuleSource, err := filepath.Abs(filepath.Join(t.HubPath, hubAppsecRule.RemotePath)) +func (t *HubTestItem) installAppsecRuleItem(item *cwhub.Item) error { + sourcePath, err := filepath.Abs(filepath.Join(t.HubPath, item.RemotePath)) if err != nil { - return fmt.Errorf("can't get absolute path of '%s': %s", appsecRuleSource, err) + return fmt.Errorf("can't get absolute path of '%s': %w", sourcePath, err) } - appsecRuleFilename := filepath.Base(appsecRuleSource) + sourceFilename := filepath.Base(sourcePath) // runtime/hub/appsec-rules/author/appsec-rule - hubDirAppsecRuleDest := filepath.Join(t.RuntimeHubPath, filepath.Dir(hubAppsecRule.RemotePath)) + hubDirAppsecRuleDest := filepath.Join(t.RuntimeHubPath, filepath.Dir(item.RemotePath)) // runtime/appsec-rules/ - appsecRuleDirDest := fmt.Sprintf("%s/appsec-rules/", t.RuntimePath) + itemTypeDirDest := fmt.Sprintf("%s/appsec-rules/", t.RuntimePath) if err := os.MkdirAll(hubDirAppsecRuleDest, os.ModePerm); err != nil { - return 
fmt.Errorf("unable to create folder '%s': %s", hubDirAppsecRuleDest, err) + return fmt.Errorf("unable to create folder '%s': %w", hubDirAppsecRuleDest, err) } - if err := os.MkdirAll(appsecRuleDirDest, os.ModePerm); err != nil { - return fmt.Errorf("unable to create folder '%s': %s", appsecRuleDirDest, err) + if err := os.MkdirAll(itemTypeDirDest, os.ModePerm); err != nil { + return fmt.Errorf("unable to create folder '%s': %w", itemTypeDirDest, err) } // runtime/hub/appsec-rules/crowdsecurity/rule.yaml - hubDirAppsecRulePath := filepath.Join(appsecRuleDirDest, appsecRuleFilename) - if err := Copy(appsecRuleSource, hubDirAppsecRulePath); err != nil { - return fmt.Errorf("unable to copy '%s' to '%s': %s", appsecRuleSource, hubDirAppsecRulePath, err) + hubDirAppsecRulePath := filepath.Join(itemTypeDirDest, sourceFilename) + if err := Copy(sourcePath, hubDirAppsecRulePath); err != nil { + return fmt.Errorf("unable to copy '%s' to '%s': %w", sourcePath, hubDirAppsecRulePath, err) } // runtime/appsec-rules/rule.yaml - appsecRulePath := filepath.Join(appsecRuleDirDest, appsecRuleFilename) + appsecRulePath := filepath.Join(itemTypeDirDest, sourceFilename) if err := os.Symlink(hubDirAppsecRulePath, appsecRulePath); err != nil { if !os.IsExist(err) { - return fmt.Errorf("unable to symlink appsec-rule '%s' to '%s': %s", hubDirAppsecRulePath, appsecRulePath, err) + return fmt.Errorf("unable to symlink appsec-rule '%s' to '%s': %w", hubDirAppsecRulePath, appsecRulePath, err) } } return nil } +func (t *HubTestItem) installAppsecRuleCustomFrom(appsecrule string, customPath string) (bool, error) { + // we check if its a custom appsec-rule + customAppsecRulePath := filepath.Join(customPath, appsecrule) + if _, err := os.Stat(customAppsecRulePath); os.IsNotExist(err) { + return false, nil + } + + customAppsecRulePathSplit := strings.Split(customAppsecRulePath, "/") + customAppsecRuleName := customAppsecRulePathSplit[len(customAppsecRulePathSplit)-1] + + itemTypeDirDest := 
fmt.Sprintf("%s/appsec-rules/", t.RuntimePath) + if err := os.MkdirAll(itemTypeDirDest, os.ModePerm); err != nil { + return false, fmt.Errorf("unable to create folder '%s': %w", itemTypeDirDest, err) + } + + customAppsecRuleDest := fmt.Sprintf("%s/appsec-rules/%s", t.RuntimePath, customAppsecRuleName) + if err := Copy(customAppsecRulePath, customAppsecRuleDest); err != nil { + return false, fmt.Errorf("unable to copy appsec-rule from '%s' to '%s': %w", customAppsecRulePath, customAppsecRuleDest, err) + } + + return true, nil +} + func (t *HubTestItem) installAppsecRuleCustom(appsecrule string) error { - customAppsecRuleExist := false for _, customPath := range t.CustomItemsLocation { - // we check if its a custom appsec-rule - customAppsecRulePath := filepath.Join(customPath, appsecrule) - if _, err := os.Stat(customAppsecRulePath); os.IsNotExist(err) { - continue - } - customAppsecRulePathSplit := strings.Split(customAppsecRulePath, "/") - customAppsecRuleName := customAppsecRulePathSplit[len(customAppsecRulePathSplit)-1] - - appsecRuleDirDest := fmt.Sprintf("%s/appsec-rules/", t.RuntimePath) - if err := os.MkdirAll(appsecRuleDirDest, os.ModePerm); err != nil { - return fmt.Errorf("unable to create folder '%s': %s", appsecRuleDirDest, err) + found, err := t.installAppsecRuleCustomFrom(appsecrule, customPath) + if err != nil { + return err } - // runtime/appsec-rules/ - customAppsecRuleDest := fmt.Sprintf("%s/appsec-rules/%s", t.RuntimePath, customAppsecRuleName) - // if path to postoverflow exist, copy it - if err := Copy(customAppsecRulePath, customAppsecRuleDest); err != nil { - continue + if found { + return nil } - customAppsecRuleExist = true - break - } - if !customAppsecRuleExist { - return fmt.Errorf("couldn't find custom appsec-rule '%s' in the following location: %+v", appsecrule, t.CustomItemsLocation) } - return nil + return fmt.Errorf("couldn't find custom appsec-rule '%s' in the following location: %+v", appsecrule, t.CustomItemsLocation) } func (t 
*HubTestItem) installAppsecRule(name string) error { diff --git a/pkg/hubtest/parser.go b/pkg/hubtest/parser.go index b8dcdb8b1d0..d40301e3015 100644 --- a/pkg/hubtest/parser.go +++ b/pkg/hubtest/parser.go @@ -9,89 +9,90 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -func (t *HubTestItem) installParserItem(hubParser *cwhub.Item) error { - parserSource, err := filepath.Abs(filepath.Join(t.HubPath, hubParser.RemotePath)) +func (t *HubTestItem) installParserItem(item *cwhub.Item) error { + sourcePath, err := filepath.Abs(filepath.Join(t.HubPath, item.RemotePath)) if err != nil { - return fmt.Errorf("can't get absolute path of '%s': %s", parserSource, err) + return fmt.Errorf("can't get absolute path of '%s': %w", sourcePath, err) } - parserFileName := filepath.Base(parserSource) + sourceFilename := filepath.Base(sourcePath) // runtime/hub/parsers/s00-raw/crowdsecurity/ - hubDirParserDest := filepath.Join(t.RuntimeHubPath, filepath.Dir(hubParser.RemotePath)) + hubDirParserDest := filepath.Join(t.RuntimeHubPath, filepath.Dir(item.RemotePath)) // runtime/parsers/s00-raw/ - parserDirDest := fmt.Sprintf("%s/parsers/%s/", t.RuntimePath, hubParser.Stage) + itemTypeDirDest := fmt.Sprintf("%s/parsers/%s/", t.RuntimePath, item.Stage) if err := os.MkdirAll(hubDirParserDest, os.ModePerm); err != nil { - return fmt.Errorf("unable to create folder '%s': %s", hubDirParserDest, err) + return fmt.Errorf("unable to create folder '%s': %w", hubDirParserDest, err) } - if err := os.MkdirAll(parserDirDest, os.ModePerm); err != nil { - return fmt.Errorf("unable to create folder '%s': %s", parserDirDest, err) + if err := os.MkdirAll(itemTypeDirDest, os.ModePerm); err != nil { + return fmt.Errorf("unable to create folder '%s': %w", itemTypeDirDest, err) } // runtime/hub/parsers/s00-raw/crowdsecurity/syslog-logs.yaml - hubDirParserPath := filepath.Join(hubDirParserDest, parserFileName) - if err := Copy(parserSource, hubDirParserPath); err != nil { - return fmt.Errorf("unable to 
copy '%s' to '%s': %s", parserSource, hubDirParserPath, err) + hubDirParserPath := filepath.Join(hubDirParserDest, sourceFilename) + if err := Copy(sourcePath, hubDirParserPath); err != nil { + return fmt.Errorf("unable to copy '%s' to '%s': %w", sourcePath, hubDirParserPath, err) } // runtime/parsers/s00-raw/syslog-logs.yaml - parserDirParserPath := filepath.Join(parserDirDest, parserFileName) + parserDirParserPath := filepath.Join(itemTypeDirDest, sourceFilename) if err := os.Symlink(hubDirParserPath, parserDirParserPath); err != nil { if !os.IsExist(err) { - return fmt.Errorf("unable to symlink parser '%s' to '%s': %s", hubDirParserPath, parserDirParserPath, err) + return fmt.Errorf("unable to symlink parser '%s' to '%s': %w", hubDirParserPath, parserDirParserPath, err) } } return nil } -func (t *HubTestItem) installParserCustom(parser string) error { - customParserExist := false - for _, customPath := range t.CustomItemsLocation { - // we check if its a custom parser - customParserPath := filepath.Join(customPath, parser) - if _, err := os.Stat(customParserPath); os.IsNotExist(err) { - continue - //return fmt.Errorf("parser '%s' doesn't exist in the hub and doesn't appear to be a custom one.", parser) - } +func (t *HubTestItem) installParserCustomFrom(parser string, customPath string) (bool, error) { + // we check if its a custom parser + customParserPath := filepath.Join(customPath, parser) + if _, err := os.Stat(customParserPath); os.IsNotExist(err) { + return false, nil + } - customParserPathSplit, customParserName := filepath.Split(customParserPath) - // because path is parsers///parser.yaml and we wan't the stage - splittedPath := strings.Split(customParserPathSplit, string(os.PathSeparator)) - customParserStage := splittedPath[len(splittedPath)-3] + customParserPathSplit, customParserName := filepath.Split(customParserPath) + // because path is parsers///parser.yaml and we wan't the stage + splitPath := strings.Split(customParserPathSplit, 
string(os.PathSeparator)) + customParserStage := splitPath[len(splitPath)-3] - // check if stage exist - hubStagePath := filepath.Join(t.HubPath, fmt.Sprintf("parsers/%s", customParserStage)) + // check if stage exist + hubStagePath := filepath.Join(t.HubPath, fmt.Sprintf("parsers/%s", customParserStage)) + if _, err := os.Stat(hubStagePath); os.IsNotExist(err) { + return false, fmt.Errorf("stage '%s' extracted from '%s' doesn't exist in the hub", customParserStage, hubStagePath) + } - if _, err := os.Stat(hubStagePath); os.IsNotExist(err) { - continue - //return fmt.Errorf("stage '%s' extracted from '%s' doesn't exist in the hub", customParserStage, hubStagePath) - } + stageDirDest := fmt.Sprintf("%s/parsers/%s/", t.RuntimePath, customParserStage) + if err := os.MkdirAll(stageDirDest, os.ModePerm); err != nil { + return false, fmt.Errorf("unable to create folder '%s': %w", stageDirDest, err) + } - parserDirDest := fmt.Sprintf("%s/parsers/%s/", t.RuntimePath, customParserStage) - if err := os.MkdirAll(parserDirDest, os.ModePerm); err != nil { - continue - //return fmt.Errorf("unable to create folder '%s': %s", parserDirDest, err) - } + customParserDest := filepath.Join(stageDirDest, customParserName) + // if path to parser exist, copy it + if err := Copy(customParserPath, customParserDest); err != nil { + return false, fmt.Errorf("unable to copy custom parser '%s' to '%s': %w", customParserPath, customParserDest, err) + } + + return true, nil +} - customParserDest := filepath.Join(parserDirDest, customParserName) - // if path to parser exist, copy it - if err := Copy(customParserPath, customParserDest); err != nil { - continue - //return fmt.Errorf("unable to copy custom parser '%s' to '%s': %s", customParserPath, customParserDest, err) +func (t *HubTestItem) installParserCustom(parser string) error { + for _, customPath := range t.CustomItemsLocation { + found, err := t.installParserCustomFrom(parser, customPath) + if err != nil { + return err } - 
customParserExist = true - break - } - if !customParserExist { - return fmt.Errorf("couldn't find custom parser '%s' in the following location: %+v", parser, t.CustomItemsLocation) + if found { + return nil + } } - return nil + return fmt.Errorf("couldn't find custom parser '%s' in the following locations: %+v", parser, t.CustomItemsLocation) } func (t *HubTestItem) installParser(name string) error { diff --git a/pkg/hubtest/postoverflow.go b/pkg/hubtest/postoverflow.go index d5d43ddc742..76a67b58b76 100644 --- a/pkg/hubtest/postoverflow.go +++ b/pkg/hubtest/postoverflow.go @@ -9,88 +9,90 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -func (t *HubTestItem) installPostoverflowItem(hubPostOverflow *cwhub.Item) error { - postoverflowSource, err := filepath.Abs(filepath.Join(t.HubPath, hubPostOverflow.RemotePath)) +func (t *HubTestItem) installPostoverflowItem(item *cwhub.Item) error { + sourcePath, err := filepath.Abs(filepath.Join(t.HubPath, item.RemotePath)) if err != nil { - return fmt.Errorf("can't get absolute path of '%s': %s", postoverflowSource, err) + return fmt.Errorf("can't get absolute path of '%s': %w", sourcePath, err) } - postoverflowFileName := filepath.Base(postoverflowSource) + sourceFilename := filepath.Base(sourcePath) // runtime/hub/postoverflows/s00-enrich/crowdsecurity/ - hubDirPostoverflowDest := filepath.Join(t.RuntimeHubPath, filepath.Dir(hubPostOverflow.RemotePath)) + hubDirPostoverflowDest := filepath.Join(t.RuntimeHubPath, filepath.Dir(item.RemotePath)) // runtime/postoverflows/s00-enrich - postoverflowDirDest := fmt.Sprintf("%s/postoverflows/%s/", t.RuntimePath, hubPostOverflow.Stage) + itemTypeDirDest := fmt.Sprintf("%s/postoverflows/%s/", t.RuntimePath, item.Stage) if err := os.MkdirAll(hubDirPostoverflowDest, os.ModePerm); err != nil { - return fmt.Errorf("unable to create folder '%s': %s", hubDirPostoverflowDest, err) + return fmt.Errorf("unable to create folder '%s': %w", hubDirPostoverflowDest, err) } - if err := 
os.MkdirAll(postoverflowDirDest, os.ModePerm); err != nil { - return fmt.Errorf("unable to create folder '%s': %s", postoverflowDirDest, err) + if err := os.MkdirAll(itemTypeDirDest, os.ModePerm); err != nil { + return fmt.Errorf("unable to create folder '%s': %w", itemTypeDirDest, err) } // runtime/hub/postoverflows/s00-enrich/crowdsecurity/rdns.yaml - hubDirPostoverflowPath := filepath.Join(hubDirPostoverflowDest, postoverflowFileName) - if err := Copy(postoverflowSource, hubDirPostoverflowPath); err != nil { - return fmt.Errorf("unable to copy '%s' to '%s': %s", postoverflowSource, hubDirPostoverflowPath, err) + hubDirPostoverflowPath := filepath.Join(hubDirPostoverflowDest, sourceFilename) + if err := Copy(sourcePath, hubDirPostoverflowPath); err != nil { + return fmt.Errorf("unable to copy '%s' to '%s': %w", sourcePath, hubDirPostoverflowPath, err) } // runtime/postoverflows/s00-enrich/rdns.yaml - postoverflowDirParserPath := filepath.Join(postoverflowDirDest, postoverflowFileName) + postoverflowDirParserPath := filepath.Join(itemTypeDirDest, sourceFilename) if err := os.Symlink(hubDirPostoverflowPath, postoverflowDirParserPath); err != nil { if !os.IsExist(err) { - return fmt.Errorf("unable to symlink postoverflow '%s' to '%s': %s", hubDirPostoverflowPath, postoverflowDirParserPath, err) + return fmt.Errorf("unable to symlink postoverflow '%s' to '%s': %w", hubDirPostoverflowPath, postoverflowDirParserPath, err) } } return nil } -func (t *HubTestItem) installPostoverflowCustom(postoverflow string) error { - customPostoverflowExist := false - for _, customPath := range t.CustomItemsLocation { - // we check if its a custom postoverflow - customPostOverflowPath := filepath.Join(customPath, postoverflow) - if _, err := os.Stat(customPostOverflowPath); os.IsNotExist(err) { - continue - //return fmt.Errorf("postoverflow '%s' doesn't exist in the hub and doesn't appear to be a custom one.", postoverflow) - } +func (t *HubTestItem) 
installPostoverflowCustomFrom(postoverflow string, customPath string) (bool, error) { + // we check if its a custom postoverflow + customPostOverflowPath := filepath.Join(customPath, postoverflow) + if _, err := os.Stat(customPostOverflowPath); os.IsNotExist(err) { + return false, nil + } - customPostOverflowPathSplit := strings.Split(customPostOverflowPath, "/") - customPostoverflowName := customPostOverflowPathSplit[len(customPostOverflowPathSplit)-1] - // because path is postoverflows///parser.yaml and we wan't the stage - customPostoverflowStage := customPostOverflowPathSplit[len(customPostOverflowPathSplit)-3] + customPostOverflowPathSplit := strings.Split(customPostOverflowPath, "/") + customPostoverflowName := customPostOverflowPathSplit[len(customPostOverflowPathSplit)-1] + // because path is postoverflows///parser.yaml and we wan't the stage + customPostoverflowStage := customPostOverflowPathSplit[len(customPostOverflowPathSplit)-3] - // check if stage exist - hubStagePath := filepath.Join(t.HubPath, fmt.Sprintf("postoverflows/%s", customPostoverflowStage)) + // check if stage exist + hubStagePath := filepath.Join(t.HubPath, fmt.Sprintf("postoverflows/%s", customPostoverflowStage)) + if _, err := os.Stat(hubStagePath); os.IsNotExist(err) { + return false, fmt.Errorf("stage '%s' from extracted '%s' doesn't exist in the hub", customPostoverflowStage, hubStagePath) + } - if _, err := os.Stat(hubStagePath); os.IsNotExist(err) { - continue - //return fmt.Errorf("stage '%s' from extracted '%s' doesn't exist in the hub", customPostoverflowStage, hubStagePath) - } + stageDirDest := fmt.Sprintf("%s/postoverflows/%s/", t.RuntimePath, customPostoverflowStage) + if err := os.MkdirAll(stageDirDest, os.ModePerm); err != nil { + return false, fmt.Errorf("unable to create folder '%s': %w", stageDirDest, err) + } - postoverflowDirDest := fmt.Sprintf("%s/postoverflows/%s/", t.RuntimePath, customPostoverflowStage) - if err := os.MkdirAll(postoverflowDirDest, os.ModePerm); 
err != nil { - continue - //return fmt.Errorf("unable to create folder '%s': %s", postoverflowDirDest, err) + customPostoverflowDest := filepath.Join(stageDirDest, customPostoverflowName) + // if path to postoverflow exist, copy it + if err := Copy(customPostOverflowPath, customPostoverflowDest); err != nil { + return false, fmt.Errorf("unable to copy custom parser '%s' to '%s': %w", customPostOverflowPath, customPostoverflowDest, err) + } + + return true, nil +} + +func (t *HubTestItem) installPostoverflowCustom(postoverflow string) error { + for _, customPath := range t.CustomItemsLocation { + found, err := t.installPostoverflowCustomFrom(postoverflow, customPath) + if err != nil { + return err } - customPostoverflowDest := filepath.Join(postoverflowDirDest, customPostoverflowName) - // if path to postoverflow exist, copy it - if err := Copy(customPostOverflowPath, customPostoverflowDest); err != nil { - continue - //return fmt.Errorf("unable to copy custom parser '%s' to '%s': %s", customPostOverflowPath, customPostoverflowDest, err) + if found { + return nil } - customPostoverflowExist = true - break - } - if !customPostoverflowExist { - return fmt.Errorf("couldn't find custom postoverflow '%s' in the following location: %+v", postoverflow, t.CustomItemsLocation) } - return nil + return fmt.Errorf("couldn't find custom postoverflow '%s' in the following location: %+v", postoverflow, t.CustomItemsLocation) } func (t *HubTestItem) installPostoverflow(name string) error { diff --git a/pkg/hubtest/scenario.go b/pkg/hubtest/scenario.go index eaa831d8013..35ea465b7c0 100644 --- a/pkg/hubtest/scenario.go +++ b/pkg/hubtest/scenario.go @@ -8,74 +8,80 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -func (t *HubTestItem) installScenarioItem(hubScenario *cwhub.Item) error { - scenarioSource, err := filepath.Abs(filepath.Join(t.HubPath, hubScenario.RemotePath)) +func (t *HubTestItem) installScenarioItem(item *cwhub.Item) error { + sourcePath, err := 
filepath.Abs(filepath.Join(t.HubPath, item.RemotePath)) if err != nil { - return fmt.Errorf("can't get absolute path to: %s", scenarioSource) + return fmt.Errorf("can't get absolute path of '%s': %w", sourcePath, err) } - scenarioFileName := filepath.Base(scenarioSource) + sourceFilename := filepath.Base(sourcePath) // runtime/hub/scenarios/crowdsecurity/ - hubDirScenarioDest := filepath.Join(t.RuntimeHubPath, filepath.Dir(hubScenario.RemotePath)) + hubDirScenarioDest := filepath.Join(t.RuntimeHubPath, filepath.Dir(item.RemotePath)) // runtime/parsers/scenarios/ - scenarioDirDest := fmt.Sprintf("%s/scenarios/", t.RuntimePath) + itemTypeDirDest := fmt.Sprintf("%s/scenarios/", t.RuntimePath) if err := os.MkdirAll(hubDirScenarioDest, os.ModePerm); err != nil { - return fmt.Errorf("unable to create folder '%s': %s", hubDirScenarioDest, err) + return fmt.Errorf("unable to create folder '%s': %w", hubDirScenarioDest, err) } - if err := os.MkdirAll(scenarioDirDest, os.ModePerm); err != nil { - return fmt.Errorf("unable to create folder '%s': %s", scenarioDirDest, err) + if err := os.MkdirAll(itemTypeDirDest, os.ModePerm); err != nil { + return fmt.Errorf("unable to create folder '%s': %w", itemTypeDirDest, err) } // runtime/hub/scenarios/crowdsecurity/ssh-bf.yaml - hubDirScenarioPath := filepath.Join(hubDirScenarioDest, scenarioFileName) - if err := Copy(scenarioSource, hubDirScenarioPath); err != nil { - return fmt.Errorf("unable to copy '%s' to '%s': %s", scenarioSource, hubDirScenarioPath, err) + hubDirScenarioPath := filepath.Join(hubDirScenarioDest, sourceFilename) + if err := Copy(sourcePath, hubDirScenarioPath); err != nil { + return fmt.Errorf("unable to copy '%s' to '%s': %w", sourcePath, hubDirScenarioPath, err) } // runtime/scenarios/ssh-bf.yaml - scenarioDirParserPath := filepath.Join(scenarioDirDest, scenarioFileName) + scenarioDirParserPath := filepath.Join(itemTypeDirDest, sourceFilename) if err := os.Symlink(hubDirScenarioPath, scenarioDirParserPath); err 
!= nil { if !os.IsExist(err) { - return fmt.Errorf("unable to symlink scenario '%s' to '%s': %s", hubDirScenarioPath, scenarioDirParserPath, err) + return fmt.Errorf("unable to symlink scenario '%s' to '%s': %w", hubDirScenarioPath, scenarioDirParserPath, err) } } return nil } +func (t *HubTestItem) installScenarioCustomFrom(scenario string, customPath string) (bool, error) { + // we check if its a custom scenario + customScenarioPath := filepath.Join(customPath, scenario) + if _, err := os.Stat(customScenarioPath); os.IsNotExist(err) { + return false, nil + } + + itemTypeDirDest := fmt.Sprintf("%s/scenarios/", t.RuntimePath) + if err := os.MkdirAll(itemTypeDirDest, os.ModePerm); err != nil { + return false, fmt.Errorf("unable to create folder '%s': %w", itemTypeDirDest, err) + } + + scenarioFileName := filepath.Base(customScenarioPath) + + scenarioFileDest := filepath.Join(itemTypeDirDest, scenarioFileName) + if err := Copy(customScenarioPath, scenarioFileDest); err != nil { + return false, fmt.Errorf("unable to copy scenario from '%s' to '%s': %w", customScenarioPath, scenarioFileDest, err) + } + + return true, nil +} + func (t *HubTestItem) installScenarioCustom(scenario string) error { - customScenarioExist := false for _, customPath := range t.CustomItemsLocation { - // we check if its a custom scenario - customScenarioPath := filepath.Join(customPath, scenario) - if _, err := os.Stat(customScenarioPath); os.IsNotExist(err) { - continue - //return fmt.Errorf("scenarios '%s' doesn't exist in the hub and doesn't appear to be a custom one.", scenario) - } - - scenarioDirDest := fmt.Sprintf("%s/scenarios/", t.RuntimePath) - if err := os.MkdirAll(scenarioDirDest, os.ModePerm); err != nil { - return fmt.Errorf("unable to create folder '%s': %s", scenarioDirDest, err) + found, err := t.installScenarioCustomFrom(scenario, customPath) + if err != nil { + return err } - scenarioFileName := filepath.Base(customScenarioPath) - scenarioFileDest := 
filepath.Join(scenarioDirDest, scenarioFileName) - if err := Copy(customScenarioPath, scenarioFileDest); err != nil { - continue - //return fmt.Errorf("unable to copy scenario from '%s' to '%s': %s", customScenarioPath, scenarioFileDest, err) + if found { + return nil } - customScenarioExist = true - break - } - if !customScenarioExist { - return fmt.Errorf("couldn't find custom scenario '%s' in the following location: %+v", scenario, t.CustomItemsLocation) } - return nil + return fmt.Errorf("couldn't find custom scenario '%s' in the following location: %+v", scenario, t.CustomItemsLocation) } func (t *HubTestItem) installScenario(name string) error { From 97c441dab6c387d8aff77c2b07a256b0b0321f16 Mon Sep 17 00:00:00 2001 From: he2ss Date: Wed, 14 Feb 2024 12:26:42 +0100 Subject: [PATCH 029/318] implement highAvailability feature (#2506) * implement highAvailability feature --------- Co-authored-by: Marco Mariani --- pkg/apiserver/apic.go | 12 + pkg/apiserver/apic_metrics_test.go | 8 +- pkg/database/ent/client.go | 152 ++++++- pkg/database/ent/ent.go | 2 + pkg/database/ent/hook/hook.go | 12 + pkg/database/ent/lock.go | 117 ++++++ pkg/database/ent/lock/lock.go | 62 +++ pkg/database/ent/lock/where.go | 185 +++++++++ pkg/database/ent/lock_create.go | 215 ++++++++++ pkg/database/ent/lock_delete.go | 88 ++++ pkg/database/ent/lock_query.go | 526 ++++++++++++++++++++++++ pkg/database/ent/lock_update.go | 228 ++++++++++ pkg/database/ent/migrate/schema.go | 13 + pkg/database/ent/mutation.go | 382 +++++++++++++++++ pkg/database/ent/predicate/predicate.go | 3 + pkg/database/ent/runtime.go | 7 + pkg/database/ent/schema/lock.go | 22 + pkg/database/ent/tx.go | 3 + pkg/database/lock.go | 67 +++ 19 files changed, 2096 insertions(+), 8 deletions(-) create mode 100644 pkg/database/ent/lock.go create mode 100644 pkg/database/ent/lock/lock.go create mode 100644 pkg/database/ent/lock/where.go create mode 100644 pkg/database/ent/lock_create.go create mode 100644 
pkg/database/ent/lock_delete.go create mode 100644 pkg/database/ent/lock_query.go create mode 100644 pkg/database/ent/lock_update.go create mode 100644 pkg/database/ent/schema/lock.go create mode 100644 pkg/database/lock.go diff --git a/pkg/apiserver/apic.go b/pkg/apiserver/apic.go index d0b205c254d..2fdb01144a0 100644 --- a/pkg/apiserver/apic.go +++ b/pkg/apiserver/apic.go @@ -633,6 +633,13 @@ func (a *apic) PullTop(forcePull bool) error { } } + log.Debug("Acquiring lock for pullCAPI") + err = a.dbClient.AcquirePullCAPILock() + if a.dbClient.IsLocked(err) { + log.Info("PullCAPI is already running, skipping") + return nil + } + log.Infof("Starting community-blocklist update") data, _, err := a.apiClient.Decisions.GetStreamV3(context.Background(), apiclient.DecisionsStreamOpts{Startup: a.startup}) @@ -684,6 +691,11 @@ func (a *apic) PullTop(forcePull bool) error { return fmt.Errorf("while updating blocklists: %w", err) } + log.Debug("Releasing lock for pullCAPI") + if err := a.dbClient.ReleasePullCAPILock(); err != nil { + return fmt.Errorf("while releasing lock: %w", err) + } + return nil } diff --git a/pkg/apiserver/apic_metrics_test.go b/pkg/apiserver/apic_metrics_test.go index 2bc0dd26966..529dd6c6839 100644 --- a/pkg/apiserver/apic_metrics_test.go +++ b/pkg/apiserver/apic_metrics_test.go @@ -26,15 +26,15 @@ func TestAPICSendMetrics(t *testing.T) { }{ { name: "basic", - duration: time.Millisecond * 60, - metricsInterval: time.Millisecond * 10, + duration: time.Millisecond * 120, + metricsInterval: time.Millisecond * 20, expectedCalls: 5, setUp: func(api *apic) {}, }, { name: "with some metrics", - duration: time.Millisecond * 60, - metricsInterval: time.Millisecond * 10, + duration: time.Millisecond * 120, + metricsInterval: time.Millisecond * 20, expectedCalls: 5, setUp: func(api *apic) { api.dbClient.Ent.Machine.Delete().ExecX(context.Background()) diff --git a/pkg/database/ent/client.go b/pkg/database/ent/client.go index 2761ff088b5..006d52ef9ba 100644 --- 
a/pkg/database/ent/client.go +++ b/pkg/database/ent/client.go @@ -20,6 +20,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/database/ent/configitem" "github.com/crowdsecurity/crowdsec/pkg/database/ent/decision" "github.com/crowdsecurity/crowdsec/pkg/database/ent/event" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/lock" "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" "github.com/crowdsecurity/crowdsec/pkg/database/ent/meta" ) @@ -39,6 +40,8 @@ type Client struct { Decision *DecisionClient // Event is the client for interacting with the Event builders. Event *EventClient + // Lock is the client for interacting with the Lock builders. + Lock *LockClient // Machine is the client for interacting with the Machine builders. Machine *MachineClient // Meta is the client for interacting with the Meta builders. @@ -61,6 +64,7 @@ func (c *Client) init() { c.ConfigItem = NewConfigItemClient(c.config) c.Decision = NewDecisionClient(c.config) c.Event = NewEventClient(c.config) + c.Lock = NewLockClient(c.config) c.Machine = NewMachineClient(c.config) c.Meta = NewMetaClient(c.config) } @@ -153,6 +157,7 @@ func (c *Client) Tx(ctx context.Context) (*Tx, error) { ConfigItem: NewConfigItemClient(cfg), Decision: NewDecisionClient(cfg), Event: NewEventClient(cfg), + Lock: NewLockClient(cfg), Machine: NewMachineClient(cfg), Meta: NewMetaClient(cfg), }, nil @@ -179,6 +184,7 @@ func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) ConfigItem: NewConfigItemClient(cfg), Decision: NewDecisionClient(cfg), Event: NewEventClient(cfg), + Lock: NewLockClient(cfg), Machine: NewMachineClient(cfg), Meta: NewMetaClient(cfg), }, nil @@ -210,7 +216,8 @@ func (c *Client) Close() error { // In order to add hooks to a specific client, call: `client.Node.Use(...)`. 
func (c *Client) Use(hooks ...Hook) { for _, n := range []interface{ Use(...Hook) }{ - c.Alert, c.Bouncer, c.ConfigItem, c.Decision, c.Event, c.Machine, c.Meta, + c.Alert, c.Bouncer, c.ConfigItem, c.Decision, c.Event, c.Lock, c.Machine, + c.Meta, } { n.Use(hooks...) } @@ -220,7 +227,8 @@ func (c *Client) Use(hooks ...Hook) { // In order to add interceptors to a specific client, call: `client.Node.Intercept(...)`. func (c *Client) Intercept(interceptors ...Interceptor) { for _, n := range []interface{ Intercept(...Interceptor) }{ - c.Alert, c.Bouncer, c.ConfigItem, c.Decision, c.Event, c.Machine, c.Meta, + c.Alert, c.Bouncer, c.ConfigItem, c.Decision, c.Event, c.Lock, c.Machine, + c.Meta, } { n.Intercept(interceptors...) } @@ -239,6 +247,8 @@ func (c *Client) Mutate(ctx context.Context, m Mutation) (Value, error) { return c.Decision.mutate(ctx, m) case *EventMutation: return c.Event.mutate(ctx, m) + case *LockMutation: + return c.Lock.mutate(ctx, m) case *MachineMutation: return c.Machine.mutate(ctx, m) case *MetaMutation: @@ -1009,6 +1019,139 @@ func (c *EventClient) mutate(ctx context.Context, m *EventMutation) (Value, erro } } +// LockClient is a client for the Lock schema. +type LockClient struct { + config +} + +// NewLockClient returns a client for the Lock from the given config. +func NewLockClient(c config) *LockClient { + return &LockClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `lock.Hooks(f(g(h())))`. +func (c *LockClient) Use(hooks ...Hook) { + c.hooks.Lock = append(c.hooks.Lock, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `lock.Intercept(f(g(h())))`. +func (c *LockClient) Intercept(interceptors ...Interceptor) { + c.inters.Lock = append(c.inters.Lock, interceptors...) +} + +// Create returns a builder for creating a Lock entity. 
+func (c *LockClient) Create() *LockCreate { + mutation := newLockMutation(c.config, OpCreate) + return &LockCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of Lock entities. +func (c *LockClient) CreateBulk(builders ...*LockCreate) *LockCreateBulk { + return &LockCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *LockClient) MapCreateBulk(slice any, setFunc func(*LockCreate, int)) *LockCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &LockCreateBulk{err: fmt.Errorf("calling to LockClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*LockCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &LockCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for Lock. +func (c *LockClient) Update() *LockUpdate { + mutation := newLockMutation(c.config, OpUpdate) + return &LockUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *LockClient) UpdateOne(l *Lock) *LockUpdateOne { + mutation := newLockMutation(c.config, OpUpdateOne, withLock(l)) + return &LockUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *LockClient) UpdateOneID(id int) *LockUpdateOne { + mutation := newLockMutation(c.config, OpUpdateOne, withLockID(id)) + return &LockUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for Lock. 
+func (c *LockClient) Delete() *LockDelete { + mutation := newLockMutation(c.config, OpDelete) + return &LockDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *LockClient) DeleteOne(l *Lock) *LockDeleteOne { + return c.DeleteOneID(l.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *LockClient) DeleteOneID(id int) *LockDeleteOne { + builder := c.Delete().Where(lock.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &LockDeleteOne{builder} +} + +// Query returns a query builder for Lock. +func (c *LockClient) Query() *LockQuery { + return &LockQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeLock}, + inters: c.Interceptors(), + } +} + +// Get returns a Lock entity by its id. +func (c *LockClient) Get(ctx context.Context, id int) (*Lock, error) { + return c.Query().Where(lock.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *LockClient) GetX(ctx context.Context, id int) *Lock { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// Hooks returns the client hooks. +func (c *LockClient) Hooks() []Hook { + return c.hooks.Lock +} + +// Interceptors returns the client interceptors. 
+func (c *LockClient) Interceptors() []Interceptor { + return c.inters.Lock +} + +func (c *LockClient) mutate(ctx context.Context, m *LockMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&LockCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&LockUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&LockUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&LockDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown Lock mutation op: %q", m.Op()) + } +} + // MachineClient is a client for the Machine schema. type MachineClient struct { config @@ -1310,9 +1453,10 @@ func (c *MetaClient) mutate(ctx context.Context, m *MetaMutation) (Value, error) // hooks and interceptors per client, for fast access. type ( hooks struct { - Alert, Bouncer, ConfigItem, Decision, Event, Machine, Meta []ent.Hook + Alert, Bouncer, ConfigItem, Decision, Event, Lock, Machine, Meta []ent.Hook } inters struct { - Alert, Bouncer, ConfigItem, Decision, Event, Machine, Meta []ent.Interceptor + Alert, Bouncer, ConfigItem, Decision, Event, Lock, Machine, + Meta []ent.Interceptor } ) diff --git a/pkg/database/ent/ent.go b/pkg/database/ent/ent.go index 393ce9f1869..cb98ee9301c 100644 --- a/pkg/database/ent/ent.go +++ b/pkg/database/ent/ent.go @@ -17,6 +17,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/database/ent/configitem" "github.com/crowdsecurity/crowdsec/pkg/database/ent/decision" "github.com/crowdsecurity/crowdsec/pkg/database/ent/event" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/lock" "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" "github.com/crowdsecurity/crowdsec/pkg/database/ent/meta" ) @@ -84,6 +85,7 @@ func checkColumn(table, column string) error { configitem.Table: configitem.ValidColumn, decision.Table: decision.ValidColumn, 
event.Table: event.ValidColumn, + lock.Table: lock.ValidColumn, machine.Table: machine.ValidColumn, meta.Table: meta.ValidColumn, }) diff --git a/pkg/database/ent/hook/hook.go b/pkg/database/ent/hook/hook.go index 7ec9c3ab1d8..fdc31539679 100644 --- a/pkg/database/ent/hook/hook.go +++ b/pkg/database/ent/hook/hook.go @@ -69,6 +69,18 @@ func (f EventFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.EventMutation", m) } +// The LockFunc type is an adapter to allow the use of ordinary +// function as Lock mutator. +type LockFunc func(context.Context, *ent.LockMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f LockFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.LockMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.LockMutation", m) +} + // The MachineFunc type is an adapter to allow the use of ordinary // function as Machine mutator. type MachineFunc func(context.Context, *ent.MachineMutation) (ent.Value, error) diff --git a/pkg/database/ent/lock.go b/pkg/database/ent/lock.go new file mode 100644 index 00000000000..85556a30644 --- /dev/null +++ b/pkg/database/ent/lock.go @@ -0,0 +1,117 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/lock" +) + +// Lock is the model entity for the Lock schema. +type Lock struct { + config `json:"-"` + // ID of the ent. + ID int `json:"id,omitempty"` + // Name holds the value of the "name" field. + Name string `json:"name"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at"` + selectValues sql.SelectValues +} + +// scanValues returns the types for scanning values from sql.Rows. 
+func (*Lock) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case lock.FieldID: + values[i] = new(sql.NullInt64) + case lock.FieldName: + values[i] = new(sql.NullString) + case lock.FieldCreatedAt: + values[i] = new(sql.NullTime) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the Lock fields. +func (l *Lock) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case lock.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + l.ID = int(value.Int64) + case lock.FieldName: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field name", values[i]) + } else if value.Valid { + l.Name = value.String + } + case lock.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + l.CreatedAt = value.Time + } + default: + l.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the Lock. +// This includes values selected through modifiers, order, etc. +func (l *Lock) Value(name string) (ent.Value, error) { + return l.selectValues.Get(name) +} + +// Update returns a builder for updating this Lock. +// Note that you need to call Lock.Unwrap() before calling this method if this Lock +// was returned from a transaction, and the transaction was committed or rolled back. 
+func (l *Lock) Update() *LockUpdateOne { + return NewLockClient(l.config).UpdateOne(l) +} + +// Unwrap unwraps the Lock entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (l *Lock) Unwrap() *Lock { + _tx, ok := l.config.driver.(*txDriver) + if !ok { + panic("ent: Lock is not a transactional entity") + } + l.config.driver = _tx.drv + return l +} + +// String implements the fmt.Stringer. +func (l *Lock) String() string { + var builder strings.Builder + builder.WriteString("Lock(") + builder.WriteString(fmt.Sprintf("id=%v, ", l.ID)) + builder.WriteString("name=") + builder.WriteString(l.Name) + builder.WriteString(", ") + builder.WriteString("created_at=") + builder.WriteString(l.CreatedAt.Format(time.ANSIC)) + builder.WriteByte(')') + return builder.String() +} + +// Locks is a parsable slice of Lock. +type Locks []*Lock diff --git a/pkg/database/ent/lock/lock.go b/pkg/database/ent/lock/lock.go new file mode 100644 index 00000000000..d0143470a75 --- /dev/null +++ b/pkg/database/ent/lock/lock.go @@ -0,0 +1,62 @@ +// Code generated by ent, DO NOT EDIT. + +package lock + +import ( + "time" + + "entgo.io/ent/dialect/sql" +) + +const ( + // Label holds the string label denoting the lock type in the database. + Label = "lock" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldName holds the string denoting the name field in the database. + FieldName = "name" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // Table holds the table name of the lock in the database. + Table = "locks" +) + +// Columns holds all SQL columns for lock fields. +var Columns = []string{ + FieldID, + FieldName, + FieldCreatedAt, +} + +// ValidColumn reports if the column name is valid (part of the table columns). 
+func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +var ( + // DefaultCreatedAt holds the default value on creation for the "created_at" field. + DefaultCreatedAt func() time.Time +) + +// OrderOption defines the ordering options for the Lock queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByName orders the results by the name field. +func ByName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldName, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} diff --git a/pkg/database/ent/lock/where.go b/pkg/database/ent/lock/where.go new file mode 100644 index 00000000000..cf59362d203 --- /dev/null +++ b/pkg/database/ent/lock/where.go @@ -0,0 +1,185 @@ +// Code generated by ent, DO NOT EDIT. + +package lock + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id int) predicate.Lock { + return predicate.Lock(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int) predicate.Lock { + return predicate.Lock(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int) predicate.Lock { + return predicate.Lock(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int) predicate.Lock { + return predicate.Lock(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. 
+func IDNotIn(ids ...int) predicate.Lock { + return predicate.Lock(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int) predicate.Lock { + return predicate.Lock(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int) predicate.Lock { + return predicate.Lock(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int) predicate.Lock { + return predicate.Lock(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int) predicate.Lock { + return predicate.Lock(sql.FieldLTE(FieldID, id)) +} + +// Name applies equality check predicate on the "name" field. It's identical to NameEQ. +func Name(v string) predicate.Lock { + return predicate.Lock(sql.FieldEQ(FieldName, v)) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.Lock { + return predicate.Lock(sql.FieldEQ(FieldCreatedAt, v)) +} + +// NameEQ applies the EQ predicate on the "name" field. +func NameEQ(v string) predicate.Lock { + return predicate.Lock(sql.FieldEQ(FieldName, v)) +} + +// NameNEQ applies the NEQ predicate on the "name" field. +func NameNEQ(v string) predicate.Lock { + return predicate.Lock(sql.FieldNEQ(FieldName, v)) +} + +// NameIn applies the In predicate on the "name" field. +func NameIn(vs ...string) predicate.Lock { + return predicate.Lock(sql.FieldIn(FieldName, vs...)) +} + +// NameNotIn applies the NotIn predicate on the "name" field. +func NameNotIn(vs ...string) predicate.Lock { + return predicate.Lock(sql.FieldNotIn(FieldName, vs...)) +} + +// NameGT applies the GT predicate on the "name" field. +func NameGT(v string) predicate.Lock { + return predicate.Lock(sql.FieldGT(FieldName, v)) +} + +// NameGTE applies the GTE predicate on the "name" field. 
+func NameGTE(v string) predicate.Lock { + return predicate.Lock(sql.FieldGTE(FieldName, v)) +} + +// NameLT applies the LT predicate on the "name" field. +func NameLT(v string) predicate.Lock { + return predicate.Lock(sql.FieldLT(FieldName, v)) +} + +// NameLTE applies the LTE predicate on the "name" field. +func NameLTE(v string) predicate.Lock { + return predicate.Lock(sql.FieldLTE(FieldName, v)) +} + +// NameContains applies the Contains predicate on the "name" field. +func NameContains(v string) predicate.Lock { + return predicate.Lock(sql.FieldContains(FieldName, v)) +} + +// NameHasPrefix applies the HasPrefix predicate on the "name" field. +func NameHasPrefix(v string) predicate.Lock { + return predicate.Lock(sql.FieldHasPrefix(FieldName, v)) +} + +// NameHasSuffix applies the HasSuffix predicate on the "name" field. +func NameHasSuffix(v string) predicate.Lock { + return predicate.Lock(sql.FieldHasSuffix(FieldName, v)) +} + +// NameEqualFold applies the EqualFold predicate on the "name" field. +func NameEqualFold(v string) predicate.Lock { + return predicate.Lock(sql.FieldEqualFold(FieldName, v)) +} + +// NameContainsFold applies the ContainsFold predicate on the "name" field. +func NameContainsFold(v string) predicate.Lock { + return predicate.Lock(sql.FieldContainsFold(FieldName, v)) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.Lock { + return predicate.Lock(sql.FieldEQ(FieldCreatedAt, v)) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.Lock { + return predicate.Lock(sql.FieldNEQ(FieldCreatedAt, v)) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.Lock { + return predicate.Lock(sql.FieldIn(FieldCreatedAt, vs...)) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. 
+func CreatedAtNotIn(vs ...time.Time) predicate.Lock { + return predicate.Lock(sql.FieldNotIn(FieldCreatedAt, vs...)) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.Lock { + return predicate.Lock(sql.FieldGT(FieldCreatedAt, v)) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.Lock { + return predicate.Lock(sql.FieldGTE(FieldCreatedAt, v)) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.Lock { + return predicate.Lock(sql.FieldLT(FieldCreatedAt, v)) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.Lock { + return predicate.Lock(sql.FieldLTE(FieldCreatedAt, v)) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.Lock) predicate.Lock { + return predicate.Lock(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.Lock) predicate.Lock { + return predicate.Lock(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.Lock) predicate.Lock { + return predicate.Lock(sql.NotPredicates(p)) +} diff --git a/pkg/database/ent/lock_create.go b/pkg/database/ent/lock_create.go new file mode 100644 index 00000000000..e2c29c88324 --- /dev/null +++ b/pkg/database/ent/lock_create.go @@ -0,0 +1,215 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/lock" +) + +// LockCreate is the builder for creating a Lock entity. +type LockCreate struct { + config + mutation *LockMutation + hooks []Hook +} + +// SetName sets the "name" field. 
+func (lc *LockCreate) SetName(s string) *LockCreate { + lc.mutation.SetName(s) + return lc +} + +// SetCreatedAt sets the "created_at" field. +func (lc *LockCreate) SetCreatedAt(t time.Time) *LockCreate { + lc.mutation.SetCreatedAt(t) + return lc +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (lc *LockCreate) SetNillableCreatedAt(t *time.Time) *LockCreate { + if t != nil { + lc.SetCreatedAt(*t) + } + return lc +} + +// Mutation returns the LockMutation object of the builder. +func (lc *LockCreate) Mutation() *LockMutation { + return lc.mutation +} + +// Save creates the Lock in the database. +func (lc *LockCreate) Save(ctx context.Context) (*Lock, error) { + lc.defaults() + return withHooks(ctx, lc.sqlSave, lc.mutation, lc.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (lc *LockCreate) SaveX(ctx context.Context) *Lock { + v, err := lc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (lc *LockCreate) Exec(ctx context.Context) error { + _, err := lc.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (lc *LockCreate) ExecX(ctx context.Context) { + if err := lc.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (lc *LockCreate) defaults() { + if _, ok := lc.mutation.CreatedAt(); !ok { + v := lock.DefaultCreatedAt() + lc.mutation.SetCreatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (lc *LockCreate) check() error { + if _, ok := lc.mutation.Name(); !ok { + return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "Lock.name"`)} + } + if _, ok := lc.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "Lock.created_at"`)} + } + return nil +} + +func (lc *LockCreate) sqlSave(ctx context.Context) (*Lock, error) { + if err := lc.check(); err != nil { + return nil, err + } + _node, _spec := lc.createSpec() + if err := sqlgraph.CreateNode(ctx, lc.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int(id) + lc.mutation.id = &_node.ID + lc.mutation.done = true + return _node, nil +} + +func (lc *LockCreate) createSpec() (*Lock, *sqlgraph.CreateSpec) { + var ( + _node = &Lock{config: lc.config} + _spec = sqlgraph.NewCreateSpec(lock.Table, sqlgraph.NewFieldSpec(lock.FieldID, field.TypeInt)) + ) + if value, ok := lc.mutation.Name(); ok { + _spec.SetField(lock.FieldName, field.TypeString, value) + _node.Name = value + } + if value, ok := lc.mutation.CreatedAt(); ok { + _spec.SetField(lock.FieldCreatedAt, field.TypeTime, value) + _node.CreatedAt = value + } + return _node, _spec +} + +// LockCreateBulk is the builder for creating many Lock entities in bulk. +type LockCreateBulk struct { + config + err error + builders []*LockCreate +} + +// Save creates the Lock entities in the database. 
+func (lcb *LockCreateBulk) Save(ctx context.Context) ([]*Lock, error) { + if lcb.err != nil { + return nil, lcb.err + } + specs := make([]*sqlgraph.CreateSpec, len(lcb.builders)) + nodes := make([]*Lock, len(lcb.builders)) + mutators := make([]Mutator, len(lcb.builders)) + for i := range lcb.builders { + func(i int, root context.Context) { + builder := lcb.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*LockMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, lcb.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, lcb.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + if specs[i].ID.Value != nil { + id := specs[i].ID.Value.(int64) + nodes[i].ID = int(id) + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, lcb.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (lcb *LockCreateBulk) SaveX(ctx context.Context) []*Lock { + v, err := lcb.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. 
+func (lcb *LockCreateBulk) Exec(ctx context.Context) error { + _, err := lcb.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (lcb *LockCreateBulk) ExecX(ctx context.Context) { + if err := lcb.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/pkg/database/ent/lock_delete.go b/pkg/database/ent/lock_delete.go new file mode 100644 index 00000000000..2275c608f75 --- /dev/null +++ b/pkg/database/ent/lock_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/lock" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" +) + +// LockDelete is the builder for deleting a Lock entity. +type LockDelete struct { + config + hooks []Hook + mutation *LockMutation +} + +// Where appends a list predicates to the LockDelete builder. +func (ld *LockDelete) Where(ps ...predicate.Lock) *LockDelete { + ld.mutation.Where(ps...) + return ld +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (ld *LockDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, ld.sqlExec, ld.mutation, ld.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (ld *LockDelete) ExecX(ctx context.Context) int { + n, err := ld.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (ld *LockDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(lock.Table, sqlgraph.NewFieldSpec(lock.FieldID, field.TypeInt)) + if ps := ld.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, ld.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + ld.mutation.done = true + return affected, err +} + +// LockDeleteOne is the builder for deleting a single Lock entity. +type LockDeleteOne struct { + ld *LockDelete +} + +// Where appends a list predicates to the LockDelete builder. +func (ldo *LockDeleteOne) Where(ps ...predicate.Lock) *LockDeleteOne { + ldo.ld.mutation.Where(ps...) + return ldo +} + +// Exec executes the deletion query. +func (ldo *LockDeleteOne) Exec(ctx context.Context) error { + n, err := ldo.ld.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{lock.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (ldo *LockDeleteOne) ExecX(ctx context.Context) { + if err := ldo.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/pkg/database/ent/lock_query.go b/pkg/database/ent/lock_query.go new file mode 100644 index 00000000000..75e5da48a94 --- /dev/null +++ b/pkg/database/ent/lock_query.go @@ -0,0 +1,526 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "fmt" + "math" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/lock" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" +) + +// LockQuery is the builder for querying Lock entities. 
+type LockQuery struct { + config + ctx *QueryContext + order []lock.OrderOption + inters []Interceptor + predicates []predicate.Lock + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the LockQuery builder. +func (lq *LockQuery) Where(ps ...predicate.Lock) *LockQuery { + lq.predicates = append(lq.predicates, ps...) + return lq +} + +// Limit the number of records to be returned by this query. +func (lq *LockQuery) Limit(limit int) *LockQuery { + lq.ctx.Limit = &limit + return lq +} + +// Offset to start from. +func (lq *LockQuery) Offset(offset int) *LockQuery { + lq.ctx.Offset = &offset + return lq +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (lq *LockQuery) Unique(unique bool) *LockQuery { + lq.ctx.Unique = &unique + return lq +} + +// Order specifies how the records should be ordered. +func (lq *LockQuery) Order(o ...lock.OrderOption) *LockQuery { + lq.order = append(lq.order, o...) + return lq +} + +// First returns the first Lock entity from the query. +// Returns a *NotFoundError when no Lock was found. +func (lq *LockQuery) First(ctx context.Context) (*Lock, error) { + nodes, err := lq.Limit(1).All(setContextOp(ctx, lq.ctx, "First")) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{lock.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (lq *LockQuery) FirstX(ctx context.Context) *Lock { + node, err := lq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first Lock ID from the query. +// Returns a *NotFoundError when no Lock ID was found. 
+func (lq *LockQuery) FirstID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = lq.Limit(1).IDs(setContextOp(ctx, lq.ctx, "FirstID")); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{lock.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (lq *LockQuery) FirstIDX(ctx context.Context) int { + id, err := lq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single Lock entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one Lock entity is found. +// Returns a *NotFoundError when no Lock entities are found. +func (lq *LockQuery) Only(ctx context.Context) (*Lock, error) { + nodes, err := lq.Limit(2).All(setContextOp(ctx, lq.ctx, "Only")) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{lock.Label} + default: + return nil, &NotSingularError{lock.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (lq *LockQuery) OnlyX(ctx context.Context) *Lock { + node, err := lq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only Lock ID in the query. +// Returns a *NotSingularError when more than one Lock ID is found. +// Returns a *NotFoundError when no entities are found. +func (lq *LockQuery) OnlyID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = lq.Limit(2).IDs(setContextOp(ctx, lq.ctx, "OnlyID")); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{lock.Label} + default: + err = &NotSingularError{lock.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. 
+func (lq *LockQuery) OnlyIDX(ctx context.Context) int { + id, err := lq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of Locks. +func (lq *LockQuery) All(ctx context.Context) ([]*Lock, error) { + ctx = setContextOp(ctx, lq.ctx, "All") + if err := lq.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*Lock, *LockQuery]() + return withInterceptors[[]*Lock](ctx, lq, qr, lq.inters) +} + +// AllX is like All, but panics if an error occurs. +func (lq *LockQuery) AllX(ctx context.Context) []*Lock { + nodes, err := lq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of Lock IDs. +func (lq *LockQuery) IDs(ctx context.Context) (ids []int, err error) { + if lq.ctx.Unique == nil && lq.path != nil { + lq.Unique(true) + } + ctx = setContextOp(ctx, lq.ctx, "IDs") + if err = lq.Select(lock.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (lq *LockQuery) IDsX(ctx context.Context) []int { + ids, err := lq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (lq *LockQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, lq.ctx, "Count") + if err := lq.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, lq, querierCount[*LockQuery](), lq.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (lq *LockQuery) CountX(ctx context.Context) int { + count, err := lq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. 
+func (lq *LockQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, lq.ctx, "Exist") + switch _, err := lq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (lq *LockQuery) ExistX(ctx context.Context) bool { + exist, err := lq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the LockQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (lq *LockQuery) Clone() *LockQuery { + if lq == nil { + return nil + } + return &LockQuery{ + config: lq.config, + ctx: lq.ctx.Clone(), + order: append([]lock.OrderOption{}, lq.order...), + inters: append([]Interceptor{}, lq.inters...), + predicates: append([]predicate.Lock{}, lq.predicates...), + // clone intermediate query. + sql: lq.sql.Clone(), + path: lq.path, + } +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// Name string `json:"name"` +// Count int `json:"count,omitempty"` +// } +// +// client.Lock.Query(). +// GroupBy(lock.FieldName). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (lq *LockQuery) GroupBy(field string, fields ...string) *LockGroupBy { + lq.ctx.Fields = append([]string{field}, fields...) + grbuild := &LockGroupBy{build: lq} + grbuild.flds = &lq.ctx.Fields + grbuild.label = lock.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// Name string `json:"name"` +// } +// +// client.Lock.Query(). +// Select(lock.FieldName). 
+// Scan(ctx, &v) +func (lq *LockQuery) Select(fields ...string) *LockSelect { + lq.ctx.Fields = append(lq.ctx.Fields, fields...) + sbuild := &LockSelect{LockQuery: lq} + sbuild.label = lock.Label + sbuild.flds, sbuild.scan = &lq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a LockSelect configured with the given aggregations. +func (lq *LockQuery) Aggregate(fns ...AggregateFunc) *LockSelect { + return lq.Select().Aggregate(fns...) +} + +func (lq *LockQuery) prepareQuery(ctx context.Context) error { + for _, inter := range lq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, lq); err != nil { + return err + } + } + } + for _, f := range lq.ctx.Fields { + if !lock.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if lq.path != nil { + prev, err := lq.path(ctx) + if err != nil { + return err + } + lq.sql = prev + } + return nil +} + +func (lq *LockQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Lock, error) { + var ( + nodes = []*Lock{} + _spec = lq.querySpec() + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*Lock).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &Lock{config: lq.config} + nodes = append(nodes, node) + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, lq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + return nodes, nil +} + +func (lq *LockQuery) sqlCount(ctx context.Context) (int, error) { + _spec := lq.querySpec() + _spec.Node.Columns = lq.ctx.Fields + if len(lq.ctx.Fields) > 0 { + _spec.Unique = lq.ctx.Unique != nil && *lq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, lq.driver, _spec) +} + +func (lq 
*LockQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(lock.Table, lock.Columns, sqlgraph.NewFieldSpec(lock.FieldID, field.TypeInt)) + _spec.From = lq.sql + if unique := lq.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if lq.path != nil { + _spec.Unique = true + } + if fields := lq.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, lock.FieldID) + for i := range fields { + if fields[i] != lock.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := lq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := lq.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := lq.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := lq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (lq *LockQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(lq.driver.Dialect()) + t1 := builder.Table(lock.Table) + columns := lq.ctx.Fields + if len(columns) == 0 { + columns = lock.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if lq.sql != nil { + selector = lq.sql + selector.Select(selector.Columns(columns...)...) + } + if lq.ctx.Unique != nil && *lq.ctx.Unique { + selector.Distinct() + } + for _, p := range lq.predicates { + p(selector) + } + for _, p := range lq.order { + p(selector) + } + if offset := lq.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := lq.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// LockGroupBy is the group-by builder for Lock entities. 
+type LockGroupBy struct { + selector + build *LockQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (lgb *LockGroupBy) Aggregate(fns ...AggregateFunc) *LockGroupBy { + lgb.fns = append(lgb.fns, fns...) + return lgb +} + +// Scan applies the selector query and scans the result into the given value. +func (lgb *LockGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, lgb.build.ctx, "GroupBy") + if err := lgb.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*LockQuery, *LockGroupBy](ctx, lgb.build, lgb, lgb.build.inters, v) +} + +func (lgb *LockGroupBy) sqlScan(ctx context.Context, root *LockQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(lgb.fns)) + for _, fn := range lgb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*lgb.flds)+len(lgb.fns)) + for _, f := range *lgb.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*lgb.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := lgb.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// LockSelect is the builder for selecting fields of Lock entities. +type LockSelect struct { + *LockQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (ls *LockSelect) Aggregate(fns ...AggregateFunc) *LockSelect { + ls.fns = append(ls.fns, fns...) + return ls +} + +// Scan applies the selector query and scans the result into the given value. 
+func (ls *LockSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, ls.ctx, "Select") + if err := ls.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*LockQuery, *LockSelect](ctx, ls.LockQuery, ls, ls.inters, v) +} + +func (ls *LockSelect) sqlScan(ctx context.Context, root *LockQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(ls.fns)) + for _, fn := range ls.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*ls.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := ls.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/pkg/database/ent/lock_update.go b/pkg/database/ent/lock_update.go new file mode 100644 index 00000000000..f4deda6e3a8 --- /dev/null +++ b/pkg/database/ent/lock_update.go @@ -0,0 +1,228 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/lock" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" +) + +// LockUpdate is the builder for updating Lock entities. +type LockUpdate struct { + config + hooks []Hook + mutation *LockMutation +} + +// Where appends a list predicates to the LockUpdate builder. +func (lu *LockUpdate) Where(ps ...predicate.Lock) *LockUpdate { + lu.mutation.Where(ps...) + return lu +} + +// SetName sets the "name" field. +func (lu *LockUpdate) SetName(s string) *LockUpdate { + lu.mutation.SetName(s) + return lu +} + +// SetCreatedAt sets the "created_at" field. 
+func (lu *LockUpdate) SetCreatedAt(t time.Time) *LockUpdate { + lu.mutation.SetCreatedAt(t) + return lu +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (lu *LockUpdate) SetNillableCreatedAt(t *time.Time) *LockUpdate { + if t != nil { + lu.SetCreatedAt(*t) + } + return lu +} + +// Mutation returns the LockMutation object of the builder. +func (lu *LockUpdate) Mutation() *LockMutation { + return lu.mutation +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (lu *LockUpdate) Save(ctx context.Context) (int, error) { + return withHooks(ctx, lu.sqlSave, lu.mutation, lu.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (lu *LockUpdate) SaveX(ctx context.Context) int { + affected, err := lu.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (lu *LockUpdate) Exec(ctx context.Context) error { + _, err := lu.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (lu *LockUpdate) ExecX(ctx context.Context) { + if err := lu.Exec(ctx); err != nil { + panic(err) + } +} + +func (lu *LockUpdate) sqlSave(ctx context.Context) (n int, err error) { + _spec := sqlgraph.NewUpdateSpec(lock.Table, lock.Columns, sqlgraph.NewFieldSpec(lock.FieldID, field.TypeInt)) + if ps := lu.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := lu.mutation.Name(); ok { + _spec.SetField(lock.FieldName, field.TypeString, value) + } + if value, ok := lu.mutation.CreatedAt(); ok { + _spec.SetField(lock.FieldCreatedAt, field.TypeTime, value) + } + if n, err = sqlgraph.UpdateNodes(ctx, lu.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{lock.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + lu.mutation.done = true + return n, nil +} + +// LockUpdateOne is the builder for updating a single Lock entity. +type LockUpdateOne struct { + config + fields []string + hooks []Hook + mutation *LockMutation +} + +// SetName sets the "name" field. +func (luo *LockUpdateOne) SetName(s string) *LockUpdateOne { + luo.mutation.SetName(s) + return luo +} + +// SetCreatedAt sets the "created_at" field. +func (luo *LockUpdateOne) SetCreatedAt(t time.Time) *LockUpdateOne { + luo.mutation.SetCreatedAt(t) + return luo +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (luo *LockUpdateOne) SetNillableCreatedAt(t *time.Time) *LockUpdateOne { + if t != nil { + luo.SetCreatedAt(*t) + } + return luo +} + +// Mutation returns the LockMutation object of the builder. +func (luo *LockUpdateOne) Mutation() *LockMutation { + return luo.mutation +} + +// Where appends a list predicates to the LockUpdate builder. +func (luo *LockUpdateOne) Where(ps ...predicate.Lock) *LockUpdateOne { + luo.mutation.Where(ps...) 
+ return luo +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (luo *LockUpdateOne) Select(field string, fields ...string) *LockUpdateOne { + luo.fields = append([]string{field}, fields...) + return luo +} + +// Save executes the query and returns the updated Lock entity. +func (luo *LockUpdateOne) Save(ctx context.Context) (*Lock, error) { + return withHooks(ctx, luo.sqlSave, luo.mutation, luo.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (luo *LockUpdateOne) SaveX(ctx context.Context) *Lock { + node, err := luo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (luo *LockUpdateOne) Exec(ctx context.Context) error { + _, err := luo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (luo *LockUpdateOne) ExecX(ctx context.Context) { + if err := luo.Exec(ctx); err != nil { + panic(err) + } +} + +func (luo *LockUpdateOne) sqlSave(ctx context.Context) (_node *Lock, err error) { + _spec := sqlgraph.NewUpdateSpec(lock.Table, lock.Columns, sqlgraph.NewFieldSpec(lock.FieldID, field.TypeInt)) + id, ok := luo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Lock.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := luo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, lock.FieldID) + for _, f := range fields { + if !lock.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != lock.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := luo.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := 
luo.mutation.Name(); ok { + _spec.SetField(lock.FieldName, field.TypeString, value) + } + if value, ok := luo.mutation.CreatedAt(); ok { + _spec.SetField(lock.FieldCreatedAt, field.TypeTime, value) + } + _node = &Lock{config: luo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, luo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{lock.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + luo.mutation.done = true + return _node, nil +} diff --git a/pkg/database/ent/migrate/schema.go b/pkg/database/ent/migrate/schema.go index 375fd4e784a..c3ffed42239 100644 --- a/pkg/database/ent/migrate/schema.go +++ b/pkg/database/ent/migrate/schema.go @@ -178,6 +178,18 @@ var ( }, }, } + // LocksColumns holds the columns for the "locks" table. + LocksColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt, Increment: true}, + {Name: "name", Type: field.TypeString, Unique: true}, + {Name: "created_at", Type: field.TypeTime}, + } + // LocksTable holds the schema information for the "locks" table. + LocksTable = &schema.Table{ + Name: "locks", + Columns: LocksColumns, + PrimaryKey: []*schema.Column{LocksColumns[0]}, + } // MachinesColumns holds the columns for the "machines" table. 
MachinesColumns = []*schema.Column{ {Name: "id", Type: field.TypeInt, Increment: true}, @@ -237,6 +249,7 @@ var ( ConfigItemsTable, DecisionsTable, EventsTable, + LocksTable, MachinesTable, MetaTable, } diff --git a/pkg/database/ent/mutation.go b/pkg/database/ent/mutation.go index c5808d0d9b8..365824de739 100644 --- a/pkg/database/ent/mutation.go +++ b/pkg/database/ent/mutation.go @@ -16,6 +16,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/database/ent/configitem" "github.com/crowdsecurity/crowdsec/pkg/database/ent/decision" "github.com/crowdsecurity/crowdsec/pkg/database/ent/event" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/lock" "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" "github.com/crowdsecurity/crowdsec/pkg/database/ent/meta" "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" @@ -35,6 +36,7 @@ const ( TypeConfigItem = "ConfigItem" TypeDecision = "Decision" TypeEvent = "Event" + TypeLock = "Lock" TypeMachine = "Machine" TypeMeta = "Meta" ) @@ -6165,6 +6167,386 @@ func (m *EventMutation) ResetEdge(name string) error { return fmt.Errorf("unknown Event edge %s", name) } +// LockMutation represents an operation that mutates the Lock nodes in the graph. +type LockMutation struct { + config + op Op + typ string + id *int + name *string + created_at *time.Time + clearedFields map[string]struct{} + done bool + oldValue func(context.Context) (*Lock, error) + predicates []predicate.Lock +} + +var _ ent.Mutation = (*LockMutation)(nil) + +// lockOption allows management of the mutation configuration using functional options. +type lockOption func(*LockMutation) + +// newLockMutation creates new mutation for the Lock entity. +func newLockMutation(c config, op Op, opts ...lockOption) *LockMutation { + m := &LockMutation{ + config: c, + op: op, + typ: TypeLock, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withLockID sets the ID field of the mutation. 
+func withLockID(id int) lockOption { + return func(m *LockMutation) { + var ( + err error + once sync.Once + value *Lock + ) + m.oldValue = func(ctx context.Context) (*Lock, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().Lock.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withLock sets the old Lock of the mutation. +func withLock(node *Lock) lockOption { + return func(m *LockMutation) { + m.oldValue = func(context.Context) (*Lock, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m LockMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m LockMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *LockMutation) ID() (id int, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. +// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. 
+func (m *LockMutation) IDs(ctx context.Context) ([]int, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []int{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().Lock.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetName sets the "name" field. +func (m *LockMutation) SetName(s string) { + m.name = &s +} + +// Name returns the value of the "name" field in the mutation. +func (m *LockMutation) Name() (r string, exists bool) { + v := m.name + if v == nil { + return + } + return *v, true +} + +// OldName returns the old "name" field's value of the Lock entity. +// If the Lock object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *LockMutation) OldName(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldName is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldName requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldName: %w", err) + } + return oldValue.Name, nil +} + +// ResetName resets all changes to the "name" field. +func (m *LockMutation) ResetName() { + m.name = nil +} + +// SetCreatedAt sets the "created_at" field. +func (m *LockMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the value of the "created_at" field in the mutation. +func (m *LockMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// OldCreatedAt returns the old "created_at" field's value of the Lock entity. 
+// If the Lock object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *LockMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *LockMutation) ResetCreatedAt() { + m.created_at = nil +} + +// Where appends a list predicates to the LockMutation builder. +func (m *LockMutation) Where(ps ...predicate.Lock) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the LockMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *LockMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.Lock, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *LockMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *LockMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (Lock). +func (m *LockMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). 
+func (m *LockMutation) Fields() []string { + fields := make([]string, 0, 2) + if m.name != nil { + fields = append(fields, lock.FieldName) + } + if m.created_at != nil { + fields = append(fields, lock.FieldCreatedAt) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *LockMutation) Field(name string) (ent.Value, bool) { + switch name { + case lock.FieldName: + return m.Name() + case lock.FieldCreatedAt: + return m.CreatedAt() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *LockMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case lock.FieldName: + return m.OldName(ctx) + case lock.FieldCreatedAt: + return m.OldCreatedAt(ctx) + } + return nil, fmt.Errorf("unknown Lock field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *LockMutation) SetField(name string, value ent.Value) error { + switch name { + case lock.FieldName: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetName(v) + return nil + case lock.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + } + return fmt.Errorf("unknown Lock field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *LockMutation) AddedFields() []string { + return nil +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. 
The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *LockMutation) AddedField(name string) (ent.Value, bool) { + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *LockMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown Lock numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *LockMutation) ClearedFields() []string { + return nil +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *LockMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *LockMutation) ClearField(name string) error { + return fmt.Errorf("unknown Lock nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *LockMutation) ResetField(name string) error { + switch name { + case lock.FieldName: + m.ResetName() + return nil + case lock.FieldCreatedAt: + m.ResetCreatedAt() + return nil + } + return fmt.Errorf("unknown Lock field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *LockMutation) AddedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *LockMutation) AddedIDs(name string) []ent.Value { + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. 
+func (m *LockMutation) RemovedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *LockMutation) RemovedIDs(name string) []ent.Value { + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *LockMutation) ClearedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *LockMutation) EdgeCleared(name string) bool { + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *LockMutation) ClearEdge(name string) error { + return fmt.Errorf("unknown Lock unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *LockMutation) ResetEdge(name string) error { + return fmt.Errorf("unknown Lock edge %s", name) +} + // MachineMutation represents an operation that mutates the Machine nodes in the graph. type MachineMutation struct { config diff --git a/pkg/database/ent/predicate/predicate.go b/pkg/database/ent/predicate/predicate.go index e95abcec343..ad2e6d3f327 100644 --- a/pkg/database/ent/predicate/predicate.go +++ b/pkg/database/ent/predicate/predicate.go @@ -21,6 +21,9 @@ type Decision func(*sql.Selector) // Event is the predicate function for event builders. type Event func(*sql.Selector) +// Lock is the predicate function for lock builders. +type Lock func(*sql.Selector) + // Machine is the predicate function for machine builders. 
type Machine func(*sql.Selector) diff --git a/pkg/database/ent/runtime.go b/pkg/database/ent/runtime.go index bceea37b3a7..87073074563 100644 --- a/pkg/database/ent/runtime.go +++ b/pkg/database/ent/runtime.go @@ -10,6 +10,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/database/ent/configitem" "github.com/crowdsecurity/crowdsec/pkg/database/ent/decision" "github.com/crowdsecurity/crowdsec/pkg/database/ent/event" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/lock" "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" "github.com/crowdsecurity/crowdsec/pkg/database/ent/meta" "github.com/crowdsecurity/crowdsec/pkg/database/ent/schema" @@ -137,6 +138,12 @@ func init() { eventDescSerialized := eventFields[3].Descriptor() // event.SerializedValidator is a validator for the "serialized" field. It is called by the builders before save. event.SerializedValidator = eventDescSerialized.Validators[0].(func(string) error) + lockFields := schema.Lock{}.Fields() + _ = lockFields + // lockDescCreatedAt is the schema descriptor for created_at field. + lockDescCreatedAt := lockFields[1].Descriptor() + // lock.DefaultCreatedAt holds the default value on creation for the created_at field. + lock.DefaultCreatedAt = lockDescCreatedAt.Default.(func() time.Time) machineFields := schema.Machine{}.Fields() _ = machineFields // machineDescCreatedAt is the schema descriptor for created_at field. 
diff --git a/pkg/database/ent/schema/lock.go b/pkg/database/ent/schema/lock.go new file mode 100644 index 00000000000..de87efff3f7 --- /dev/null +++ b/pkg/database/ent/schema/lock.go @@ -0,0 +1,22 @@ +package schema + +import ( + "entgo.io/ent" + "entgo.io/ent/schema/field" + "github.com/crowdsecurity/crowdsec/pkg/types" +) + +type Lock struct { + ent.Schema +} + +func (Lock) Fields() []ent.Field { + return []ent.Field{ + field.String("name").Unique().StructTag(`json:"name"`), + field.Time("created_at").Default(types.UtcNow).StructTag(`json:"created_at"`), + } +} + +func (Lock) Edges() []ent.Edge { + return nil +} diff --git a/pkg/database/ent/tx.go b/pkg/database/ent/tx.go index 65c2ed00a44..27b39c12502 100644 --- a/pkg/database/ent/tx.go +++ b/pkg/database/ent/tx.go @@ -22,6 +22,8 @@ type Tx struct { Decision *DecisionClient // Event is the client for interacting with the Event builders. Event *EventClient + // Lock is the client for interacting with the Lock builders. + Lock *LockClient // Machine is the client for interacting with the Machine builders. Machine *MachineClient // Meta is the client for interacting with the Meta builders. 
@@ -162,6 +164,7 @@ func (tx *Tx) init() { tx.ConfigItem = NewConfigItemClient(tx.config) tx.Decision = NewDecisionClient(tx.config) tx.Event = NewEventClient(tx.config) + tx.Lock = NewLockClient(tx.config) tx.Machine = NewMachineClient(tx.config) tx.Meta = NewMetaClient(tx.config) } diff --git a/pkg/database/lock.go b/pkg/database/lock.go new file mode 100644 index 00000000000..339226e8592 --- /dev/null +++ b/pkg/database/lock.go @@ -0,0 +1,67 @@ +package database + +import ( + "time" + + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/lock" + "github.com/crowdsecurity/crowdsec/pkg/types" +) + +const ( + CAPIPullLockTimeout = 120 +) + +func (c *Client) AcquireLock(name string) error { + _, err := c.Ent.Lock.Create(). + SetName(name). + SetCreatedAt(types.UtcNow()). + Save(c.CTX) + if ent.IsConstraintError(err) { + return err + } + if err != nil { + return errors.Wrapf(InsertFail, "insert lock: %s", err) + } + return nil +} + +func (c *Client) ReleaseLock(name string) error { + _, err := c.Ent.Lock.Delete().Where(lock.NameEQ(name)).Exec(c.CTX) + if err != nil { + return errors.Wrapf(DeleteFail, "delete lock: %s", err) + } + return nil +} + +func (c *Client) ReleaseLockWithTimeout(name string, timeout int) error { + log.Debugf("(%s) releasing orphin locks", name) + _, err := c.Ent.Lock.Delete().Where( + lock.NameEQ(name), + lock.CreatedAtLT(time.Now().Add(-time.Duration(timeout)*time.Minute)), + ).Exec(c.CTX) + if err != nil { + return errors.Wrapf(DeleteFail, "delete lock: %s", err) + } + return nil +} + +func (c *Client) IsLocked(err error) bool { + return ent.IsConstraintError(err) +} + +func (c *Client) AcquirePullCAPILock() error { + lockName := "pullCAPI" + err := c.ReleaseLockWithTimeout(lockName, CAPIPullLockTimeout) + if err != nil { + log.Errorf("unable to release pullCAPI lock: %s", err) + } + return c.AcquireLock(lockName) +} + +func 
(c *Client) ReleasePullCAPILock() error { + return c.ReleaseLockWithTimeout("pullCAPI", CAPIPullLockTimeout) +} From 717fc97ca039a2fdf2afbdd73b2a8b417b48c69e Mon Sep 17 00:00:00 2001 From: "Thibault \"bui\" Koechlin" Date: Wed, 14 Feb 2024 13:38:40 +0100 Subject: [PATCH 030/318] add SetMeta and SetParsed helpers (#2845) * add SetMeta and SetParsed helpers --- pkg/types/event.go | 16 ++++++++ pkg/types/event_test.go | 82 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 98 insertions(+) diff --git a/pkg/types/event.go b/pkg/types/event.go index 074241918d8..c7b19fe3ca4 100644 --- a/pkg/types/event.go +++ b/pkg/types/event.go @@ -46,6 +46,22 @@ type Event struct { Meta map[string]string `yaml:"Meta,omitempty" json:"Meta,omitempty"` } +func (e *Event) SetMeta(key string, value string) bool { + if e.Meta == nil { + e.Meta = make(map[string]string) + } + e.Meta[key] = value + return true +} + +func (e *Event) SetParsed(key string, value string) bool { + if e.Parsed == nil { + e.Parsed = make(map[string]string) + } + e.Parsed[key] = value + return true +} + func (e *Event) GetType() string { if e.Type == OVFLW { return "overflow" diff --git a/pkg/types/event_test.go b/pkg/types/event_test.go index 14ca48cd2a8..a2fad9ebcc7 100644 --- a/pkg/types/event_test.go +++ b/pkg/types/event_test.go @@ -9,6 +9,88 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/models" ) +func TestSetParsed(t *testing.T) { + tests := []struct { + name string + evt *Event + key string + value string + expected bool + }{ + { + name: "SetParsed: Valid", + evt: &Event{}, + key: "test", + value: "test", + expected: true, + }, + { + name: "SetParsed: Existing map", + evt: &Event{Parsed: map[string]string{}}, + key: "test", + value: "test", + expected: true, + }, + { + name: "SetParsed: Existing map+key", + evt: &Event{Parsed: map[string]string{"test": "foobar"}}, + key: "test", + value: "test", + expected: true, + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t 
*testing.T) { + tt.evt.SetParsed(tt.key, tt.value) + assert.Equal(t, tt.value, tt.evt.Parsed[tt.key]) + }) + } + +} + +func TestSetMeta(t *testing.T) { + tests := []struct { + name string + evt *Event + key string + value string + expected bool + }{ + { + name: "SetMeta: Valid", + evt: &Event{}, + key: "test", + value: "test", + expected: true, + }, + { + name: "SetMeta: Existing map", + evt: &Event{Meta: map[string]string{}}, + key: "test", + value: "test", + expected: true, + }, + { + name: "SetMeta: Existing map+key", + evt: &Event{Meta: map[string]string{"test": "foobar"}}, + key: "test", + value: "test", + expected: true, + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + tt.evt.SetMeta(tt.key, tt.value) + assert.Equal(t, tt.value, tt.evt.GetMeta(tt.key)) + }) + } + +} + func TestParseIPSources(t *testing.T) { tests := []struct { name string From e976614645aba906a096f4bdf46e09709f71d096 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 15 Feb 2024 14:34:12 +0100 Subject: [PATCH 031/318] cscli metrics: rename buckets -> scenarios (#2848) * cscli metrics: rename buckets -> scenarios * update lint configuration * lint --- .golangci.yml | 6 +++++- cmd/crowdsec-cli/metrics.go | 20 ++++++++++++-------- cmd/crowdsec-cli/metrics_table.go | 18 ++++++++++++++---- pkg/exprhelpers/exprlib_test.go | 4 ++-- pkg/parser/README.md | 2 +- pkg/setup/README.md | 2 +- 6 files changed, 35 insertions(+), 17 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index e605ac079d4..29332447b61 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -73,6 +73,10 @@ linters-settings: - pkg: "github.com/pkg/errors" desc: "errors.Wrap() is deprecated in favor of fmt.Errorf()" + wsl: + # Allow blocks to end with comments + allow-trailing-comment: true + linters: enable-all: true disable: @@ -105,6 +109,7 @@ linters: # - durationcheck # check for two durations multiplied together # - errcheck # Errcheck is a program 
for checking for unchecked errors in go programs. These unchecked errors can be critical bugs in some cases # - errorlint # errorlint is a linter for that can be used to find code that will cause problems with the error wrapping scheme introduced in Go 1.13. + # - execinquery # execinquery is a linter about query string checker in Query function which reads your Go src files and warning it finds # - exportloopref # checks for pointers to enclosing loop variables # - funlen # Tool for detection of long functions # - ginkgolinter # enforces standards of using ginkgo and gomega @@ -203,7 +208,6 @@ linters: # # Too strict / too many false positives (for now?) # - - execinquery # execinquery is a linter about query string checker in Query function which reads your Go src files and warning it finds - exhaustruct # Checks if all structure fields are initialized - forbidigo # Forbids identifiers - gochecknoglobals # check that no global variables exist diff --git a/cmd/crowdsec-cli/metrics.go b/cmd/crowdsec-cli/metrics.go index 6e23bcf12e4..0f92343868d 100644 --- a/cmd/crowdsec-cli/metrics.go +++ b/cmd/crowdsec-cli/metrics.go @@ -44,9 +44,8 @@ type ( ) var ( - ErrMissingConfig = errors.New("prometheus section missing, can't show metrics") + ErrMissingConfig = errors.New("prometheus section missing, can't show metrics") ErrMetricsDisabled = errors.New("prometheus is not enabled, can't show metrics") - ) type metricSection interface { @@ -59,7 +58,7 @@ type metricStore map[string]metricSection func NewMetricStore() metricStore { return metricStore{ "acquisition": statAcquis{}, - "buckets": statBucket{}, + "scenarios": statBucket{}, "parsers": statParser{}, "lapi": statLapi{}, "lapi-machine": statLapiMachine{}, @@ -110,7 +109,7 @@ func (ms metricStore) Fetch(url string) error { mAcquis := ms["acquisition"].(statAcquis) mParser := ms["parsers"].(statParser) - mBucket := ms["buckets"].(statBucket) + mBucket := ms["scenarios"].(statBucket) mLapi := ms["lapi"].(statLapi) 
mLapiMachine := ms["lapi-machine"].(statLapiMachine) mLapiBouncer := ms["lapi-bouncer"].(statLapiBouncer) @@ -361,7 +360,7 @@ cscli metrics --url http://lapi.local:6060/metrics show acquisition parsers cscli metrics list`, Args: cobra.ExactArgs(0), DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(_ *cobra.Command, _ []string) error { return cli.show(nil, url, noUnit) }, } @@ -383,7 +382,7 @@ func (cli *cliMetrics) expandSectionGroups(args []string) []string { for _, section := range args { switch section { case "engine": - ret = append(ret, "acquisition", "parsers", "buckets", "stash", "whitelists") + ret = append(ret, "acquisition", "parsers", "scenarios", "stash", "whitelists") case "lapi": ret = append(ret, "alerts", "decisions", "lapi", "lapi-bouncer", "lapi-decisions", "lapi-machine") case "appsec": @@ -413,10 +412,13 @@ cscli metrics show cscli metrics show engine # Show some specific metrics, show empty tables, connect to a different url -cscli metrics show acquisition parsers buckets stash --url http://lapi.local:6060/metrics +cscli metrics show acquisition parsers scenarios stash --url http://lapi.local:6060/metrics + +# To list available metric types, use "cscli metrics list" +cscli metrics list; cscli metrics list -o json # Show metrics in json format -cscli metrics show acquisition parsers buckets stash -o json`, +cscli metrics show acquisition parsers scenarios stash -o json`, // Positional args are optional DisableAutoGenTag: true, RunE: func(_ *cobra.Command, args []string) error { @@ -467,12 +469,14 @@ func (cli *cliMetrics) list() error { if err != nil { return fmt.Errorf("failed to marshal metric types: %w", err) } + fmt.Println(string(x)) case "raw": x, err := yaml.Marshal(allMetrics) if err != nil { return fmt.Errorf("failed to marshal metric types: %w", err) } + fmt.Println(string(x)) } diff --git a/cmd/crowdsec-cli/metrics_table.go b/cmd/crowdsec-cli/metrics_table.go index da6ea3d9f1d..689929500ad 
100644 --- a/cmd/crowdsec-cli/metrics_table.go +++ b/cmd/crowdsec-cli/metrics_table.go @@ -1,6 +1,7 @@ package main import ( + "errors" "fmt" "io" "sort" @@ -13,7 +14,7 @@ import ( ) // ErrNilTable means a nil pointer was passed instead of a table instance. This is a programming error. -var ErrNilTable = fmt.Errorf("nil table") +var ErrNilTable = errors.New("nil table") func lapiMetricsToTable(t *table.Table, stats map[string]map[string]map[string]int) int { // stats: machine -> route -> method -> count @@ -44,6 +45,7 @@ func lapiMetricsToTable(t *table.Table, stats map[string]map[string]map[string]i } t.AddRow(row...) + numRows++ } } @@ -82,6 +84,7 @@ func wlMetricsToTable(t *table.Table, stats map[string]map[string]map[string]int } t.AddRow(row...) + numRows++ } } @@ -120,6 +123,7 @@ func metricsToTable(t *table.Table, stats map[string]map[string]int, keys []stri } t.AddRow(row...) + numRows++ } @@ -127,7 +131,7 @@ func metricsToTable(t *table.Table, stats map[string]map[string]int, keys []stri } func (s statBucket) Description() (string, string) { - return "Bucket Metrics", + return "Scenario Metrics", `Measure events in different scenarios. Current count is the number of buckets during metrics collection. 
` + `Overflows are past event-producing buckets, while Expired are the ones that didn’t receive enough events to Overflow.` } @@ -143,13 +147,13 @@ func (s statBucket) Process(bucket, metric string, val int) { func (s statBucket) Table(out io.Writer, noUnit bool, showEmpty bool) { t := newTable(out) t.SetRowLines(false) - t.SetHeaders("Bucket", "Current Count", "Overflows", "Instantiated", "Poured", "Expired") + t.SetHeaders("Scenario", "Current Count", "Overflows", "Instantiated", "Poured", "Expired") t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) keys := []string{"curr_count", "overflow", "instantiation", "pour", "underflow"} if numRows, err := metricsToTable(t, s, keys, noUnit); err != nil { - log.Warningf("while collecting bucket stats: %s", err) + log.Warningf("while collecting scenario stats: %s", err) } else if numRows > 0 || showEmpty { title, _ := s.Description() renderTableTitle(out, "\n"+title+":") @@ -352,6 +356,7 @@ func (s statStash) Table(out io.Writer, noUnit bool, showEmpty bool) { strconv.Itoa(astats.Count), } t.AddRow(row...) + numRows++ } @@ -400,7 +405,9 @@ func (s statLapi) Table(out io.Writer, noUnit bool, showEmpty bool) { sl, strconv.Itoa(astats[sl]), } + t.AddRow(row...) 
+ numRows++ } } @@ -515,6 +522,7 @@ func (s statLapiDecision) Table(out io.Writer, noUnit bool, showEmpty bool) { strconv.Itoa(hits.Empty), strconv.Itoa(hits.NonEmpty), ) + numRows++ } @@ -560,6 +568,7 @@ func (s statDecision) Table(out io.Writer, noUnit bool, showEmpty bool) { action, strconv.Itoa(hits), ) + numRows++ } } @@ -594,6 +603,7 @@ func (s statAlert) Table(out io.Writer, noUnit bool, showEmpty bool) { scenario, strconv.Itoa(hits), ) + numRows++ } diff --git a/pkg/exprhelpers/exprlib_test.go b/pkg/exprhelpers/exprlib_test.go index 6b9cd15c73b..9d5a6556b25 100644 --- a/pkg/exprhelpers/exprlib_test.go +++ b/pkg/exprhelpers/exprlib_test.go @@ -200,7 +200,7 @@ func TestDistanceHelper(t *testing.T) { ret, err := expr.Run(vm, env) if test.valid { require.NoError(t, err) - assert.Equal(t, test.dist, ret) + assert.InDelta(t, test.dist, ret, 0.000001) } else { require.Error(t, err) } @@ -592,7 +592,7 @@ func TestAtof(t *testing.T) { require.NoError(t, err) output, err := expr.Run(program, test.env) require.NoError(t, err) - require.Equal(t, test.result, output) + require.InDelta(t, test.result, output, 0.000001) } } diff --git a/pkg/parser/README.md b/pkg/parser/README.md index 62a56e61820..0fcccc811e4 100644 --- a/pkg/parser/README.md +++ b/pkg/parser/README.md @@ -45,7 +45,7 @@ statics: > `filter: "Line.Src endsWith '/foobar'"` - - *optional* `filter` : an [expression](https://github.com/antonmedv/expr/blob/master/docs/Language-Definition.md) that will be evaluated against the runtime of a line (`Event`) + - *optional* `filter` : an [expression](https://github.com/antonmedv/expr/blob/master/docs/language-definition.md) that will be evaluated against the runtime of a line (`Event`) - if the `filter` is present and returns false, node is not evaluated - if `filter` is absent or present and returns true, node is evaluated diff --git a/pkg/setup/README.md b/pkg/setup/README.md index 3585ee8b141..9cdc7243975 100644 --- a/pkg/setup/README.md +++ b/pkg/setup/README.md 
@@ -129,7 +129,7 @@ services: and must all return true for a service to be detected (implied *and* clause, no short-circuit). A missing or empty `when:` section is evaluated as true. The [expression -engine](https://github.com/antonmedv/expr/blob/master/docs/Language-Definition.md) +engine](https://github.com/antonmedv/expr/blob/master/docs/language-definition.md) is the same one used by CrowdSec parser filters. You can force the detection of a process by using the `cscli setup detect... --force-process ` flag. It will always behave as if `` was running. From f3ea88f64ce7a594830558c84bc6f196ddddc323 Mon Sep 17 00:00:00 2001 From: Laurence Jones Date: Wed, 21 Feb 2024 13:40:38 +0000 Subject: [PATCH 032/318] Appsec unix socket (#2737) * Appsec socket * Patch detection of nil listenaddr * Allow TLS unix socket * Merge diff issue --- pkg/acquisition/modules/appsec/appsec.go | 55 ++++++++++++++++++------ 1 file changed, 42 insertions(+), 13 deletions(-) diff --git a/pkg/acquisition/modules/appsec/appsec.go b/pkg/acquisition/modules/appsec/appsec.go index 4e2ff0bd22b..a3c8c7dd8ee 100644 --- a/pkg/acquisition/modules/appsec/appsec.go +++ b/pkg/acquisition/modules/appsec/appsec.go @@ -4,7 +4,9 @@ import ( "context" "encoding/json" "fmt" + "net" "net/http" + "os" "sync" "time" @@ -34,6 +36,7 @@ var ( // configuration structure of the acquis for the application security engine type AppsecSourceConfig struct { ListenAddr string `yaml:"listen_addr"` + ListenSocket string `yaml:"listen_socket"` CertFilePath string `yaml:"cert_file"` KeyFilePath string `yaml:"key_file"` Path string `yaml:"path"` @@ -97,7 +100,7 @@ func (w *AppsecSource) UnmarshalConfig(yamlConfig []byte) error { return errors.Wrap(err, "Cannot parse appsec configuration") } - if w.config.ListenAddr == "" { + if w.config.ListenAddr == "" && w.config.ListenSocket == "" { w.config.ListenAddr = "127.0.0.1:7422" } @@ -123,7 +126,12 @@ func (w *AppsecSource) UnmarshalConfig(yamlConfig []byte) error { } if 
w.config.Name == "" { - w.config.Name = fmt.Sprintf("%s%s", w.config.ListenAddr, w.config.Path) + if w.config.ListenSocket != "" && w.config.ListenAddr == "" { + w.config.Name = w.config.ListenSocket + } + if w.config.ListenSocket == "" { + w.config.Name = fmt.Sprintf("%s%s", w.config.ListenAddr, w.config.Path) + } } csConfig := csconfig.GetConfig() @@ -251,23 +259,44 @@ func (w *AppsecSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) return runner.Run(t) }) } - - w.logger.Infof("Starting Appsec server on %s%s", w.config.ListenAddr, w.config.Path) t.Go(func() error { - var err error - if w.config.CertFilePath != "" && w.config.KeyFilePath != "" { - err = w.server.ListenAndServeTLS(w.config.CertFilePath, w.config.KeyFilePath) - } else { - err = w.server.ListenAndServe() + if w.config.ListenSocket != "" { + w.logger.Infof("creating unix socket %s", w.config.ListenSocket) + _ = os.RemoveAll(w.config.ListenSocket) + listener, err := net.Listen("unix", w.config.ListenSocket) + if err != nil { + return errors.Wrap(err, "Appsec server failed") + } + defer listener.Close() + if w.config.CertFilePath != "" && w.config.KeyFilePath != "" { + err = w.server.ServeTLS(listener, w.config.CertFilePath, w.config.KeyFilePath) + } else { + err = w.server.Serve(listener) + } + if err != nil && err != http.ErrServerClosed { + return errors.Wrap(err, "Appsec server failed") + } } - - if err != nil && err != http.ErrServerClosed { - return errors.Wrap(err, "Appsec server failed") + return nil + }) + t.Go(func() error { + var err error + if w.config.ListenAddr != "" { + w.logger.Infof("creating TCP server on %s", w.config.ListenAddr) + if w.config.CertFilePath != "" && w.config.KeyFilePath != "" { + err = w.server.ListenAndServeTLS(w.config.CertFilePath, w.config.KeyFilePath) + } else { + err = w.server.ListenAndServe() + } + + if err != nil && err != http.ErrServerClosed { + return errors.Wrap(err, "Appsec server failed") + } } return nil }) <-t.Dying() - 
w.logger.Infof("Stopping Appsec server on %s%s", w.config.ListenAddr, w.config.Path) + w.logger.Info("Shutting down Appsec server") //xx let's clean up the appsec runners :) appsec.AppsecRulesDetails = make(map[int]appsec.RulesDetails) w.server.Shutdown(context.TODO()) From 3e3df5e4c6e6deb1ef36bb406e86a7ebc8c30f06 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 22 Feb 2024 11:04:36 +0100 Subject: [PATCH 033/318] refact "cscli config", remove flag "cscli restore --old-backup" (#2832) * refact "cscli config show" * refact "cscli config backup" * refact "cscli confgi show-yaml" * refact "cscli config restore" * refact "cscli config feature-flags" * cscli restore: remove 'old-backup' option * lint (whitespace, wrapped errors) --- cmd/crowdsec-cli/config.go | 26 ++-- cmd/crowdsec-cli/config_backup.go | 99 ++++++------- cmd/crowdsec-cli/config_feature_flags.go | 25 ++-- cmd/crowdsec-cli/config_restore.go | 175 ++++++++--------------- cmd/crowdsec-cli/config_show.go | 37 +++-- cmd/crowdsec-cli/config_showyaml.go | 12 +- cmd/crowdsec-cli/main.go | 2 +- 7 files changed, 167 insertions(+), 209 deletions(-) diff --git a/cmd/crowdsec-cli/config.go b/cmd/crowdsec-cli/config.go index e60246db790..e88845798e2 100644 --- a/cmd/crowdsec-cli/config.go +++ b/cmd/crowdsec-cli/config.go @@ -4,19 +4,29 @@ import ( "github.com/spf13/cobra" ) -func NewConfigCmd() *cobra.Command { - cmdConfig := &cobra.Command{ +type cliConfig struct { + cfg configGetter +} + +func NewCLIConfig(cfg configGetter) *cliConfig { + return &cliConfig{ + cfg: cfg, + } +} + +func (cli *cliConfig) NewCommand() *cobra.Command { + cmd := &cobra.Command{ Use: "config [command]", Short: "Allows to view current config", Args: cobra.ExactArgs(0), DisableAutoGenTag: true, } - cmdConfig.AddCommand(NewConfigShowCmd()) - cmdConfig.AddCommand(NewConfigShowYAMLCmd()) - cmdConfig.AddCommand(NewConfigBackupCmd()) - cmdConfig.AddCommand(NewConfigRestoreCmd()) - 
cmdConfig.AddCommand(NewConfigFeatureFlagsCmd()) + cmd.AddCommand(cli.newShowCmd()) + cmd.AddCommand(cli.newShowYAMLCmd()) + cmd.AddCommand(cli.newBackupCmd()) + cmd.AddCommand(cli.newRestoreCmd()) + cmd.AddCommand(cli.newFeatureFlagsCmd()) - return cmdConfig + return cmd } diff --git a/cmd/crowdsec-cli/config_backup.go b/cmd/crowdsec-cli/config_backup.go index 9414fa51033..d1e4a393555 100644 --- a/cmd/crowdsec-cli/config_backup.go +++ b/cmd/crowdsec-cli/config_backup.go @@ -2,6 +2,7 @@ package main import ( "encoding/json" + "errors" "fmt" "os" "path/filepath" @@ -13,8 +14,8 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -func backupHub(dirPath string) error { - hub, err := require.Hub(csConfig, nil, nil) +func (cli *cliConfig) backupHub(dirPath string) error { + hub, err := require.Hub(cli.cfg(), nil, nil) if err != nil { return err } @@ -32,7 +33,7 @@ func backupHub(dirPath string) error { itemDirectory := fmt.Sprintf("%s/%s/", dirPath, itemType) if err = os.MkdirAll(itemDirectory, os.ModePerm); err != nil { - return fmt.Errorf("error while creating %s : %s", itemDirectory, err) + return fmt.Errorf("error while creating %s: %w", itemDirectory, err) } upstreamParsers := []string{} @@ -41,18 +42,18 @@ func backupHub(dirPath string) error { clog = clog.WithFields(log.Fields{ "file": v.Name, }) - if !v.State.Installed { //only backup installed ones - clog.Debugf("[%s] : not installed", k) + if !v.State.Installed { // only backup installed ones + clog.Debugf("[%s]: not installed", k) continue } - //for the local/tainted ones, we back up the full file + // for the local/tainted ones, we back up the full file if v.State.Tainted || v.State.IsLocal() || !v.State.UpToDate { - //we need to backup stages for parsers + // we need to backup stages for parsers if itemType == cwhub.PARSERS || itemType == cwhub.POSTOVERFLOWS { fstagedir := fmt.Sprintf("%s%s", itemDirectory, v.Stage) if err = os.MkdirAll(fstagedir, os.ModePerm); err != nil { - return 
fmt.Errorf("error while creating stage dir %s : %s", fstagedir, err) + return fmt.Errorf("error while creating stage dir %s: %w", fstagedir, err) } } @@ -60,7 +61,7 @@ func backupHub(dirPath string) error { tfile := fmt.Sprintf("%s%s/%s", itemDirectory, v.Stage, v.FileName) if err = CopyFile(v.State.LocalPath, tfile); err != nil { - return fmt.Errorf("failed copy %s %s to %s : %s", itemType, v.State.LocalPath, tfile, err) + return fmt.Errorf("failed copy %s %s to %s: %w", itemType, v.State.LocalPath, tfile, err) } clog.Infof("local/tainted saved %s to %s", v.State.LocalPath, tfile) @@ -68,21 +69,21 @@ func backupHub(dirPath string) error { continue } - clog.Debugf("[%s] : from hub, just backup name (up-to-date:%t)", k, v.State.UpToDate) + clog.Debugf("[%s]: from hub, just backup name (up-to-date:%t)", k, v.State.UpToDate) clog.Infof("saving, version:%s, up-to-date:%t", v.Version, v.State.UpToDate) upstreamParsers = append(upstreamParsers, v.Name) } - //write the upstream items + // write the upstream items upstreamParsersFname := fmt.Sprintf("%s/upstream-%s.json", itemDirectory, itemType) upstreamParsersContent, err := json.MarshalIndent(upstreamParsers, "", " ") if err != nil { - return fmt.Errorf("failed marshaling upstream parsers : %s", err) + return fmt.Errorf("failed marshaling upstream parsers: %w", err) } err = os.WriteFile(upstreamParsersFname, upstreamParsersContent, 0o644) if err != nil { - return fmt.Errorf("unable to write to %s %s : %s", itemType, upstreamParsersFname, err) + return fmt.Errorf("unable to write to %s %s: %w", itemType, upstreamParsersFname, err) } clog.Infof("Wrote %d entries for %s to %s", len(upstreamParsers), itemType, upstreamParsersFname) @@ -102,11 +103,13 @@ func backupHub(dirPath string) error { - Tainted/local/out-of-date scenarios, parsers, postoverflows and collections - Acquisition files (acquis.yaml, acquis.d/*.yaml) */ -func backupConfigToDirectory(dirPath string) error { +func (cli *cliConfig) backup(dirPath string) 
error { var err error + cfg := cli.cfg() + if dirPath == "" { - return fmt.Errorf("directory path can't be empty") + return errors.New("directory path can't be empty") } log.Infof("Starting configuration backup") @@ -121,10 +124,10 @@ func backupConfigToDirectory(dirPath string) error { return fmt.Errorf("while creating %s: %w", dirPath, err) } - if csConfig.ConfigPaths.SimulationFilePath != "" { + if cfg.ConfigPaths.SimulationFilePath != "" { backupSimulation := filepath.Join(dirPath, "simulation.yaml") - if err = CopyFile(csConfig.ConfigPaths.SimulationFilePath, backupSimulation); err != nil { - return fmt.Errorf("failed copy %s to %s: %w", csConfig.ConfigPaths.SimulationFilePath, backupSimulation, err) + if err = CopyFile(cfg.ConfigPaths.SimulationFilePath, backupSimulation); err != nil { + return fmt.Errorf("failed copy %s to %s: %w", cfg.ConfigPaths.SimulationFilePath, backupSimulation, err) } log.Infof("Saved simulation to %s", backupSimulation) @@ -134,22 +137,22 @@ func backupConfigToDirectory(dirPath string) error { - backup AcquisitionFilePath - backup the other files of acquisition directory */ - if csConfig.Crowdsec != nil && csConfig.Crowdsec.AcquisitionFilePath != "" { + if cfg.Crowdsec != nil && cfg.Crowdsec.AcquisitionFilePath != "" { backupAcquisition := filepath.Join(dirPath, "acquis.yaml") - if err = CopyFile(csConfig.Crowdsec.AcquisitionFilePath, backupAcquisition); err != nil { - return fmt.Errorf("failed copy %s to %s: %s", csConfig.Crowdsec.AcquisitionFilePath, backupAcquisition, err) + if err = CopyFile(cfg.Crowdsec.AcquisitionFilePath, backupAcquisition); err != nil { + return fmt.Errorf("failed copy %s to %s: %w", cfg.Crowdsec.AcquisitionFilePath, backupAcquisition, err) } } acquisBackupDir := filepath.Join(dirPath, "acquis") if err = os.Mkdir(acquisBackupDir, 0o700); err != nil { - return fmt.Errorf("error while creating %s: %s", acquisBackupDir, err) + return fmt.Errorf("error while creating %s: %w", acquisBackupDir, err) } - if 
csConfig.Crowdsec != nil && len(csConfig.Crowdsec.AcquisitionFiles) > 0 { - for _, acquisFile := range csConfig.Crowdsec.AcquisitionFiles { + if cfg.Crowdsec != nil && len(cfg.Crowdsec.AcquisitionFiles) > 0 { + for _, acquisFile := range cfg.Crowdsec.AcquisitionFiles { /*if it was the default one, it was already backup'ed*/ - if csConfig.Crowdsec.AcquisitionFilePath == acquisFile { + if cfg.Crowdsec.AcquisitionFilePath == acquisFile { continue } @@ -169,56 +172,48 @@ func backupConfigToDirectory(dirPath string) error { if ConfigFilePath != "" { backupMain := fmt.Sprintf("%s/config.yaml", dirPath) if err = CopyFile(ConfigFilePath, backupMain); err != nil { - return fmt.Errorf("failed copy %s to %s: %s", ConfigFilePath, backupMain, err) + return fmt.Errorf("failed copy %s to %s: %w", ConfigFilePath, backupMain, err) } log.Infof("Saved default yaml to %s", backupMain) } - if csConfig.API != nil && csConfig.API.Server != nil && csConfig.API.Server.OnlineClient != nil && csConfig.API.Server.OnlineClient.CredentialsFilePath != "" { + if cfg.API != nil && cfg.API.Server != nil && cfg.API.Server.OnlineClient != nil && cfg.API.Server.OnlineClient.CredentialsFilePath != "" { backupCAPICreds := fmt.Sprintf("%s/online_api_credentials.yaml", dirPath) - if err = CopyFile(csConfig.API.Server.OnlineClient.CredentialsFilePath, backupCAPICreds); err != nil { - return fmt.Errorf("failed copy %s to %s: %s", csConfig.API.Server.OnlineClient.CredentialsFilePath, backupCAPICreds, err) + if err = CopyFile(cfg.API.Server.OnlineClient.CredentialsFilePath, backupCAPICreds); err != nil { + return fmt.Errorf("failed copy %s to %s: %w", cfg.API.Server.OnlineClient.CredentialsFilePath, backupCAPICreds, err) } log.Infof("Saved online API credentials to %s", backupCAPICreds) } - if csConfig.API != nil && csConfig.API.Client != nil && csConfig.API.Client.CredentialsFilePath != "" { + if cfg.API != nil && cfg.API.Client != nil && cfg.API.Client.CredentialsFilePath != "" { backupLAPICreds := 
fmt.Sprintf("%s/local_api_credentials.yaml", dirPath) - if err = CopyFile(csConfig.API.Client.CredentialsFilePath, backupLAPICreds); err != nil { - return fmt.Errorf("failed copy %s to %s: %s", csConfig.API.Client.CredentialsFilePath, backupLAPICreds, err) + if err = CopyFile(cfg.API.Client.CredentialsFilePath, backupLAPICreds); err != nil { + return fmt.Errorf("failed copy %s to %s: %w", cfg.API.Client.CredentialsFilePath, backupLAPICreds, err) } log.Infof("Saved local API credentials to %s", backupLAPICreds) } - if csConfig.API != nil && csConfig.API.Server != nil && csConfig.API.Server.ProfilesPath != "" { + if cfg.API != nil && cfg.API.Server != nil && cfg.API.Server.ProfilesPath != "" { backupProfiles := fmt.Sprintf("%s/profiles.yaml", dirPath) - if err = CopyFile(csConfig.API.Server.ProfilesPath, backupProfiles); err != nil { - return fmt.Errorf("failed copy %s to %s: %s", csConfig.API.Server.ProfilesPath, backupProfiles, err) + if err = CopyFile(cfg.API.Server.ProfilesPath, backupProfiles); err != nil { + return fmt.Errorf("failed copy %s to %s: %w", cfg.API.Server.ProfilesPath, backupProfiles, err) } log.Infof("Saved profiles to %s", backupProfiles) } - if err = backupHub(dirPath); err != nil { - return fmt.Errorf("failed to backup hub config: %s", err) - } - - return nil -} - -func runConfigBackup(cmd *cobra.Command, args []string) error { - if err := backupConfigToDirectory(args[0]); err != nil { - return fmt.Errorf("failed to backup config: %w", err) + if err = cli.backupHub(dirPath); err != nil { + return fmt.Errorf("failed to backup hub config: %w", err) } return nil } -func NewConfigBackupCmd() *cobra.Command { - cmdConfigBackup := &cobra.Command{ +func (cli *cliConfig) newBackupCmd() *cobra.Command { + cmd := &cobra.Command{ Use: `backup "directory"`, Short: "Backup current config", Long: `Backup the current crowdsec configuration including : @@ -232,8 +227,14 @@ func NewConfigBackupCmd() *cobra.Command { Example: `cscli config backup ./my-backup`, 
Args: cobra.ExactArgs(1), DisableAutoGenTag: true, - RunE: runConfigBackup, + RunE: func(_ *cobra.Command, args []string) error { + if err := cli.backup(args[0]); err != nil { + return fmt.Errorf("failed to backup config: %w", err) + } + + return nil + }, } - return cmdConfigBackup + return cmd } diff --git a/cmd/crowdsec-cli/config_feature_flags.go b/cmd/crowdsec-cli/config_feature_flags.go index fbba1f56736..d1dbe2b93b7 100644 --- a/cmd/crowdsec-cli/config_feature_flags.go +++ b/cmd/crowdsec-cli/config_feature_flags.go @@ -11,14 +11,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/fflag" ) -func runConfigFeatureFlags(cmd *cobra.Command, args []string) error { - flags := cmd.Flags() - - showRetired, err := flags.GetBool("retired") - if err != nil { - return err - } - +func (cli *cliConfig) featureFlags(showRetired bool) error { green := color.New(color.FgGreen).SprintFunc() red := color.New(color.FgRed).SprintFunc() yellow := color.New(color.FgYellow).SprintFunc() @@ -121,18 +114,22 @@ func runConfigFeatureFlags(cmd *cobra.Command, args []string) error { return nil } -func NewConfigFeatureFlagsCmd() *cobra.Command { - cmdConfigFeatureFlags := &cobra.Command{ +func (cli *cliConfig) newFeatureFlagsCmd() *cobra.Command { + var showRetired bool + + cmd := &cobra.Command{ Use: "feature-flags", Short: "Displays feature flag status", Long: `Displays the supported feature flags and their current status.`, Args: cobra.ExactArgs(0), DisableAutoGenTag: true, - RunE: runConfigFeatureFlags, + RunE: func(_ *cobra.Command, _ []string) error { + return cli.featureFlags(showRetired) + }, } - flags := cmdConfigFeatureFlags.Flags() - flags.Bool("retired", false, "Show retired features") + flags := cmd.Flags() + flags.BoolVar(&showRetired, "retired", false, "Show retired features") - return cmdConfigFeatureFlags + return cmd } diff --git a/cmd/crowdsec-cli/config_restore.go b/cmd/crowdsec-cli/config_restore.go index 17d7494c60f..513f993ba80 100644 --- 
a/cmd/crowdsec-cli/config_restore.go +++ b/cmd/crowdsec-cli/config_restore.go @@ -3,25 +3,17 @@ package main import ( "encoding/json" "fmt" - "io" "os" "path/filepath" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" - "gopkg.in/yaml.v2" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" - "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -type OldAPICfg struct { - MachineID string `json:"machine_id"` - Password string `json:"password"` -} - -func restoreHub(dirPath string) error { +func (cli *cliConfig) restoreHub(dirPath string) error { hub, err := require.Hub(csConfig, require.RemoteHub(csConfig), nil) if err != nil { return err @@ -38,14 +30,14 @@ func restoreHub(dirPath string) error { file, err := os.ReadFile(upstreamListFN) if err != nil { - return fmt.Errorf("error while opening %s : %s", upstreamListFN, err) + return fmt.Errorf("error while opening %s: %w", upstreamListFN, err) } var upstreamList []string err = json.Unmarshal(file, &upstreamList) if err != nil { - return fmt.Errorf("error unmarshaling %s : %s", upstreamListFN, err) + return fmt.Errorf("error unmarshaling %s: %w", upstreamListFN, err) } for _, toinstall := range upstreamList { @@ -55,8 +47,7 @@ func restoreHub(dirPath string) error { continue } - err := item.Install(false, false) - if err != nil { + if err = item.Install(false, false); err != nil { log.Errorf("Error while installing %s : %s", toinstall, err) } } @@ -64,17 +55,17 @@ func restoreHub(dirPath string) error { /*restore the local and tainted items*/ files, err := os.ReadDir(itemDirectory) if err != nil { - return fmt.Errorf("failed enumerating files of %s : %s", itemDirectory, err) + return fmt.Errorf("failed enumerating files of %s: %w", itemDirectory, err) } for _, file := range files { - //this was the upstream data + // this was the upstream data if file.Name() == fmt.Sprintf("upstream-%s.json", itype) { continue } if itype == cwhub.PARSERS || itype == 
cwhub.POSTOVERFLOWS { - //we expect a stage here + // we expect a stage here if !file.IsDir() { continue } @@ -84,22 +75,23 @@ func restoreHub(dirPath string) error { log.Debugf("Found stage %s in %s, target directory : %s", stage, itype, stagedir) if err = os.MkdirAll(stagedir, os.ModePerm); err != nil { - return fmt.Errorf("error while creating stage directory %s : %s", stagedir, err) + return fmt.Errorf("error while creating stage directory %s: %w", stagedir, err) } // find items ifiles, err := os.ReadDir(itemDirectory + "/" + stage + "/") if err != nil { - return fmt.Errorf("failed enumerating files of %s : %s", itemDirectory+"/"+stage, err) + return fmt.Errorf("failed enumerating files of %s: %w", itemDirectory+"/"+stage, err) } - //finally copy item + + // finally copy item for _, tfile := range ifiles { log.Infof("Going to restore local/tainted [%s]", tfile.Name()) sourceFile := fmt.Sprintf("%s/%s/%s", itemDirectory, stage, tfile.Name()) destinationFile := fmt.Sprintf("%s%s", stagedir, tfile.Name()) if err = CopyFile(sourceFile, destinationFile); err != nil { - return fmt.Errorf("failed copy %s %s to %s : %s", itype, sourceFile, destinationFile, err) + return fmt.Errorf("failed copy %s %s to %s: %w", itype, sourceFile, destinationFile, err) } log.Infof("restored %s to %s", sourceFile, destinationFile) @@ -108,9 +100,11 @@ func restoreHub(dirPath string) error { log.Infof("Going to restore local/tainted [%s]", file.Name()) sourceFile := fmt.Sprintf("%s/%s", itemDirectory, file.Name()) destinationFile := fmt.Sprintf("%s/%s/%s", csConfig.ConfigPaths.ConfigDir, itype, file.Name()) + if err = CopyFile(sourceFile, destinationFile); err != nil { - return fmt.Errorf("failed copy %s %s to %s : %s", itype, sourceFile, destinationFile, err) + return fmt.Errorf("failed copy %s %s to %s: %w", itype, sourceFile, destinationFile, err) } + log.Infof("restored %s to %s", sourceFile, destinationFile) } } @@ -130,95 +124,60 @@ func restoreHub(dirPath string) error { - 
Tainted/local/out-of-date scenarios, parsers, postoverflows and collections - Acquisition files (acquis.yaml, acquis.d/*.yaml) */ -func restoreConfigFromDirectory(dirPath string, oldBackup bool) error { +func (cli *cliConfig) restore(dirPath string) error { var err error - if !oldBackup { - backupMain := fmt.Sprintf("%s/config.yaml", dirPath) - if _, err = os.Stat(backupMain); err == nil { - if csConfig.ConfigPaths != nil && csConfig.ConfigPaths.ConfigDir != "" { - if err = CopyFile(backupMain, fmt.Sprintf("%s/config.yaml", csConfig.ConfigPaths.ConfigDir)); err != nil { - return fmt.Errorf("failed copy %s to %s : %s", backupMain, csConfig.ConfigPaths.ConfigDir, err) - } + backupMain := fmt.Sprintf("%s/config.yaml", dirPath) + if _, err = os.Stat(backupMain); err == nil { + if csConfig.ConfigPaths != nil && csConfig.ConfigPaths.ConfigDir != "" { + if err = CopyFile(backupMain, fmt.Sprintf("%s/config.yaml", csConfig.ConfigPaths.ConfigDir)); err != nil { + return fmt.Errorf("failed copy %s to %s: %w", backupMain, csConfig.ConfigPaths.ConfigDir, err) } } + } - // Now we have config.yaml, we should regenerate config struct to have rights paths etc - ConfigFilePath = fmt.Sprintf("%s/config.yaml", csConfig.ConfigPaths.ConfigDir) - - log.Debug("Reloading configuration") + // Now we have config.yaml, we should regenerate config struct to have rights paths etc + ConfigFilePath = fmt.Sprintf("%s/config.yaml", csConfig.ConfigPaths.ConfigDir) - csConfig, _, err = loadConfigFor("config") - if err != nil { - return fmt.Errorf("failed to reload configuration: %s", err) - } + log.Debug("Reloading configuration") - backupCAPICreds := fmt.Sprintf("%s/online_api_credentials.yaml", dirPath) - if _, err = os.Stat(backupCAPICreds); err == nil { - if err = CopyFile(backupCAPICreds, csConfig.API.Server.OnlineClient.CredentialsFilePath); err != nil { - return fmt.Errorf("failed copy %s to %s : %s", backupCAPICreds, csConfig.API.Server.OnlineClient.CredentialsFilePath, err) - } - } + 
csConfig, _, err = loadConfigFor("config") + if err != nil { + return fmt.Errorf("failed to reload configuration: %w", err) + } - backupLAPICreds := fmt.Sprintf("%s/local_api_credentials.yaml", dirPath) - if _, err = os.Stat(backupLAPICreds); err == nil { - if err = CopyFile(backupLAPICreds, csConfig.API.Client.CredentialsFilePath); err != nil { - return fmt.Errorf("failed copy %s to %s : %s", backupLAPICreds, csConfig.API.Client.CredentialsFilePath, err) - } + backupCAPICreds := fmt.Sprintf("%s/online_api_credentials.yaml", dirPath) + if _, err = os.Stat(backupCAPICreds); err == nil { + if err = CopyFile(backupCAPICreds, csConfig.API.Server.OnlineClient.CredentialsFilePath); err != nil { + return fmt.Errorf("failed copy %s to %s: %w", backupCAPICreds, csConfig.API.Server.OnlineClient.CredentialsFilePath, err) } + } - backupProfiles := fmt.Sprintf("%s/profiles.yaml", dirPath) - if _, err = os.Stat(backupProfiles); err == nil { - if err = CopyFile(backupProfiles, csConfig.API.Server.ProfilesPath); err != nil { - return fmt.Errorf("failed copy %s to %s : %s", backupProfiles, csConfig.API.Server.ProfilesPath, err) - } + backupLAPICreds := fmt.Sprintf("%s/local_api_credentials.yaml", dirPath) + if _, err = os.Stat(backupLAPICreds); err == nil { + if err = CopyFile(backupLAPICreds, csConfig.API.Client.CredentialsFilePath); err != nil { + return fmt.Errorf("failed copy %s to %s: %w", backupLAPICreds, csConfig.API.Client.CredentialsFilePath, err) } - } else { - var oldAPICfg OldAPICfg - backupOldAPICfg := fmt.Sprintf("%s/api_creds.json", dirPath) - - jsonFile, err := os.Open(backupOldAPICfg) - if err != nil { - log.Warningf("failed to open %s : %s", backupOldAPICfg, err) - } else { - byteValue, _ := io.ReadAll(jsonFile) - err = json.Unmarshal(byteValue, &oldAPICfg) - if err != nil { - return fmt.Errorf("failed to load json file %s : %s", backupOldAPICfg, err) - } + } - apiCfg := csconfig.ApiCredentialsCfg{ - Login: oldAPICfg.MachineID, - Password: oldAPICfg.Password, - 
URL: CAPIBaseURL, - } - apiConfigDump, err := yaml.Marshal(apiCfg) - if err != nil { - return fmt.Errorf("unable to dump api credentials: %s", err) - } - apiConfigDumpFile := fmt.Sprintf("%s/online_api_credentials.yaml", csConfig.ConfigPaths.ConfigDir) - if csConfig.API.Server.OnlineClient != nil && csConfig.API.Server.OnlineClient.CredentialsFilePath != "" { - apiConfigDumpFile = csConfig.API.Server.OnlineClient.CredentialsFilePath - } - err = os.WriteFile(apiConfigDumpFile, apiConfigDump, 0o600) - if err != nil { - return fmt.Errorf("write api credentials in '%s' failed: %s", apiConfigDumpFile, err) - } - log.Infof("Saved API credentials to %s", apiConfigDumpFile) + backupProfiles := fmt.Sprintf("%s/profiles.yaml", dirPath) + if _, err = os.Stat(backupProfiles); err == nil { + if err = CopyFile(backupProfiles, csConfig.API.Server.ProfilesPath); err != nil { + return fmt.Errorf("failed copy %s to %s: %w", backupProfiles, csConfig.API.Server.ProfilesPath, err) } } backupSimulation := fmt.Sprintf("%s/simulation.yaml", dirPath) if _, err = os.Stat(backupSimulation); err == nil { if err = CopyFile(backupSimulation, csConfig.ConfigPaths.SimulationFilePath); err != nil { - return fmt.Errorf("failed copy %s to %s : %s", backupSimulation, csConfig.ConfigPaths.SimulationFilePath, err) + return fmt.Errorf("failed copy %s to %s: %w", backupSimulation, csConfig.ConfigPaths.SimulationFilePath, err) } } /*if there is a acquisition dir, restore its content*/ if csConfig.Crowdsec.AcquisitionDirPath != "" { if err = os.MkdirAll(csConfig.Crowdsec.AcquisitionDirPath, 0o700); err != nil { - return fmt.Errorf("error while creating %s : %s", csConfig.Crowdsec.AcquisitionDirPath, err) + return fmt.Errorf("error while creating %s: %w", csConfig.Crowdsec.AcquisitionDirPath, err) } } @@ -228,7 +187,7 @@ func restoreConfigFromDirectory(dirPath string, oldBackup bool) error { log.Debugf("restoring backup'ed %s", backupAcquisition) if err = CopyFile(backupAcquisition, 
csConfig.Crowdsec.AcquisitionFilePath); err != nil { - return fmt.Errorf("failed copy %s to %s : %s", backupAcquisition, csConfig.Crowdsec.AcquisitionFilePath, err) + return fmt.Errorf("failed copy %s to %s: %w", backupAcquisition, csConfig.Crowdsec.AcquisitionFilePath, err) } } @@ -244,7 +203,7 @@ func restoreConfigFromDirectory(dirPath string, oldBackup bool) error { log.Debugf("restoring %s to %s", acquisFile, targetFname) if err = CopyFile(acquisFile, targetFname); err != nil { - return fmt.Errorf("failed copy %s to %s : %s", acquisFile, targetFname, err) + return fmt.Errorf("failed copy %s to %s: %w", acquisFile, targetFname, err) } } } @@ -265,37 +224,22 @@ func restoreConfigFromDirectory(dirPath string, oldBackup bool) error { } if err = CopyFile(acquisFile, targetFname); err != nil { - return fmt.Errorf("failed copy %s to %s : %s", acquisFile, targetFname, err) + return fmt.Errorf("failed copy %s to %s: %w", acquisFile, targetFname, err) } log.Infof("Saved acquis %s to %s", acquisFile, targetFname) } } - if err = restoreHub(dirPath); err != nil { - return fmt.Errorf("failed to restore hub config : %s", err) + if err = cli.restoreHub(dirPath); err != nil { + return fmt.Errorf("failed to restore hub config: %w", err) } return nil } -func runConfigRestore(cmd *cobra.Command, args []string) error { - flags := cmd.Flags() - - oldBackup, err := flags.GetBool("old-backup") - if err != nil { - return err - } - - if err := restoreConfigFromDirectory(args[0], oldBackup); err != nil { - return fmt.Errorf("failed to restore config from %s: %w", args[0], err) - } - - return nil -} - -func NewConfigRestoreCmd() *cobra.Command { - cmdConfigRestore := &cobra.Command{ +func (cli *cliConfig) newRestoreCmd() *cobra.Command { + cmd := &cobra.Command{ Use: `restore "directory"`, Short: `Restore config in backup "directory"`, Long: `Restore the crowdsec configuration from specified backup "directory" including: @@ -308,11 +252,16 @@ func NewConfigRestoreCmd() *cobra.Command { - 
Backup of API credentials (local API and online API)`, Args: cobra.ExactArgs(1), DisableAutoGenTag: true, - RunE: runConfigRestore, - } + RunE: func(_ *cobra.Command, args []string) error { + dirPath := args[0] - flags := cmdConfigRestore.Flags() - flags.BoolP("old-backup", "", false, "To use when you are upgrading crowdsec v0.X to v1.X and you need to restore backup from v0.X") + if err := cli.restore(dirPath); err != nil { + return fmt.Errorf("failed to restore config from %s: %w", dirPath, err) + } + + return nil + }, + } - return cmdConfigRestore + return cmd } diff --git a/cmd/crowdsec-cli/config_show.go b/cmd/crowdsec-cli/config_show.go index bab911cc340..634ca77410e 100644 --- a/cmd/crowdsec-cli/config_show.go +++ b/cmd/crowdsec-cli/config_show.go @@ -182,31 +182,26 @@ Central API: {{- end }} ` -func runConfigShow(cmd *cobra.Command, args []string) error { - flags := cmd.Flags() +func (cli *cliConfig) show(key string) error { + cfg := cli.cfg() - if err := csConfig.LoadAPIClient(); err != nil { + if err := cfg.LoadAPIClient(); err != nil { log.Errorf("failed to load API client configuration: %s", err) // don't return, we can still show the configuration } - key, err := flags.GetString("key") - if err != nil { - return err - } - if key != "" { return showConfigKey(key) } - switch csConfig.Cscli.Output { + switch cfg.Cscli.Output { case "human": // The tests on .Enable look funny because the option has a true default which has // not been set yet (we don't really load the LAPI) and go templates don't dereference // pointers in boolean tests. Prefix notation is the cherry on top. 
funcs := template.FuncMap{ // can't use generics here - "ValueBool": func(b *bool) bool { return b!=nil && *b }, + "ValueBool": func(b *bool) bool { return b != nil && *b }, } tmp, err := template.New("config").Funcs(funcs).Parse(configShowTemplate) @@ -214,19 +209,19 @@ func runConfigShow(cmd *cobra.Command, args []string) error { return err } - err = tmp.Execute(os.Stdout, csConfig) + err = tmp.Execute(os.Stdout, cfg) if err != nil { return err } case "json": - data, err := json.MarshalIndent(csConfig, "", " ") + data, err := json.MarshalIndent(cfg, "", " ") if err != nil { return fmt.Errorf("failed to marshal configuration: %w", err) } fmt.Printf("%s\n", string(data)) case "raw": - data, err := yaml.Marshal(csConfig) + data, err := yaml.Marshal(cfg) if err != nil { return fmt.Errorf("failed to marshal configuration: %w", err) } @@ -237,18 +232,22 @@ func runConfigShow(cmd *cobra.Command, args []string) error { return nil } -func NewConfigShowCmd() *cobra.Command { - cmdConfigShow := &cobra.Command{ +func (cli *cliConfig) newShowCmd() *cobra.Command { + var key string + + cmd := &cobra.Command{ Use: "show", Short: "Displays current config", Long: `Displays the current cli configuration.`, Args: cobra.ExactArgs(0), DisableAutoGenTag: true, - RunE: runConfigShow, + RunE: func(_ *cobra.Command, _ []string) error { + return cli.show(key) + }, } - flags := cmdConfigShow.Flags() - flags.StringP("key", "", "", "Display only this value (Config.API.Server.ListenURI)") + flags := cmd.Flags() + flags.StringVarP(&key, "key", "", "", "Display only this value (Config.API.Server.ListenURI)") - return cmdConfigShow + return cmd } diff --git a/cmd/crowdsec-cli/config_showyaml.go b/cmd/crowdsec-cli/config_showyaml.go index 82bc67ffcb8..52daee6a65e 100644 --- a/cmd/crowdsec-cli/config_showyaml.go +++ b/cmd/crowdsec-cli/config_showyaml.go @@ -6,19 +6,21 @@ import ( "github.com/spf13/cobra" ) -func runConfigShowYAML(cmd *cobra.Command, args []string) error { +func (cli *cliConfig) 
showYAML() error { fmt.Println(mergedConfig) return nil } -func NewConfigShowYAMLCmd() *cobra.Command { - cmdConfigShow := &cobra.Command{ +func (cli *cliConfig) newShowYAMLCmd() *cobra.Command { + cmd := &cobra.Command{ Use: "show-yaml", Short: "Displays merged config.yaml + config.yaml.local", Args: cobra.ExactArgs(0), DisableAutoGenTag: true, - RunE: runConfigShowYAML, + RunE: func(_ *cobra.Command, _ []string) error { + return cli.showYAML() + }, } - return cmdConfigShow + return cmd } diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index 27ac17d554f..1f87390b636 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -231,7 +231,7 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewCLIDoc().NewCommand(cmd)) cmd.AddCommand(NewCLIVersion().NewCommand()) - cmd.AddCommand(NewConfigCmd()) + cmd.AddCommand(NewCLIConfig(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIHub(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIMetrics(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIDashboard(cli.cfg).NewCommand()) From 8da490f5930406180bef6f4b0b99e0b0dc86dff8 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 22 Feb 2024 11:42:33 +0100 Subject: [PATCH 034/318] refact pkg/apiclient (#2846) * extract resperr.go * extract method prepareRequest() * reset token inside mutex --- pkg/apiclient/auth_jwt.go | 37 +++++++++++++++++++++---------- pkg/apiclient/client.go | 36 ------------------------------ pkg/apiclient/resperr.go | 46 +++++++++++++++++++++++++++++++++++++++ pkg/apiserver/apic.go | 1 - 4 files changed, 72 insertions(+), 48 deletions(-) create mode 100644 pkg/apiclient/resperr.go diff --git a/pkg/apiclient/auth_jwt.go b/pkg/apiclient/auth_jwt.go index 71b0e273105..2ead10cf6da 100644 --- a/pkg/apiclient/auth_jwt.go +++ b/pkg/apiclient/auth_jwt.go @@ -130,20 +130,24 @@ func (t *JWTTransport) refreshJwtToken() error { return nil } -// RoundTrip implements the 
RoundTripper interface. -func (t *JWTTransport) RoundTrip(req *http.Request) (*http.Response, error) { - // In a few occasions several goroutines will execute refreshJwtToken concurrently which is useless and will cause overload on CAPI - // we use a mutex to avoid this - // We also bypass the refresh if we are requesting the login endpoint, as it does not require a token, and it leads to do 2 requests instead of one (refresh + actual login request) +func (t *JWTTransport) needsTokenRefresh() bool { + return t.Token == "" || t.Expiration.Add(-time.Minute).Before(time.Now().UTC()) +} + +// prepareRequest returns a copy of the request with the necessary authentication headers. +func (t *JWTTransport) prepareRequest(req *http.Request) (*http.Request, error) { + // In a few occasions several goroutines will execute refreshJwtToken concurrently which is useless + // and will cause overload on CAPI. We use a mutex to avoid this. t.refreshTokenMutex.Lock() - if req.URL.Path != "/"+t.VersionPrefix+"/watchers/login" && (t.Token == "" || t.Expiration.Add(-time.Minute).Before(time.Now().UTC())) { - if err := t.refreshJwtToken(); err != nil { - t.refreshTokenMutex.Unlock() + defer t.refreshTokenMutex.Unlock() + // We bypass the refresh if we are requesting the login endpoint, as it does not require a token, + // and it leads to do 2 requests instead of one (refresh + actual login request). + if req.URL.Path != "/"+t.VersionPrefix+"/watchers/login" && t.needsTokenRefresh() { + if err := t.refreshJwtToken(); err != nil { return nil, err } } - t.refreshTokenMutex.Unlock() if t.UserAgent != "" { req.Header.Add("User-Agent", t.UserAgent) @@ -151,6 +155,16 @@ func (t *JWTTransport) RoundTrip(req *http.Request) (*http.Response, error) { req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", t.Token)) + return req, nil +} + +// RoundTrip implements the RoundTripper interface. 
+func (t *JWTTransport) RoundTrip(req *http.Request) (*http.Response, error) { + req, err := t.prepareRequest(req) + if err != nil { + return nil, err + } + if log.GetLevel() >= log.TraceLevel { //requestToDump := cloneRequest(req) dump, _ := httputil.DumpRequest(req, true) @@ -166,7 +180,7 @@ func (t *JWTTransport) RoundTrip(req *http.Request) (*http.Response, error) { if err != nil { // we had an error (network error for example, or 401 because token is refused), reset the token? - t.Token = "" + t.ResetToken() return resp, fmt.Errorf("performing jwt auth: %w", err) } @@ -189,7 +203,8 @@ func (t *JWTTransport) ResetToken() { t.refreshTokenMutex.Unlock() } -// transport() returns a round tripper that retries once when the status is unauthorized, and 5 times when the infrastructure is overloaded. +// transport() returns a round tripper that retries once when the status is unauthorized, +// and 5 times when the infrastructure is overloaded. func (t *JWTTransport) transport() http.RoundTripper { transport := t.Transport if transport == nil { diff --git a/pkg/apiclient/client.go b/pkg/apiclient/client.go index b183a8c7909..b487f68a698 100644 --- a/pkg/apiclient/client.go +++ b/pkg/apiclient/client.go @@ -4,9 +4,7 @@ import ( "context" "crypto/tls" "crypto/x509" - "encoding/json" "fmt" - "io" "net/http" "net/url" @@ -167,44 +165,10 @@ type Response struct { //... 
} -type ErrorResponse struct { - models.ErrorResponse -} - -func (e *ErrorResponse) Error() string { - err := fmt.Sprintf("API error: %s", *e.Message) - if len(e.Errors) > 0 { - err += fmt.Sprintf(" (%s)", e.Errors) - } - - return err -} - func newResponse(r *http.Response) *Response { return &Response{Response: r} } -func CheckResponse(r *http.Response) error { - if c := r.StatusCode; 200 <= c && c <= 299 || c == 304 { - return nil - } - - errorResponse := &ErrorResponse{} - - data, err := io.ReadAll(r.Body) - if err == nil && len(data)>0 { - err := json.Unmarshal(data, errorResponse) - if err != nil { - return fmt.Errorf("http code %d, invalid body: %w", r.StatusCode, err) - } - } else { - errorResponse.Message = new(string) - *errorResponse.Message = fmt.Sprintf("http code %d, no error message", r.StatusCode) - } - - return errorResponse -} - type ListOpts struct { //Page int //PerPage int diff --git a/pkg/apiclient/resperr.go b/pkg/apiclient/resperr.go new file mode 100644 index 00000000000..ff954a73609 --- /dev/null +++ b/pkg/apiclient/resperr.go @@ -0,0 +1,46 @@ +package apiclient + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + + "github.com/crowdsecurity/go-cs-lib/ptr" + + "github.com/crowdsecurity/crowdsec/pkg/models" +) + +type ErrorResponse struct { + models.ErrorResponse +} + +func (e *ErrorResponse) Error() string { + err := fmt.Sprintf("API error: %s", *e.Message) + if len(e.Errors) > 0 { + err += fmt.Sprintf(" (%s)", e.Errors) + } + + return err +} + +// CheckResponse verifies the API response and builds an appropriate Go error if necessary. 
+func CheckResponse(r *http.Response) error { + if c := r.StatusCode; 200 <= c && c <= 299 || c == 304 { + return nil + } + + ret := &ErrorResponse{} + + data, err := io.ReadAll(r.Body) + if err != nil || len(data) == 0 { + ret.Message = ptr.Of(fmt.Sprintf("http code %d, no error message", r.StatusCode)) + return ret + } + + if err := json.Unmarshal(data, ret); err != nil { + return fmt.Errorf("http code %d, invalid body: %w", r.StatusCode, err) + } + + return ret +} diff --git a/pkg/apiserver/apic.go b/pkg/apiserver/apic.go index 2fdb01144a0..f57ae685e45 100644 --- a/pkg/apiserver/apic.go +++ b/pkg/apiserver/apic.go @@ -539,7 +539,6 @@ func createAlertForDecision(decision *models.Decision) *models.Alert { scenario = *decision.Scenario scope = types.ListOrigin default: - // XXX: this or nil? scenario = "" scope = "" From 0df8f54fbbd08ab857e153229a43cf9e3c3f258e Mon Sep 17 00:00:00 2001 From: Laurence Jones Date: Thu, 22 Feb 2024 11:18:29 +0000 Subject: [PATCH 035/318] Add unix socket option to http plugin, we have to use this in conjunction with URL parameter as we dont know which path the user wants so if they would like to communicate over unix socket they need to use both, however, the hostname can be whatever they want. 
We could be a little smarter and actually parse the url, however, increasing code when a user can just define it correctly make no sense (#2764) --- cmd/notification-http/main.go | 42 +++++++++++++++++++++-------------- 1 file changed, 25 insertions(+), 17 deletions(-) diff --git a/cmd/notification-http/main.go b/cmd/notification-http/main.go index 340d462c175..382f30fea53 100644 --- a/cmd/notification-http/main.go +++ b/cmd/notification-http/main.go @@ -7,8 +7,10 @@ import ( "crypto/x509" "fmt" "io" + "net" "net/http" "os" + "strings" "github.com/crowdsecurity/crowdsec/pkg/protobufs" "github.com/hashicorp/go-hclog" @@ -19,6 +21,7 @@ import ( type PluginConfig struct { Name string `yaml:"name"` URL string `yaml:"url"` + UnixSocket string `yaml:"unix_socket"` Headers map[string]string `yaml:"headers"` SkipTLSVerification bool `yaml:"skip_tls_verification"` Method string `yaml:"method"` @@ -66,36 +69,40 @@ func getCertPool(caPath string) (*x509.CertPool, error) { return cp, nil } -func getTLSClient(tlsVerify bool, caPath, certPath, keyPath string) (*http.Client, error) { - var client *http.Client - - caCertPool, err := getCertPool(caPath) +func getTLSClient(c *PluginConfig) error { + caCertPool, err := getCertPool(c.CAPath) if err != nil { - return nil, err + return err } tlsConfig := &tls.Config{ RootCAs: caCertPool, - InsecureSkipVerify: tlsVerify, + InsecureSkipVerify: c.SkipTLSVerification, } - if certPath != "" && keyPath != "" { - logger.Info(fmt.Sprintf("Using client certificate '%s' and key '%s'", certPath, keyPath)) + if c.CertPath != "" && c.KeyPath != "" { + logger.Info(fmt.Sprintf("Using client certificate '%s' and key '%s'", c.CertPath, c.KeyPath)) - cert, err := tls.LoadX509KeyPair(certPath, keyPath) + cert, err := tls.LoadX509KeyPair(c.CertPath, c.KeyPath) if err != nil { - return nil, fmt.Errorf("unable to load client certificate '%s' and key '%s': %w", certPath, keyPath, err) + return fmt.Errorf("unable to load client certificate '%s' and key '%s': 
%w", c.CertPath, c.KeyPath, err) } tlsConfig.Certificates = []tls.Certificate{cert} } - - client = &http.Client{ - Transport: &http.Transport{ - TLSClientConfig: tlsConfig, - }, + transport := &http.Transport{ + TLSClientConfig: tlsConfig, + } + if c.UnixSocket != "" { + logger.Info(fmt.Sprintf("Using socket '%s'", c.UnixSocket)) + transport.DialContext = func(_ context.Context, _, _ string) (net.Conn, error) { + return net.Dial("unix", strings.TrimSuffix(c.UnixSocket, "/")) + } + } + c.Client = &http.Client{ + Transport: transport, } - return client, err + return nil } func (s *HTTPPlugin) Notify(ctx context.Context, notification *protobufs.Notification) (*protobufs.Empty, error) { @@ -135,6 +142,7 @@ func (s *HTTPPlugin) Notify(ctx context.Context, notification *protobufs.Notific if resp.StatusCode < 200 || resp.StatusCode >= 300 { logger.Warn(fmt.Sprintf("HTTP server returned non 200 status code: %d", resp.StatusCode)) + logger.Debug(fmt.Sprintf("HTTP server returned body: %s", string(respData))) return &protobufs.Empty{}, nil } @@ -147,7 +155,7 @@ func (s *HTTPPlugin) Configure(ctx context.Context, config *protobufs.Config) (* if err != nil { return nil, err } - d.Client, err = getTLSClient(d.SkipTLSVerification, d.CAPath, d.CertPath, d.KeyPath) + err = getTLSClient(&d) if err != nil { return nil, err } From e34af358d7b96df49634f28696e9c1b1f01e097c Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 23 Feb 2024 10:37:04 +0100 Subject: [PATCH 036/318] refact cscli (globals) (#2854) * cscli capi: avoid globals, extract methods * cscli config restore: avoid global * cscli hubtest: avoid global * lint (whitespace, wrapped errors) --- cmd/crowdsec-cli/bouncers.go | 24 +-- cmd/crowdsec-cli/capi.go | 245 ++++++++++++++++------------- cmd/crowdsec-cli/config_restore.go | 54 ++++--- cmd/crowdsec-cli/hubtest.go | 170 +++++++++++--------- cmd/crowdsec-cli/main.go | 4 +- 5 files changed, 281 insertions(+), 216 deletions(-) diff --git 
a/cmd/crowdsec-cli/bouncers.go b/cmd/crowdsec-cli/bouncers.go index 717e9aef5fe..35f4320c520 100644 --- a/cmd/crowdsec-cli/bouncers.go +++ b/cmd/crowdsec-cli/bouncers.go @@ -3,6 +3,7 @@ package main import ( "encoding/csv" "encoding/json" + "errors" "fmt" "os" "slices" @@ -58,13 +59,16 @@ Note: This command requires database direct access, so is intended to be run on DisableAutoGenTag: true, PersistentPreRunE: func(_ *cobra.Command, _ []string) error { var err error - if err = require.LAPI(cli.cfg()); err != nil { + + cfg := cli.cfg() + + if err = require.LAPI(cfg); err != nil { return err } - cli.db, err = database.NewClient(cli.cfg().DbConfig) + cli.db, err = database.NewClient(cfg.DbConfig) if err != nil { - return fmt.Errorf("can't connect to the database: %s", err) + return fmt.Errorf("can't connect to the database: %w", err) } return nil @@ -84,7 +88,7 @@ func (cli *cliBouncers) list() error { bouncers, err := cli.db.ListBouncers() if err != nil { - return fmt.Errorf("unable to list bouncers: %s", err) + return fmt.Errorf("unable to list bouncers: %w", err) } switch cli.cfg().Cscli.Output { @@ -146,13 +150,13 @@ func (cli *cliBouncers) add(bouncerName string, key string) error { if key == "" { key, err = middlewares.GenerateAPIKey(keyLength) if err != nil { - return fmt.Errorf("unable to generate api key: %s", err) + return fmt.Errorf("unable to generate api key: %w", err) } } _, err = cli.db.CreateBouncer(bouncerName, "", middlewares.HashSHA512(key), types.ApiKeyAuthType) if err != nil { - return fmt.Errorf("unable to create bouncer: %s", err) + return fmt.Errorf("unable to create bouncer: %w", err) } switch cli.cfg().Cscli.Output { @@ -165,7 +169,7 @@ func (cli *cliBouncers) add(bouncerName string, key string) error { case "json": j, err := json.Marshal(key) if err != nil { - return fmt.Errorf("unable to marshal api key") + return errors.New("unable to marshal api key") } fmt.Print(string(j)) @@ -191,7 +195,7 @@ cscli bouncers add MyBouncerName --key `, 
flags := cmd.Flags() flags.StringP("length", "l", "", "length of the api key") - flags.MarkDeprecated("length", "use --key instead") + _ = flags.MarkDeprecated("length", "use --key instead") flags.StringVarP(&key, "key", "k", "", "api key for the bouncer") return cmd @@ -218,7 +222,7 @@ func (cli *cliBouncers) delete(bouncers []string) error { for _, bouncerID := range bouncers { err := cli.db.DeleteBouncer(bouncerID) if err != nil { - return fmt.Errorf("unable to delete bouncer '%s': %s", bouncerID, err) + return fmt.Errorf("unable to delete bouncer '%s': %w", bouncerID, err) } log.Infof("bouncer '%s' deleted successfully", bouncerID) @@ -280,7 +284,7 @@ func (cli *cliBouncers) prune(duration time.Duration, force bool) error { deleted, err := cli.db.BulkDeleteBouncers(bouncers) if err != nil { - return fmt.Errorf("unable to prune bouncers: %s", err) + return fmt.Errorf("unable to prune bouncers: %w", err) } fmt.Fprintf(os.Stderr, "Successfully deleted %d bouncers\n", deleted) diff --git a/cmd/crowdsec-cli/capi.go b/cmd/crowdsec-cli/capi.go index 358d91ee215..e56a8a74707 100644 --- a/cmd/crowdsec-cli/capi.go +++ b/cmd/crowdsec-cli/capi.go @@ -2,6 +2,7 @@ package main import ( "context" + "errors" "fmt" "net/url" "os" @@ -26,24 +27,29 @@ const ( CAPIURLPrefix = "v3" ) -type cliCapi struct{} +type cliCapi struct { + cfg configGetter +} -func NewCLICapi() *cliCapi { - return &cliCapi{} +func NewCLICapi(cfg configGetter) *cliCapi { + return &cliCapi{ + cfg: cfg, + } } -func (cli cliCapi) NewCommand() *cobra.Command { - var cmd = &cobra.Command{ +func (cli *cliCapi) NewCommand() *cobra.Command { + cmd := &cobra.Command{ Use: "capi [action]", Short: "Manage interaction with Central API (CAPI)", Args: cobra.MinimumNArgs(1), DisableAutoGenTag: true, PersistentPreRunE: func(_ *cobra.Command, _ []string) error { - if err := require.LAPI(csConfig); err != nil { + cfg := cli.cfg() + if err := require.LAPI(cfg); err != nil { return err } - if err := require.CAPI(csConfig); err 
!= nil { + if err := require.CAPI(cfg); err != nil { return err } @@ -51,78 +57,92 @@ func (cli cliCapi) NewCommand() *cobra.Command { }, } - cmd.AddCommand(cli.NewRegisterCmd()) - cmd.AddCommand(cli.NewStatusCmd()) + cmd.AddCommand(cli.newRegisterCmd()) + cmd.AddCommand(cli.newStatusCmd()) return cmd } -func (cli cliCapi) NewRegisterCmd() *cobra.Command { +func (cli *cliCapi) register(capiUserPrefix string, outputFile string) error { + cfg := cli.cfg() + + capiUser, err := generateID(capiUserPrefix) + if err != nil { + return fmt.Errorf("unable to generate machine id: %w", err) + } + + password := strfmt.Password(generatePassword(passwordLength)) + + apiurl, err := url.Parse(types.CAPIBaseURL) + if err != nil { + return fmt.Errorf("unable to parse api url %s: %w", types.CAPIBaseURL, err) + } + + _, err = apiclient.RegisterClient(&apiclient.Config{ + MachineID: capiUser, + Password: password, + UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), + URL: apiurl, + VersionPrefix: CAPIURLPrefix, + }, nil) + + if err != nil { + return fmt.Errorf("api client register ('%s'): %w", types.CAPIBaseURL, err) + } + + log.Infof("Successfully registered to Central API (CAPI)") + + var dumpFile string + + switch { + case outputFile != "": + dumpFile = outputFile + case cfg.API.Server.OnlineClient.CredentialsFilePath != "": + dumpFile = cfg.API.Server.OnlineClient.CredentialsFilePath + default: + dumpFile = "" + } + + apiCfg := csconfig.ApiCredentialsCfg{ + Login: capiUser, + Password: password.String(), + URL: types.CAPIBaseURL, + } + + apiConfigDump, err := yaml.Marshal(apiCfg) + if err != nil { + return fmt.Errorf("unable to marshal api credentials: %w", err) + } + + if dumpFile != "" { + err = os.WriteFile(dumpFile, apiConfigDump, 0o600) + if err != nil { + return fmt.Errorf("write api credentials in '%s' failed: %w", dumpFile, err) + } + + log.Infof("Central API credentials written to '%s'", dumpFile) + } else { + fmt.Println(string(apiConfigDump)) + } + + 
log.Warning(ReloadMessage()) + + return nil +} + +func (cli *cliCapi) newRegisterCmd() *cobra.Command { var ( capiUserPrefix string - outputFile string + outputFile string ) - var cmd = &cobra.Command{ + cmd := &cobra.Command{ Use: "register", Short: "Register to Central API (CAPI)", Args: cobra.MinimumNArgs(0), DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { - var err error - capiUser, err := generateID(capiUserPrefix) - if err != nil { - return fmt.Errorf("unable to generate machine id: %s", err) - } - password := strfmt.Password(generatePassword(passwordLength)) - apiurl, err := url.Parse(types.CAPIBaseURL) - if err != nil { - return fmt.Errorf("unable to parse api url %s: %w", types.CAPIBaseURL, err) - } - _, err = apiclient.RegisterClient(&apiclient.Config{ - MachineID: capiUser, - Password: password, - UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), - URL: apiurl, - VersionPrefix: CAPIURLPrefix, - }, nil) - - if err != nil { - return fmt.Errorf("api client register ('%s'): %w", types.CAPIBaseURL, err) - } - log.Printf("Successfully registered to Central API (CAPI)") - - var dumpFile string - - if outputFile != "" { - dumpFile = outputFile - } else if csConfig.API.Server.OnlineClient.CredentialsFilePath != "" { - dumpFile = csConfig.API.Server.OnlineClient.CredentialsFilePath - } else { - dumpFile = "" - } - apiCfg := csconfig.ApiCredentialsCfg{ - Login: capiUser, - Password: password.String(), - URL: types.CAPIBaseURL, - } - apiConfigDump, err := yaml.Marshal(apiCfg) - if err != nil { - return fmt.Errorf("unable to marshal api credentials: %w", err) - } - if dumpFile != "" { - err = os.WriteFile(dumpFile, apiConfigDump, 0o600) - if err != nil { - return fmt.Errorf("write api credentials in '%s' failed: %w", dumpFile, err) - } - log.Printf("Central API credentials written to '%s'", dumpFile) - } else { - fmt.Println(string(apiConfigDump)) - } - - log.Warning(ReloadMessage()) - - return nil + return 
cli.register(capiUserPrefix, outputFile) }, } @@ -136,59 +156,66 @@ func (cli cliCapi) NewRegisterCmd() *cobra.Command { return cmd } -func (cli cliCapi) NewStatusCmd() *cobra.Command { - cmd := &cobra.Command{ - Use: "status", - Short: "Check status with the Central API (CAPI)", - Args: cobra.MinimumNArgs(0), - DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, _ []string) error { - if err := require.CAPIRegistered(csConfig); err != nil { - return err - } +func (cli *cliCapi) status() error { + cfg := cli.cfg() - password := strfmt.Password(csConfig.API.Server.OnlineClient.Credentials.Password) + if err := require.CAPIRegistered(cfg); err != nil { + return err + } - apiurl, err := url.Parse(csConfig.API.Server.OnlineClient.Credentials.URL) - if err != nil { - return fmt.Errorf("parsing api url ('%s'): %w", csConfig.API.Server.OnlineClient.Credentials.URL, err) - } + password := strfmt.Password(cfg.API.Server.OnlineClient.Credentials.Password) - hub, err := require.Hub(csConfig, nil, nil) - if err != nil { - return err - } + apiurl, err := url.Parse(cfg.API.Server.OnlineClient.Credentials.URL) + if err != nil { + return fmt.Errorf("parsing api url ('%s'): %w", cfg.API.Server.OnlineClient.Credentials.URL, err) + } - scenarios, err := hub.GetInstalledItemNames(cwhub.SCENARIOS) - if err != nil { - return fmt.Errorf("failed to get scenarios: %w", err) - } + hub, err := require.Hub(cfg, nil, nil) + if err != nil { + return err + } - if len(scenarios) == 0 { - return fmt.Errorf("no scenarios installed, abort") - } + scenarios, err := hub.GetInstalledItemNames(cwhub.SCENARIOS) + if err != nil { + return fmt.Errorf("failed to get scenarios: %w", err) + } - Client, err = apiclient.NewDefaultClient(apiurl, CAPIURLPrefix, fmt.Sprintf("crowdsec/%s", version.String()), nil) - if err != nil { - return fmt.Errorf("init default client: %w", err) - } + if len(scenarios) == 0 { + return errors.New("no scenarios installed, abort") + } - t := models.WatcherAuthRequest{ - 
MachineID: &csConfig.API.Server.OnlineClient.Credentials.Login, - Password: &password, - Scenarios: scenarios, - } + Client, err = apiclient.NewDefaultClient(apiurl, CAPIURLPrefix, fmt.Sprintf("crowdsec/%s", version.String()), nil) + if err != nil { + return fmt.Errorf("init default client: %w", err) + } - log.Infof("Loaded credentials from %s", csConfig.API.Server.OnlineClient.CredentialsFilePath) - log.Infof("Trying to authenticate with username %s on %s", csConfig.API.Server.OnlineClient.Credentials.Login, apiurl) + t := models.WatcherAuthRequest{ + MachineID: &cfg.API.Server.OnlineClient.Credentials.Login, + Password: &password, + Scenarios: scenarios, + } - _, _, err = Client.Auth.AuthenticateWatcher(context.Background(), t) - if err != nil { - return fmt.Errorf("failed to authenticate to Central API (CAPI): %w", err) - } - log.Infof("You can successfully interact with Central API (CAPI)") + log.Infof("Loaded credentials from %s", cfg.API.Server.OnlineClient.CredentialsFilePath) + log.Infof("Trying to authenticate with username %s on %s", cfg.API.Server.OnlineClient.Credentials.Login, apiurl) - return nil + _, _, err = Client.Auth.AuthenticateWatcher(context.Background(), t) + if err != nil { + return fmt.Errorf("failed to authenticate to Central API (CAPI): %w", err) + } + + log.Info("You can successfully interact with Central API (CAPI)") + + return nil +} + +func (cli *cliCapi) newStatusCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "status", + Short: "Check status with the Central API (CAPI)", + Args: cobra.MinimumNArgs(0), + DisableAutoGenTag: true, + RunE: func(_ *cobra.Command, _ []string) error { + return cli.status() }, } diff --git a/cmd/crowdsec-cli/config_restore.go b/cmd/crowdsec-cli/config_restore.go index 513f993ba80..ee7179b73c5 100644 --- a/cmd/crowdsec-cli/config_restore.go +++ b/cmd/crowdsec-cli/config_restore.go @@ -14,7 +14,9 @@ import ( ) func (cli *cliConfig) restoreHub(dirPath string) error { - hub, err := require.Hub(csConfig, 
require.RemoteHub(csConfig), nil) + cfg := cli.cfg() + + hub, err := require.Hub(cfg, require.RemoteHub(cfg), nil) if err != nil { return err } @@ -71,7 +73,7 @@ func (cli *cliConfig) restoreHub(dirPath string) error { } stage := file.Name() - stagedir := fmt.Sprintf("%s/%s/%s/", csConfig.ConfigPaths.ConfigDir, itype, stage) + stagedir := fmt.Sprintf("%s/%s/%s/", cfg.ConfigPaths.ConfigDir, itype, stage) log.Debugf("Found stage %s in %s, target directory : %s", stage, itype, stagedir) if err = os.MkdirAll(stagedir, os.ModePerm); err != nil { @@ -99,7 +101,7 @@ func (cli *cliConfig) restoreHub(dirPath string) error { } else { log.Infof("Going to restore local/tainted [%s]", file.Name()) sourceFile := fmt.Sprintf("%s/%s", itemDirectory, file.Name()) - destinationFile := fmt.Sprintf("%s/%s/%s", csConfig.ConfigPaths.ConfigDir, itype, file.Name()) + destinationFile := fmt.Sprintf("%s/%s/%s", cfg.ConfigPaths.ConfigDir, itype, file.Name()) if err = CopyFile(sourceFile, destinationFile); err != nil { return fmt.Errorf("failed copy %s %s to %s: %w", itype, sourceFile, destinationFile, err) @@ -127,17 +129,19 @@ func (cli *cliConfig) restoreHub(dirPath string) error { func (cli *cliConfig) restore(dirPath string) error { var err error + cfg := cli.cfg() + backupMain := fmt.Sprintf("%s/config.yaml", dirPath) if _, err = os.Stat(backupMain); err == nil { - if csConfig.ConfigPaths != nil && csConfig.ConfigPaths.ConfigDir != "" { - if err = CopyFile(backupMain, fmt.Sprintf("%s/config.yaml", csConfig.ConfigPaths.ConfigDir)); err != nil { - return fmt.Errorf("failed copy %s to %s: %w", backupMain, csConfig.ConfigPaths.ConfigDir, err) + if cfg.ConfigPaths != nil && cfg.ConfigPaths.ConfigDir != "" { + if err = CopyFile(backupMain, fmt.Sprintf("%s/config.yaml", cfg.ConfigPaths.ConfigDir)); err != nil { + return fmt.Errorf("failed copy %s to %s: %w", backupMain, cfg.ConfigPaths.ConfigDir, err) } } } // Now we have config.yaml, we should regenerate config struct to have rights paths etc 
- ConfigFilePath = fmt.Sprintf("%s/config.yaml", csConfig.ConfigPaths.ConfigDir) + ConfigFilePath = fmt.Sprintf("%s/config.yaml", cfg.ConfigPaths.ConfigDir) log.Debug("Reloading configuration") @@ -146,38 +150,40 @@ func (cli *cliConfig) restore(dirPath string) error { return fmt.Errorf("failed to reload configuration: %w", err) } + cfg = cli.cfg() + backupCAPICreds := fmt.Sprintf("%s/online_api_credentials.yaml", dirPath) if _, err = os.Stat(backupCAPICreds); err == nil { - if err = CopyFile(backupCAPICreds, csConfig.API.Server.OnlineClient.CredentialsFilePath); err != nil { - return fmt.Errorf("failed copy %s to %s: %w", backupCAPICreds, csConfig.API.Server.OnlineClient.CredentialsFilePath, err) + if err = CopyFile(backupCAPICreds, cfg.API.Server.OnlineClient.CredentialsFilePath); err != nil { + return fmt.Errorf("failed copy %s to %s: %w", backupCAPICreds, cfg.API.Server.OnlineClient.CredentialsFilePath, err) } } backupLAPICreds := fmt.Sprintf("%s/local_api_credentials.yaml", dirPath) if _, err = os.Stat(backupLAPICreds); err == nil { - if err = CopyFile(backupLAPICreds, csConfig.API.Client.CredentialsFilePath); err != nil { - return fmt.Errorf("failed copy %s to %s: %w", backupLAPICreds, csConfig.API.Client.CredentialsFilePath, err) + if err = CopyFile(backupLAPICreds, cfg.API.Client.CredentialsFilePath); err != nil { + return fmt.Errorf("failed copy %s to %s: %w", backupLAPICreds, cfg.API.Client.CredentialsFilePath, err) } } backupProfiles := fmt.Sprintf("%s/profiles.yaml", dirPath) if _, err = os.Stat(backupProfiles); err == nil { - if err = CopyFile(backupProfiles, csConfig.API.Server.ProfilesPath); err != nil { - return fmt.Errorf("failed copy %s to %s: %w", backupProfiles, csConfig.API.Server.ProfilesPath, err) + if err = CopyFile(backupProfiles, cfg.API.Server.ProfilesPath); err != nil { + return fmt.Errorf("failed copy %s to %s: %w", backupProfiles, cfg.API.Server.ProfilesPath, err) } } backupSimulation := fmt.Sprintf("%s/simulation.yaml", dirPath) if _, 
err = os.Stat(backupSimulation); err == nil { - if err = CopyFile(backupSimulation, csConfig.ConfigPaths.SimulationFilePath); err != nil { - return fmt.Errorf("failed copy %s to %s: %w", backupSimulation, csConfig.ConfigPaths.SimulationFilePath, err) + if err = CopyFile(backupSimulation, cfg.ConfigPaths.SimulationFilePath); err != nil { + return fmt.Errorf("failed copy %s to %s: %w", backupSimulation, cfg.ConfigPaths.SimulationFilePath, err) } } /*if there is a acquisition dir, restore its content*/ - if csConfig.Crowdsec.AcquisitionDirPath != "" { - if err = os.MkdirAll(csConfig.Crowdsec.AcquisitionDirPath, 0o700); err != nil { - return fmt.Errorf("error while creating %s: %w", csConfig.Crowdsec.AcquisitionDirPath, err) + if cfg.Crowdsec.AcquisitionDirPath != "" { + if err = os.MkdirAll(cfg.Crowdsec.AcquisitionDirPath, 0o700); err != nil { + return fmt.Errorf("error while creating %s: %w", cfg.Crowdsec.AcquisitionDirPath, err) } } @@ -186,8 +192,8 @@ func (cli *cliConfig) restore(dirPath string) error { if _, err = os.Stat(backupAcquisition); err == nil { log.Debugf("restoring backup'ed %s", backupAcquisition) - if err = CopyFile(backupAcquisition, csConfig.Crowdsec.AcquisitionFilePath); err != nil { - return fmt.Errorf("failed copy %s to %s: %w", backupAcquisition, csConfig.Crowdsec.AcquisitionFilePath, err) + if err = CopyFile(backupAcquisition, cfg.Crowdsec.AcquisitionFilePath); err != nil { + return fmt.Errorf("failed copy %s to %s: %w", backupAcquisition, cfg.Crowdsec.AcquisitionFilePath, err) } } @@ -195,7 +201,7 @@ func (cli *cliConfig) restore(dirPath string) error { acquisBackupDir := filepath.Join(dirPath, "acquis", "*.yaml") if acquisFiles, err := filepath.Glob(acquisBackupDir); err == nil { for _, acquisFile := range acquisFiles { - targetFname, err := filepath.Abs(csConfig.Crowdsec.AcquisitionDirPath + "/" + filepath.Base(acquisFile)) + targetFname, err := filepath.Abs(cfg.Crowdsec.AcquisitionDirPath + "/" + filepath.Base(acquisFile)) if err != nil { 
return fmt.Errorf("while saving %s to %s: %w", acquisFile, targetFname, err) } @@ -208,12 +214,12 @@ func (cli *cliConfig) restore(dirPath string) error { } } - if csConfig.Crowdsec != nil && len(csConfig.Crowdsec.AcquisitionFiles) > 0 { - for _, acquisFile := range csConfig.Crowdsec.AcquisitionFiles { + if cfg.Crowdsec != nil && len(cfg.Crowdsec.AcquisitionFiles) > 0 { + for _, acquisFile := range cfg.Crowdsec.AcquisitionFiles { log.Infof("backup filepath from dir -> %s", acquisFile) // if it was the default one, it has already been backed up - if csConfig.Crowdsec.AcquisitionFilePath == acquisFile { + if cfg.Crowdsec.AcquisitionFilePath == acquisFile { log.Infof("skip this one") continue } diff --git a/cmd/crowdsec-cli/hubtest.go b/cmd/crowdsec-cli/hubtest.go index 1860540e7dc..8f5ab087370 100644 --- a/cmd/crowdsec-cli/hubtest.go +++ b/cmd/crowdsec-cli/hubtest.go @@ -2,6 +2,7 @@ package main import ( "encoding/json" + "errors" "fmt" "math" "os" @@ -20,21 +21,29 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/hubtest" ) -var HubTest hubtest.HubTest -var HubAppsecTests hubtest.HubTest -var hubPtr *hubtest.HubTest -var isAppsecTest bool +var ( + HubTest hubtest.HubTest + HubAppsecTests hubtest.HubTest + hubPtr *hubtest.HubTest + isAppsecTest bool +) -type cliHubTest struct{} +type cliHubTest struct { + cfg configGetter +} -func NewCLIHubTest() *cliHubTest { - return &cliHubTest{} +func NewCLIHubTest(cfg configGetter) *cliHubTest { + return &cliHubTest{ + cfg: cfg, + } } -func (cli cliHubTest) NewCommand() *cobra.Command { - var hubPath string - var crowdsecPath string - var cscliPath string +func (cli *cliHubTest) NewCommand() *cobra.Command { + var ( + hubPath string + crowdsecPath string + cscliPath string + ) cmd := &cobra.Command{ Use: "hubtest", @@ -53,11 +62,13 @@ func (cli cliHubTest) NewCommand() *cobra.Command { if err != nil { return fmt.Errorf("unable to load appsec specific hubtest: %+v", err) } - /*commands will use the hubPtr, will point to the 
default hubTest object, or the one dedicated to appsec tests*/ + + // commands will use the hubPtr, will point to the default hubTest object, or the one dedicated to appsec tests hubPtr = &HubTest if isAppsecTest { hubPtr = &HubAppsecTests } + return nil }, } @@ -79,13 +90,16 @@ func (cli cliHubTest) NewCommand() *cobra.Command { return cmd } -func (cli cliHubTest) NewCreateCmd() *cobra.Command { +func (cli *cliHubTest) NewCreateCmd() *cobra.Command { + var ( + ignoreParsers bool + labels map[string]string + logType string + ) + parsers := []string{} postoverflows := []string{} scenarios := []string{} - var ignoreParsers bool - var labels map[string]string - var logType string cmd := &cobra.Command{ Use: "create", @@ -107,7 +121,7 @@ cscli hubtest create my-scenario-test --parsers crowdsecurity/nginx --scenarios } if logType == "" { - return fmt.Errorf("please provide a type (--type) for the test") + return errors.New("please provide a type (--type) for the test") } if err := os.MkdirAll(testPath, os.ModePerm); err != nil { @@ -118,7 +132,7 @@ cscli hubtest create my-scenario-test --parsers crowdsecurity/nginx --scenarios configFileData := &hubtest.HubTestItemConfig{} if logType == "appsec" { - //create empty nuclei template file + // create empty nuclei template file nucleiFileName := fmt.Sprintf("%s.yaml", testName) nucleiFilePath := filepath.Join(testPath, nucleiFileName) nucleiFile, err := os.OpenFile(nucleiFilePath, os.O_RDWR|os.O_CREATE, 0755) @@ -128,7 +142,7 @@ cscli hubtest create my-scenario-test --parsers crowdsecurity/nginx --scenarios ntpl := template.Must(template.New("nuclei").Parse(hubtest.TemplateNucleiFile)) if ntpl == nil { - return fmt.Errorf("unable to parse nuclei template") + return errors.New("unable to parse nuclei template") } ntpl.ExecuteTemplate(nucleiFile, "nuclei", struct{ TestName string }{TestName: testName}) nucleiFile.Close() @@ -188,24 +202,24 @@ cscli hubtest create my-scenario-test --parsers crowdsecurity/nginx --scenarios 
fmt.Printf(" Parser assertion file : %s (please fill it with assertion)\n", parserAssertFilePath) fmt.Printf(" Scenario assertion file : %s (please fill it with assertion)\n", scenarioAssertFilePath) fmt.Printf(" Configuration File : %s (please fill it with parsers, scenarios...)\n", configFilePath) - } fd, err := os.Create(configFilePath) if err != nil { - return fmt.Errorf("open: %s", err) + return fmt.Errorf("open: %w", err) } data, err := yaml.Marshal(configFileData) if err != nil { - return fmt.Errorf("marshal: %s", err) + return fmt.Errorf("marshal: %w", err) } _, err = fd.Write(data) if err != nil { - return fmt.Errorf("write: %s", err) + return fmt.Errorf("write: %w", err) } if err := fd.Close(); err != nil { - return fmt.Errorf("close: %s", err) + return fmt.Errorf("close: %w", err) } + return nil }, } @@ -219,20 +233,25 @@ cscli hubtest create my-scenario-test --parsers crowdsecurity/nginx --scenarios return cmd } -func (cli cliHubTest) NewRunCmd() *cobra.Command { - var noClean bool - var runAll bool - var forceClean bool - var NucleiTargetHost string - var AppSecHost string - var cmd = &cobra.Command{ +func (cli *cliHubTest) NewRunCmd() *cobra.Command { + var ( + noClean bool + runAll bool + forceClean bool + NucleiTargetHost string + AppSecHost string + ) + + cmd := &cobra.Command{ Use: "run", Short: "run [test_name]", DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { + cfg := cli.cfg() + if !runAll && len(args) == 0 { printHelp(cmd) - return fmt.Errorf("please provide test to run or --all flag") + return errors.New("please provide test to run or --all flag") } hubPtr.NucleiTargetHost = NucleiTargetHost hubPtr.AppSecHost = AppSecHost @@ -244,7 +263,7 @@ func (cli cliHubTest) NewRunCmd() *cobra.Command { for _, testName := range args { _, err := hubPtr.LoadTestItem(testName) if err != nil { - return fmt.Errorf("unable to load test '%s': %s", testName, err) + return fmt.Errorf("unable to load test '%s': %w", testName, err) } 
} } @@ -252,7 +271,7 @@ func (cli cliHubTest) NewRunCmd() *cobra.Command { // set timezone to avoid DST issues os.Setenv("TZ", "UTC") for _, test := range hubPtr.Tests { - if csConfig.Cscli.Output == "human" { + if cfg.Cscli.Output == "human" { log.Infof("Running test '%s'", test.Name) } err := test.Run() @@ -264,6 +283,8 @@ func (cli cliHubTest) NewRunCmd() *cobra.Command { return nil }, PersistentPostRunE: func(_ *cobra.Command, _ []string) error { + cfg := cli.cfg() + success := true testResult := make(map[string]bool) for _, test := range hubPtr.Tests { @@ -280,7 +301,7 @@ func (cli cliHubTest) NewRunCmd() *cobra.Command { } if !noClean { if err := test.Clean(); err != nil { - return fmt.Errorf("unable to clean test '%s' env: %s", test.Name, err) + return fmt.Errorf("unable to clean test '%s' env: %w", test.Name, err) } } fmt.Printf("\nPlease fill your assert file(s) for test '%s', exiting\n", test.Name) @@ -288,18 +309,18 @@ func (cli cliHubTest) NewRunCmd() *cobra.Command { } testResult[test.Name] = test.Success if test.Success { - if csConfig.Cscli.Output == "human" { + if cfg.Cscli.Output == "human" { log.Infof("Test '%s' passed successfully (%d assertions)\n", test.Name, test.ParserAssert.NbAssert+test.ScenarioAssert.NbAssert) } if !noClean { if err := test.Clean(); err != nil { - return fmt.Errorf("unable to clean test '%s' env: %s", test.Name, err) + return fmt.Errorf("unable to clean test '%s' env: %w", test.Name, err) } } } else { success = false cleanTestEnv := false - if csConfig.Cscli.Output == "human" { + if cfg.Cscli.Output == "human" { if len(test.ParserAssert.Fails) > 0 { fmt.Println() log.Errorf("Parser test '%s' failed (%d errors)\n", test.Name, len(test.ParserAssert.Fails)) @@ -330,20 +351,20 @@ func (cli cliHubTest) NewRunCmd() *cobra.Command { Default: true, } if err := survey.AskOne(prompt, &cleanTestEnv); err != nil { - return fmt.Errorf("unable to ask to remove runtime folder: %s", err) + return fmt.Errorf("unable to ask to remove 
runtime folder: %w", err) } } } if cleanTestEnv || forceClean { if err := test.Clean(); err != nil { - return fmt.Errorf("unable to clean test '%s' env: %s", test.Name, err) + return fmt.Errorf("unable to clean test '%s' env: %w", test.Name, err) } } } } - switch csConfig.Cscli.Output { + switch cfg.Cscli.Output { case "human": hubTestResultTable(color.Output, testResult) case "json": @@ -359,11 +380,11 @@ func (cli cliHubTest) NewRunCmd() *cobra.Command { } jsonStr, err := json.Marshal(jsonResult) if err != nil { - return fmt.Errorf("unable to json test result: %s", err) + return fmt.Errorf("unable to json test result: %w", err) } fmt.Println(string(jsonStr)) default: - return fmt.Errorf("only human/json output modes are supported") + return errors.New("only human/json output modes are supported") } if !success { @@ -383,7 +404,7 @@ func (cli cliHubTest) NewRunCmd() *cobra.Command { return cmd } -func (cli cliHubTest) NewCleanCmd() *cobra.Command { +func (cli *cliHubTest) NewCleanCmd() *cobra.Command { var cmd = &cobra.Command{ Use: "clean", Short: "clean [test_name]", @@ -393,10 +414,10 @@ func (cli cliHubTest) NewCleanCmd() *cobra.Command { for _, testName := range args { test, err := hubPtr.LoadTestItem(testName) if err != nil { - return fmt.Errorf("unable to load test '%s': %s", testName, err) + return fmt.Errorf("unable to load test '%s': %w", testName, err) } if err := test.Clean(); err != nil { - return fmt.Errorf("unable to clean test '%s' env: %s", test.Name, err) + return fmt.Errorf("unable to clean test '%s' env: %w", test.Name, err) } } @@ -407,7 +428,7 @@ func (cli cliHubTest) NewCleanCmd() *cobra.Command { return cmd } -func (cli cliHubTest) NewInfoCmd() *cobra.Command { +func (cli *cliHubTest) NewInfoCmd() *cobra.Command { cmd := &cobra.Command{ Use: "info", Short: "info [test_name]", @@ -417,7 +438,7 @@ func (cli cliHubTest) NewInfoCmd() *cobra.Command { for _, testName := range args { test, err := hubPtr.LoadTestItem(testName) if err != nil { - 
return fmt.Errorf("unable to load test '%s': %s", testName, err) + return fmt.Errorf("unable to load test '%s': %w", testName, err) } fmt.Println() fmt.Printf(" Test name : %s\n", test.Name) @@ -440,17 +461,19 @@ func (cli cliHubTest) NewInfoCmd() *cobra.Command { return cmd } -func (cli cliHubTest) NewListCmd() *cobra.Command { +func (cli *cliHubTest) NewListCmd() *cobra.Command { cmd := &cobra.Command{ Use: "list", Short: "list", DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { + cfg := cli.cfg() + if err := hubPtr.LoadAllTests(); err != nil { - return fmt.Errorf("unable to load all tests: %s", err) + return fmt.Errorf("unable to load all tests: %w", err) } - switch csConfig.Cscli.Output { + switch cfg.Cscli.Output { case "human": hubTestListTable(color.Output, hubPtr.Tests) case "json": @@ -460,7 +483,7 @@ func (cli cliHubTest) NewListCmd() *cobra.Command { } fmt.Println(string(j)) default: - return fmt.Errorf("only human/json output modes are supported") + return errors.New("only human/json output modes are supported") } return nil @@ -470,18 +493,22 @@ func (cli cliHubTest) NewListCmd() *cobra.Command { return cmd } -func (cli cliHubTest) NewCoverageCmd() *cobra.Command { - var showParserCov bool - var showScenarioCov bool - var showOnlyPercent bool - var showAppsecCov bool +func (cli *cliHubTest) NewCoverageCmd() *cobra.Command { + var ( + showParserCov bool + showScenarioCov bool + showOnlyPercent bool + showAppsecCov bool + ) cmd := &cobra.Command{ Use: "coverage", Short: "coverage", DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { - //for this one we explicitly don't do for appsec + cfg := cli.cfg() + + // for this one we explicitly don't do for appsec if err := HubTest.LoadAllTests(); err != nil { return fmt.Errorf("unable to load all tests: %+v", err) } @@ -499,7 +526,7 @@ func (cli cliHubTest) NewCoverageCmd() *cobra.Command { if showParserCov || showAll { parserCoverage, err = 
HubTest.GetParsersCoverage() if err != nil { - return fmt.Errorf("while getting parser coverage: %s", err) + return fmt.Errorf("while getting parser coverage: %w", err) } parserTested := 0 for _, test := range parserCoverage { @@ -513,7 +540,7 @@ func (cli cliHubTest) NewCoverageCmd() *cobra.Command { if showScenarioCov || showAll { scenarioCoverage, err = HubTest.GetScenariosCoverage() if err != nil { - return fmt.Errorf("while getting scenario coverage: %s", err) + return fmt.Errorf("while getting scenario coverage: %w", err) } scenarioTested := 0 @@ -529,7 +556,7 @@ func (cli cliHubTest) NewCoverageCmd() *cobra.Command { if showAppsecCov || showAll { appsecRuleCoverage, err = HubTest.GetAppsecCoverage() if err != nil { - return fmt.Errorf("while getting scenario coverage: %s", err) + return fmt.Errorf("while getting scenario coverage: %w", err) } appsecRuleTested := 0 @@ -542,19 +569,20 @@ func (cli cliHubTest) NewCoverageCmd() *cobra.Command { } if showOnlyPercent { - if showAll { + switch { + case showAll: fmt.Printf("parsers=%d%%\nscenarios=%d%%\nappsec_rules=%d%%", parserCoveragePercent, scenarioCoveragePercent, appsecRuleCoveragePercent) - } else if showParserCov { + case showParserCov: fmt.Printf("parsers=%d%%", parserCoveragePercent) - } else if showScenarioCov { + case showScenarioCov: fmt.Printf("scenarios=%d%%", scenarioCoveragePercent) - } else if showAppsecCov { + case showAppsecCov: fmt.Printf("appsec_rules=%d%%", appsecRuleCoveragePercent) } os.Exit(0) } - switch csConfig.Cscli.Output { + switch cfg.Cscli.Output { case "human": if showParserCov || showAll { hubTestParserCoverageTable(color.Output, parserCoverage) @@ -595,7 +623,7 @@ func (cli cliHubTest) NewCoverageCmd() *cobra.Command { } fmt.Printf("%s", dump) default: - return fmt.Errorf("only human/json output modes are supported") + return errors.New("only human/json output modes are supported") } return nil @@ -610,7 +638,7 @@ func (cli cliHubTest) NewCoverageCmd() *cobra.Command { return cmd 
} -func (cli cliHubTest) NewEvalCmd() *cobra.Command { +func (cli *cliHubTest) NewEvalCmd() *cobra.Command { var evalExpression string cmd := &cobra.Command{ @@ -647,7 +675,7 @@ func (cli cliHubTest) NewEvalCmd() *cobra.Command { return cmd } -func (cli cliHubTest) NewExplainCmd() *cobra.Command { +func (cli *cliHubTest) NewExplainCmd() *cobra.Command { cmd := &cobra.Command{ Use: "explain", Short: "explain [test_name]", @@ -666,7 +694,7 @@ func (cli cliHubTest) NewExplainCmd() *cobra.Command { } if err = test.ParserAssert.LoadTest(test.ParserResultFile); err != nil { - return fmt.Errorf("unable to load parser result after run: %s", err) + return fmt.Errorf("unable to load parser result after run: %w", err) } } @@ -677,7 +705,7 @@ func (cli cliHubTest) NewExplainCmd() *cobra.Command { } if err = test.ScenarioAssert.LoadTest(test.ScenarioResultFile, test.BucketPourResultFile); err != nil { - return fmt.Errorf("unable to load scenario result after run: %s", err) + return fmt.Errorf("unable to load scenario result after run: %w", err) } } opts := dumps.DumpOpts{} diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index 1f87390b636..446901e4aa9 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -240,12 +240,12 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewCLISimulation(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIBouncers(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIMachines(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLICapi().NewCommand()) + cmd.AddCommand(NewCLICapi(cli.cfg).NewCommand()) cmd.AddCommand(NewCLILapi(cli.cfg).NewCommand()) cmd.AddCommand(NewCompletionCmd()) cmd.AddCommand(NewCLIConsole(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIExplain(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLIHubTest().NewCommand()) + cmd.AddCommand(NewCLIHubTest(cli.cfg).NewCommand()) cmd.AddCommand(NewCLINotifications(cli.cfg).NewCommand()) cmd.AddCommand(NewCLISupport().NewCommand()) 
cmd.AddCommand(NewCLIPapi(cli.cfg).NewCommand()) From 4bf640c6e86185b506fde7332a338ccf2eb711ca Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 23 Feb 2024 14:03:50 +0100 Subject: [PATCH 037/318] refact pkg/apiserver (auth helpers) (#2856) --- pkg/apiserver/controllers/v1/alerts.go | 5 +--- pkg/apiserver/controllers/v1/heartbeat.go | 5 +--- pkg/apiserver/controllers/v1/metrics.go | 34 ++++++++++------------- pkg/apiserver/controllers/v1/utils.go | 32 +++++++++++++++++---- pkg/apiserver/middlewares/v1/api_key.go | 11 ++------ pkg/apiserver/middlewares/v1/jwt.go | 8 +++--- 6 files changed, 50 insertions(+), 45 deletions(-) diff --git a/pkg/apiserver/controllers/v1/alerts.go b/pkg/apiserver/controllers/v1/alerts.go index e7d106d72a3..ad183e4ba80 100644 --- a/pkg/apiserver/controllers/v1/alerts.go +++ b/pkg/apiserver/controllers/v1/alerts.go @@ -9,7 +9,6 @@ import ( "strings" "time" - jwt "github.com/appleboy/gin-jwt/v2" "github.com/gin-gonic/gin" "github.com/go-openapi/strfmt" "github.com/google/uuid" @@ -143,9 +142,7 @@ func normalizeScope(scope string) string { func (c *Controller) CreateAlert(gctx *gin.Context) { var input models.AddAlertsRequest - claims := jwt.ExtractClaims(gctx) - // TBD: use defined rather than hardcoded key to find back owner - machineID := claims["id"].(string) + machineID, _ := getMachineIDFromContext(gctx) if err := gctx.ShouldBindJSON(&input); err != nil { gctx.JSON(http.StatusBadRequest, gin.H{"message": err.Error()}) diff --git a/pkg/apiserver/controllers/v1/heartbeat.go b/pkg/apiserver/controllers/v1/heartbeat.go index b19b450f0d5..e1231eaa9ec 100644 --- a/pkg/apiserver/controllers/v1/heartbeat.go +++ b/pkg/apiserver/controllers/v1/heartbeat.go @@ -3,14 +3,11 @@ package v1 import ( "net/http" - jwt "github.com/appleboy/gin-jwt/v2" "github.com/gin-gonic/gin" ) func (c *Controller) HeartBeat(gctx *gin.Context) { - claims := jwt.ExtractClaims(gctx) - // TBD: use defined rather than hardcoded key to 
find back owner - machineID := claims["id"].(string) + machineID, _ := getMachineIDFromContext(gctx) if err := c.DBClient.UpdateMachineLastHeartBeat(machineID); err != nil { c.HandleDBErrors(gctx, err) diff --git a/pkg/apiserver/controllers/v1/metrics.go b/pkg/apiserver/controllers/v1/metrics.go index 13ccf9ac94f..ddb38512a11 100644 --- a/pkg/apiserver/controllers/v1/metrics.go +++ b/pkg/apiserver/controllers/v1/metrics.go @@ -3,7 +3,6 @@ package v1 import ( "time" - jwt "github.com/appleboy/gin-jwt/v2" "github.com/gin-gonic/gin" "github.com/prometheus/client_golang/prometheus" ) @@ -66,32 +65,29 @@ var LapiResponseTime = prometheus.NewHistogramVec( []string{"endpoint", "method"}) func PrometheusBouncersHasEmptyDecision(c *gin.Context) { - name, ok := c.Get("BOUNCER_NAME") - if ok { + bouncer, _ := getBouncerFromContext(c) + if bouncer != nil { LapiNilDecisions.With(prometheus.Labels{ - "bouncer": name.(string)}).Inc() + "bouncer": bouncer.Name}).Inc() } } func PrometheusBouncersHasNonEmptyDecision(c *gin.Context) { - name, ok := c.Get("BOUNCER_NAME") - if ok { + bouncer, _ := getBouncerFromContext(c) + if bouncer != nil { LapiNonNilDecisions.With(prometheus.Labels{ - "bouncer": name.(string)}).Inc() + "bouncer": bouncer.Name}).Inc() } } func PrometheusMachinesMiddleware() gin.HandlerFunc { return func(c *gin.Context) { - claims := jwt.ExtractClaims(c) - if claims != nil { - if rawID, ok := claims["id"]; ok { - machineID := rawID.(string) - LapiMachineHits.With(prometheus.Labels{ - "machine": machineID, - "route": c.Request.URL.Path, - "method": c.Request.Method}).Inc() - } + machineID, _ := getMachineIDFromContext(c) + if machineID != "" { + LapiMachineHits.With(prometheus.Labels{ + "machine": machineID, + "route": c.Request.URL.Path, + "method": c.Request.Method}).Inc() } c.Next() @@ -100,10 +96,10 @@ func PrometheusMachinesMiddleware() gin.HandlerFunc { func PrometheusBouncersMiddleware() gin.HandlerFunc { return func(c *gin.Context) { - name, ok := 
c.Get("BOUNCER_NAME") - if ok { + bouncer, _ := getBouncerFromContext(c) + if bouncer != nil { LapiBouncerHits.With(prometheus.Labels{ - "bouncer": name.(string), + "bouncer": bouncer.Name, "route": c.Request.URL.Path, "method": c.Request.Method}).Inc() } diff --git a/pkg/apiserver/controllers/v1/utils.go b/pkg/apiserver/controllers/v1/utils.go index 6afd005132a..6f14dd9204e 100644 --- a/pkg/apiserver/controllers/v1/utils.go +++ b/pkg/apiserver/controllers/v1/utils.go @@ -1,30 +1,50 @@ package v1 import ( - "fmt" + "errors" "net/http" + jwt "github.com/appleboy/gin-jwt/v2" "github.com/gin-gonic/gin" + middlewares "github.com/crowdsecurity/crowdsec/pkg/apiserver/middlewares/v1" "github.com/crowdsecurity/crowdsec/pkg/database/ent" ) -const bouncerContextKey = "bouncer_info" - func getBouncerFromContext(ctx *gin.Context) (*ent.Bouncer, error) { - bouncerInterface, exist := ctx.Get(bouncerContextKey) + bouncerInterface, exist := ctx.Get(middlewares.BouncerContextKey) if !exist { - return nil, fmt.Errorf("bouncer not found") + return nil, errors.New("bouncer not found") } bouncerInfo, ok := bouncerInterface.(*ent.Bouncer) if !ok { - return nil, fmt.Errorf("bouncer not found") + return nil, errors.New("bouncer not found") } return bouncerInfo, nil } +func getMachineIDFromContext(ctx *gin.Context) (string, error) { + claims := jwt.ExtractClaims(ctx) + if claims == nil { + return "", errors.New("failed to extract claims") + } + + rawID, ok := claims[middlewares.MachineIDKey] + if !ok { + return "", errors.New("MachineID not found in claims") + } + + id, ok := rawID.(string) + if !ok { + // should never happen + return "", errors.New("failed to cast machineID to string") + } + + return id, nil +} + func (c *Controller) AbortRemoteIf(option bool) gin.HandlerFunc { return func(gctx *gin.Context) { incomingIP := gctx.ClientIP() diff --git a/pkg/apiserver/middlewares/v1/api_key.go b/pkg/apiserver/middlewares/v1/api_key.go index 41ee15b4417..4e273371bfe 100644 --- 
a/pkg/apiserver/middlewares/v1/api_key.go +++ b/pkg/apiserver/middlewares/v1/api_key.go @@ -18,9 +18,9 @@ import ( const ( APIKeyHeader = "X-Api-Key" - bouncerContextKey = "bouncer_info" - // max allowed by bcrypt 72 = 54 bytes in base64 + BouncerContextKey = "bouncer_info" dummyAPIKeySize = 54 + // max allowed by bcrypt 72 = 54 bytes in base64 ) type APIKey struct { @@ -159,11 +159,6 @@ func (a *APIKey) MiddlewareFunc() gin.HandlerFunc { "name": bouncer.Name, }) - // maybe we want to store the whole bouncer object in the context instead, this would avoid another db query - // in StreamDecision - c.Set("BOUNCER_NAME", bouncer.Name) - c.Set("BOUNCER_HASHED_KEY", bouncer.APIKey) - if bouncer.IPAddress == "" { if err := a.DbClient.UpdateBouncerIP(c.ClientIP(), bouncer.ID); err != nil { logger.Errorf("Failed to update ip address for '%s': %s\n", bouncer.Name, err) @@ -203,7 +198,7 @@ func (a *APIKey) MiddlewareFunc() gin.HandlerFunc { } } - c.Set(bouncerContextKey, bouncer) + c.Set(BouncerContextKey, bouncer) c.Next() } } diff --git a/pkg/apiserver/middlewares/v1/jwt.go b/pkg/apiserver/middlewares/v1/jwt.go index ed4ad107b96..6fe053713bc 100644 --- a/pkg/apiserver/middlewares/v1/jwt.go +++ b/pkg/apiserver/middlewares/v1/jwt.go @@ -22,7 +22,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) -var identityKey = "id" +const MachineIDKey = "id" type JWT struct { Middleware *jwt.GinJWTMiddleware @@ -33,7 +33,7 @@ type JWT struct { func PayloadFunc(data interface{}) jwt.MapClaims { if value, ok := data.(*models.WatcherAuthRequest); ok { return jwt.MapClaims{ - identityKey: &value.MachineID, + MachineIDKey: &value.MachineID, } } @@ -42,7 +42,7 @@ func PayloadFunc(data interface{}) jwt.MapClaims { func IdentityHandler(c *gin.Context) interface{} { claims := jwt.ExtractClaims(c) - machineID := claims[identityKey].(string) + machineID := claims[MachineIDKey].(string) return &models.WatcherAuthRequest{ MachineID: &machineID, @@ -307,7 +307,7 @@ func NewJWT(dbClient 
*database.Client) (*JWT, error) { Key: secret, Timeout: time.Hour, MaxRefresh: time.Hour, - IdentityKey: identityKey, + IdentityKey: MachineIDKey, PayloadFunc: PayloadFunc, IdentityHandler: IdentityHandler, Authenticator: jwtMiddleware.Authenticator, From a23fe06d6878c2770c524e905bcf7fd611abb3cc Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 23 Feb 2024 16:05:01 +0100 Subject: [PATCH 038/318] remove dependencies on enescakir/emoji, gotest.tools (#2837) * wrap emoji package in pkg/emoji * remove dependency on enescakir/emoji * remove dependency on gotest.tools * lint (whitespace) --- cmd/crowdsec-cli/bouncers_table.go | 10 ++-- cmd/crowdsec-cli/console_table.go | 14 ++--- cmd/crowdsec-cli/hubtest.go | 2 +- cmd/crowdsec-cli/hubtest_table.go | 21 ++++---- cmd/crowdsec-cli/machines_table.go | 11 ++-- cmd/crowdsec-cli/notifications_table.go | 14 +++-- cmd/crowdsec-cli/utils_table.go | 5 +- go.mod | 3 +- go.sum | 2 - pkg/acquisition/modules/loki/loki_test.go | 62 +++++++++++++++++++---- pkg/cwhub/item.go | 5 +- pkg/cwhub/itemupgrade.go | 5 +- pkg/dumps/parser_dump.go | 39 +++++++------- pkg/emoji/emoji.go | 14 +++++ 14 files changed, 140 insertions(+), 67 deletions(-) create mode 100644 pkg/emoji/emoji.go diff --git a/cmd/crowdsec-cli/bouncers_table.go b/cmd/crowdsec-cli/bouncers_table.go index 0ea725f5598..5fe48b49047 100644 --- a/cmd/crowdsec-cli/bouncers_table.go +++ b/cmd/crowdsec-cli/bouncers_table.go @@ -5,9 +5,9 @@ import ( "time" "github.com/aquasecurity/table" - "github.com/enescakir/emoji" "github.com/crowdsecurity/crowdsec/pkg/database/ent" + "github.com/crowdsecurity/crowdsec/pkg/emoji" ) func getBouncersTable(out io.Writer, bouncers []*ent.Bouncer) { @@ -17,11 +17,9 @@ func getBouncersTable(out io.Writer, bouncers []*ent.Bouncer) { t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) for _, b := range bouncers { - var revoked string - if !b.Revoked { - 
revoked = emoji.CheckMark.String() - } else { - revoked = emoji.Prohibited.String() + revoked := emoji.CheckMark + if b.Revoked { + revoked = emoji.Prohibited } t.AddRow(b.Name, b.IPAddress, revoked, b.LastPull.Format(time.RFC3339), b.Type, b.Version, b.AuthType) diff --git a/cmd/crowdsec-cli/console_table.go b/cmd/crowdsec-cli/console_table.go index e71ea8113fb..8f7ebb2100c 100644 --- a/cmd/crowdsec-cli/console_table.go +++ b/cmd/crowdsec-cli/console_table.go @@ -4,9 +4,9 @@ import ( "io" "github.com/aquasecurity/table" - "github.com/enescakir/emoji" "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/emoji" ) func cmdConsoleStatusTable(out io.Writer, consoleCfg csconfig.ConsoleConfig) { @@ -17,28 +17,28 @@ func cmdConsoleStatusTable(out io.Writer, consoleCfg csconfig.ConsoleConfig) { t.SetHeaderAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft) for _, option := range csconfig.CONSOLE_CONFIGS { - activated := string(emoji.CrossMark) + activated := emoji.CrossMark switch option { case csconfig.SEND_CUSTOM_SCENARIOS: if *consoleCfg.ShareCustomScenarios { - activated = string(emoji.CheckMarkButton) + activated = emoji.CheckMarkButton } case csconfig.SEND_MANUAL_SCENARIOS: if *consoleCfg.ShareManualDecisions { - activated = string(emoji.CheckMarkButton) + activated = emoji.CheckMarkButton } case csconfig.SEND_TAINTED_SCENARIOS: if *consoleCfg.ShareTaintedScenarios { - activated = string(emoji.CheckMarkButton) + activated = emoji.CheckMarkButton } case csconfig.SEND_CONTEXT: if *consoleCfg.ShareContext { - activated = string(emoji.CheckMarkButton) + activated = emoji.CheckMarkButton } case csconfig.CONSOLE_MANAGEMENT: if *consoleCfg.ConsoleManagement { - activated = string(emoji.CheckMarkButton) + activated = emoji.CheckMarkButton } } diff --git a/cmd/crowdsec-cli/hubtest.go b/cmd/crowdsec-cli/hubtest.go index 8f5ab087370..d6ed4560056 100644 --- a/cmd/crowdsec-cli/hubtest.go +++ b/cmd/crowdsec-cli/hubtest.go @@ -11,13 
+11,13 @@ import ( "text/template" "github.com/AlecAivazis/survey/v2" - "github.com/enescakir/emoji" "github.com/fatih/color" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "gopkg.in/yaml.v2" "github.com/crowdsecurity/crowdsec/pkg/dumps" + "github.com/crowdsecurity/crowdsec/pkg/emoji" "github.com/crowdsecurity/crowdsec/pkg/hubtest" ) diff --git a/cmd/crowdsec-cli/hubtest_table.go b/cmd/crowdsec-cli/hubtest_table.go index 4034da7e519..e6c5ee80abd 100644 --- a/cmd/crowdsec-cli/hubtest_table.go +++ b/cmd/crowdsec-cli/hubtest_table.go @@ -5,8 +5,8 @@ import ( "io" "github.com/aquasecurity/table" - "github.com/enescakir/emoji" + "github.com/crowdsecurity/crowdsec/pkg/emoji" "github.com/crowdsecurity/crowdsec/pkg/hubtest" ) @@ -17,9 +17,9 @@ func hubTestResultTable(out io.Writer, testResult map[string]bool) { t.SetAlignment(table.AlignLeft) for testName, success := range testResult { - status := emoji.CheckMarkButton.String() + status := emoji.CheckMarkButton if !success { - status = emoji.CrossMark.String() + status = emoji.CrossMark } t.AddRow(testName, status) @@ -50,11 +50,12 @@ func hubTestParserCoverageTable(out io.Writer, coverage []hubtest.Coverage) { parserTested := 0 for _, test := range coverage { - status := emoji.RedCircle.String() + status := emoji.RedCircle if test.TestsCount > 0 { - status = emoji.GreenCircle.String() + status = emoji.GreenCircle parserTested++ } + t.AddRow(test.Name, status, fmt.Sprintf("%d times (across %d tests)", test.TestsCount, len(test.PresentIn))) } @@ -70,11 +71,12 @@ func hubTestAppsecRuleCoverageTable(out io.Writer, coverage []hubtest.Coverage) parserTested := 0 for _, test := range coverage { - status := emoji.RedCircle.String() + status := emoji.RedCircle if test.TestsCount > 0 { - status = emoji.GreenCircle.String() + status = emoji.GreenCircle parserTested++ } + t.AddRow(test.Name, status, fmt.Sprintf("%d times (across %d tests)", test.TestsCount, len(test.PresentIn))) } @@ -90,11 +92,12 @@ func 
hubTestScenarioCoverageTable(out io.Writer, coverage []hubtest.Coverage) { parserTested := 0 for _, test := range coverage { - status := emoji.RedCircle.String() + status := emoji.RedCircle if test.TestsCount > 0 { - status = emoji.GreenCircle.String() + status = emoji.GreenCircle parserTested++ } + t.AddRow(test.Name, status, fmt.Sprintf("%d times (across %d tests)", test.TestsCount, len(test.PresentIn))) } diff --git a/cmd/crowdsec-cli/machines_table.go b/cmd/crowdsec-cli/machines_table.go index e166fb785a6..120929ea654 100644 --- a/cmd/crowdsec-cli/machines_table.go +++ b/cmd/crowdsec-cli/machines_table.go @@ -5,9 +5,9 @@ import ( "time" "github.com/aquasecurity/table" - "github.com/enescakir/emoji" "github.com/crowdsecurity/crowdsec/pkg/database/ent" + "github.com/crowdsecurity/crowdsec/pkg/emoji" ) func getAgentsTable(out io.Writer, machines []*ent.Machine) { @@ -17,17 +17,16 @@ func getAgentsTable(out io.Writer, machines []*ent.Machine) { t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) for _, m := range machines { - var validated string + validated := emoji.Prohibited if m.IsValidated { - validated = emoji.CheckMark.String() - } else { - validated = emoji.Prohibited.String() + validated = emoji.CheckMark } hb, active := getLastHeartbeat(m) if !active { - hb = emoji.Warning.String() + " " + hb + hb = emoji.Warning + " " + hb } + t.AddRow(m.MachineId, m.IpAddress, m.UpdatedAt.Format(time.RFC3339), validated, m.Version, m.AuthType, hb) } diff --git a/cmd/crowdsec-cli/notifications_table.go b/cmd/crowdsec-cli/notifications_table.go index e0f61d9cebe..19d11cea741 100644 --- a/cmd/crowdsec-cli/notifications_table.go +++ b/cmd/crowdsec-cli/notifications_table.go @@ -6,7 +6,8 @@ import ( "strings" "github.com/aquasecurity/table" - "github.com/enescakir/emoji" + + "github.com/crowdsecurity/crowdsec/pkg/emoji" ) func notificationListTable(out io.Writer, ncfgs 
map[string]NotificationsCfg) { @@ -14,24 +15,31 @@ func notificationListTable(out io.Writer, ncfgs map[string]NotificationsCfg) { t.SetHeaders("Active", "Name", "Type", "Profile name") t.SetHeaderAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) + keys := make([]string, 0, len(ncfgs)) for k := range ncfgs { keys = append(keys, k) } + sort.Slice(keys, func(i, j int) bool { return len(ncfgs[keys[i]].Profiles) > len(ncfgs[keys[j]].Profiles) }) + for _, k := range keys { b := ncfgs[k] profilesList := []string{} + for _, p := range b.Profiles { profilesList = append(profilesList, p.Name) } - active := emoji.CheckMark.String() + + active := emoji.CheckMark if len(profilesList) == 0 { - active = emoji.Prohibited.String() + active = emoji.Prohibited } + t.AddRow(active, b.Config.Name, b.Config.Type, strings.Join(profilesList, ", ")) } + t.Render() } diff --git a/cmd/crowdsec-cli/utils_table.go b/cmd/crowdsec-cli/utils_table.go index b1e4b6950b3..23bcff4e5c6 100644 --- a/cmd/crowdsec-cli/utils_table.go +++ b/cmd/crowdsec-cli/utils_table.go @@ -6,9 +6,9 @@ import ( "strconv" "github.com/aquasecurity/table" - "github.com/enescakir/emoji" "github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/crowdsecurity/crowdsec/pkg/emoji" ) func listHubItemTable(out io.Writer, title string, items []*cwhub.Item) { @@ -21,6 +21,7 @@ func listHubItemTable(out io.Writer, title string, items []*cwhub.Item) { status := fmt.Sprintf("%v %s", item.State.Emoji(), item.State.Text()) t.AddRow(item.Name, status, item.State.LocalVersion, item.State.LocalPath) } + renderTableTitle(out, title) t.Render() } @@ -42,6 +43,7 @@ func scenarioMetricsTable(out io.Writer, itemName string, metrics map[string]int if metrics["instantiation"] == 0 { return } + t := newTable(out) t.SetHeaders("Current Count", "Overflows", "Instantiated", "Poured", "Expired") @@ -72,6 +74,7 @@ func parserMetricsTable(out 
io.Writer, itemName string, metrics map[string]map[s strconv.Itoa(stats["parsed"]), strconv.Itoa(stats["unparsed"]), ) + showTable = true } } diff --git a/go.mod b/go.mod index e1da18387a5..c2d6ca2c148 100644 --- a/go.mod +++ b/go.mod @@ -33,7 +33,6 @@ require ( github.com/dghubble/sling v1.3.0 github.com/docker/docker v24.0.7+incompatible github.com/docker/go-connections v0.4.0 - github.com/enescakir/emoji v1.0.0 github.com/fatih/color v1.15.0 github.com/fsnotify/fsnotify v1.6.0 github.com/gin-gonic/gin v1.9.1 @@ -92,7 +91,6 @@ require ( gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 - gotest.tools/v3 v3.5.0 k8s.io/apiserver v0.28.4 ) @@ -210,6 +208,7 @@ require ( google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect + gotest.tools/v3 v3.5.0 // indirect k8s.io/api v0.28.4 // indirect k8s.io/apimachinery v0.28.4 // indirect k8s.io/klog/v2 v2.100.1 // indirect diff --git a/go.sum b/go.sum index 2daf22cc99c..7e860300089 100644 --- a/go.sum +++ b/go.sum @@ -124,8 +124,6 @@ github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDD github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/enescakir/emoji v1.0.0 h1:W+HsNql8swfCQFtioDGDHCHri8nudlK1n5p2rHCJoog= -github.com/enescakir/emoji v1.0.0/go.mod h1:Bt1EKuLnKDTYpLALApstIkAjdDrS/8IAgTkKp+WKFD0= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= diff --git a/pkg/acquisition/modules/loki/loki_test.go 
b/pkg/acquisition/modules/loki/loki_test.go index 6cac1c0fec3..8511d5445af 100644 --- a/pkg/acquisition/modules/loki/loki_test.go +++ b/pkg/acquisition/modules/loki/loki_test.go @@ -2,6 +2,7 @@ package loki_test import ( "bytes" + "context" "encoding/json" "fmt" "io" @@ -13,19 +14,17 @@ import ( "testing" "time" - "context" + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + tomb "gopkg.in/tomb.v2" "github.com/crowdsecurity/go-cs-lib/cstest" "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/loki" "github.com/crowdsecurity/crowdsec/pkg/types" - log "github.com/sirupsen/logrus" - tomb "gopkg.in/tomb.v2" - "gotest.tools/v3/assert" ) func TestConfiguration(t *testing.T) { - log.Infof("Test 'TestConfigure'") tests := []struct { @@ -127,22 +126,26 @@ query: > subLogger := log.WithFields(log.Fields{ "type": "loki", }) + for _, test := range tests { t.Run(test.testName, func(t *testing.T) { lokiSource := loki.LokiSource{} err := lokiSource.Configure([]byte(test.config), subLogger) cstest.AssertErrorContains(t, err, test.expectedErr) + if test.password != "" { p := lokiSource.Config.Auth.Password if test.password != p { t.Fatalf("Password mismatch : %s != %s", test.password, p) } } + if test.waitForReady != 0 { if lokiSource.Config.WaitForReady != test.waitForReady { t.Fatalf("Wrong WaitForReady %v != %v", lokiSource.Config.WaitForReady, test.waitForReady) } } + if test.delayFor != 0 { if lokiSource.Config.DelayFor != test.delayFor { t.Fatalf("Wrong DelayFor %v != %v", lokiSource.Config.DelayFor, test.delayFor) @@ -154,6 +157,7 @@ query: > func TestConfigureDSN(t *testing.T) { log.Infof("Test 'TestConfigureDSN'") + tests := []struct { name string dsn string @@ -218,7 +222,9 @@ func TestConfigureDSN(t *testing.T) { "type": "loki", "name": test.name, }) + t.Logf("Test : %s", test.name) + lokiSource := &loki.LokiSource{} err := lokiSource.ConfigureByDSN(test.dsn, map[string]string{"type": "testtype"}, subLogger, "") 
cstest.AssertErrorContains(t, err, test.expectedErr) @@ -234,17 +240,20 @@ func TestConfigureDSN(t *testing.T) { t.Fatalf("Password mismatch : %s != %s", test.password, p) } } + if test.scheme != "" { url, _ := url.Parse(lokiSource.Config.URL) if test.scheme != url.Scheme { t.Fatalf("Schema mismatch : %s != %s", test.scheme, url.Scheme) } } + if test.waitForReady != 0 { if lokiSource.Config.WaitForReady != test.waitForReady { t.Fatalf("Wrong WaitForReady %v != %v", lokiSource.Config.WaitForReady, test.waitForReady) } } + if test.delayFor != 0 { if lokiSource.Config.DelayFor != test.delayFor { t.Fatalf("Wrong DelayFor %v != %v", lokiSource.Config.DelayFor, test.delayFor) @@ -272,27 +281,36 @@ func feedLoki(logger *log.Entry, n int, title string) error { Line: fmt.Sprintf("Log line #%d %v", i, title), } } + buff, err := json.Marshal(streams) if err != nil { return err } + req, err := http.NewRequest(http.MethodPost, "http://127.0.0.1:3100/loki/api/v1/push", bytes.NewBuffer(buff)) if err != nil { return err } + req.Header.Set("Content-Type", "application/json") req.Header.Set("X-Scope-OrgID", "1234") + resp, err := http.DefaultClient.Do(req) if err != nil { return err } + defer resp.Body.Close() + if resp.StatusCode != http.StatusNoContent { b, _ := io.ReadAll(resp.Body) logger.Error(string(b)) + return fmt.Errorf("Bad post status %d", resp.StatusCode) } + logger.Info(n, " Events sent") + return nil } @@ -300,9 +318,11 @@ func TestOneShotAcquisition(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } + log.SetOutput(os.Stdout) log.SetLevel(log.InfoLevel) log.Info("Test 'TestStreamingAcquisition'") + title := time.Now().String() // Loki will be messy, with a lot of stuff, lets use a unique key tests := []struct { config string @@ -327,6 +347,7 @@ since: 1h }) lokiSource := loki.LokiSource{} err := lokiSource.Configure([]byte(ts.config), subLogger) + if err != nil { t.Fatalf("Unexpected error : %s", err) } @@ -338,19 +359,23 @@ since: 1h 
out := make(chan types.Event) read := 0 + go func() { for { <-out + read++ } }() + lokiTomb := tomb.Tomb{} + err = lokiSource.OneShotAcquisition(out, &lokiTomb) if err != nil { t.Fatalf("Unexpected error : %s", err) } - assert.Equal(t, 20, read) + assert.Equal(t, 20, read) } } @@ -358,9 +383,11 @@ func TestStreamingAcquisition(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } + log.SetOutput(os.Stdout) log.SetLevel(log.InfoLevel) log.Info("Test 'TestStreamingAcquisition'") + title := time.Now().String() tests := []struct { name string @@ -396,6 +423,7 @@ query: > expectedLines: 20, }, } + for _, ts := range tests { t.Run(ts.name, func(t *testing.T) { logger := log.New() @@ -407,10 +435,12 @@ query: > out := make(chan types.Event) lokiTomb := tomb.Tomb{} lokiSource := loki.LokiSource{} + err := lokiSource.Configure([]byte(ts.config), subLogger) if err != nil { t.Fatalf("Unexpected error : %s", err) } + err = lokiSource.StreamingAcquisition(out, &lokiTomb) cstest.AssertErrorContains(t, err, ts.streamErr) @@ -418,22 +448,26 @@ query: > return } - time.Sleep(time.Second * 2) //We need to give time to start reading from the WS + time.Sleep(time.Second * 2) // We need to give time to start reading from the WS + readTomb := tomb.Tomb{} readCtx, cancel := context.WithTimeout(context.Background(), time.Second*10) count := 0 readTomb.Go(func() error { defer cancel() + for { select { case <-readCtx.Done(): return readCtx.Err() case evt := <-out: count++ + if !strings.HasSuffix(evt.Line.Raw, title) { return fmt.Errorf("Incorrect suffix : %s", evt.Line.Raw) } + if count == ts.expectedLines { return nil } @@ -447,20 +481,23 @@ query: > } err = readTomb.Wait() + cancel() + if err != nil { t.Fatalf("Unexpected error : %s", err) } - assert.Equal(t, count, ts.expectedLines) + + assert.Equal(t, ts.expectedLines, count) }) } - } func TestStopStreaming(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } + config := ` 
mode: tail source: loki @@ -476,24 +513,30 @@ query: > }) title := time.Now().String() lokiSource := loki.LokiSource{} + err := lokiSource.Configure([]byte(config), subLogger) if err != nil { t.Fatalf("Unexpected error : %s", err) } + out := make(chan types.Event) lokiTomb := &tomb.Tomb{} + err = lokiSource.StreamingAcquisition(out, lokiTomb) if err != nil { t.Fatalf("Unexpected error : %s", err) } + time.Sleep(time.Second * 2) + err = feedLoki(subLogger, 1, title) if err != nil { t.Fatalf("Unexpected error : %s", err) } lokiTomb.Kill(nil) + err = lokiTomb.Wait() if err != nil { t.Fatalf("Unexpected error : %s", err) @@ -519,5 +562,6 @@ func (l *LogValue) MarshalJSON() ([]byte, error) { if err != nil { return nil, err } + return []byte(fmt.Sprintf(`["%d",%s]`, l.Time.UnixNano(), string(line))), nil } diff --git a/pkg/cwhub/item.go b/pkg/cwhub/item.go index 6c7da06c313..6cdb5cadcb9 100644 --- a/pkg/cwhub/item.go +++ b/pkg/cwhub/item.go @@ -7,7 +7,8 @@ import ( "slices" "github.com/Masterminds/semver/v3" - "github.com/enescakir/emoji" + + "github.com/crowdsecurity/crowdsec/pkg/emoji" ) const ( @@ -84,7 +85,7 @@ func (s *ItemState) Text() string { } // Emoji returns the status of the item as an emoji (eg. emoji.Warning). -func (s *ItemState) Emoji() emoji.Emoji { +func (s *ItemState) Emoji() string { switch { case s.IsLocal(): return emoji.House diff --git a/pkg/cwhub/itemupgrade.go b/pkg/cwhub/itemupgrade.go index ac3b94f9836..6a8dc2f44b6 100644 --- a/pkg/cwhub/itemupgrade.go +++ b/pkg/cwhub/itemupgrade.go @@ -13,7 +13,7 @@ import ( "os" "path/filepath" - "github.com/enescakir/emoji" + "github.com/crowdsecurity/crowdsec/pkg/emoji" ) // Upgrade downloads and applies the last version of the item from the hub. 
@@ -60,6 +60,7 @@ func (i *Item) Upgrade(force bool) (bool, error) { // TODO: use a better way to communicate this fmt.Printf("updated %s\n", i.Name) i.hub.logger.Infof("%v %s: updated", emoji.Package, i.Name) + updated = true } @@ -151,7 +152,7 @@ func (i *Item) FetchLatest() ([]byte, string, error) { i.hub.logger.Errorf("Downloaded version doesn't match index, please 'hub update'") i.hub.logger.Debugf("got %s, expected %s", meow, i.Versions[i.Version].Digest) - return nil, "", fmt.Errorf("invalid download hash") + return nil, "", errors.New("invalid download hash") } return body, url, nil diff --git a/pkg/dumps/parser_dump.go b/pkg/dumps/parser_dump.go index 566b87a0803..9b4cdb1c22b 100644 --- a/pkg/dumps/parser_dump.go +++ b/pkg/dumps/parser_dump.go @@ -1,6 +1,7 @@ package dumps import ( + "errors" "fmt" "io" "os" @@ -8,13 +9,15 @@ import ( "strings" "time" - "github.com/crowdsecurity/crowdsec/pkg/types" - "github.com/crowdsecurity/go-cs-lib/maptools" - "github.com/enescakir/emoji" "github.com/fatih/color" diff "github.com/r3labs/diff/v2" log "github.com/sirupsen/logrus" "gopkg.in/yaml.v2" + + "github.com/crowdsecurity/go-cs-lib/maptools" + + "github.com/crowdsecurity/crowdsec/pkg/emoji" + "github.com/crowdsecurity/crowdsec/pkg/types" ) type ParserResult struct { @@ -56,7 +59,7 @@ func LoadParserDump(filepath string) (*ParserResults, error) { var lastStage string - //Loop over stages to find last successful one with at least one parser + // Loop over stages to find last successful one with at least one parser for i := len(stages) - 2; i >= 0; i-- { if len(pdump[stages[i]]) != 0 { lastStage = stages[i] @@ -73,7 +76,7 @@ func LoadParserDump(filepath string) (*ParserResults, error) { sort.Strings(parsers) if len(parsers) == 0 { - return nil, fmt.Errorf("no parser found. Please install the appropriate parser and retry") + return nil, errors.New("no parser found. 
Please install the appropriate parser and retry") } lastParser := parsers[len(parsers)-1] @@ -90,14 +93,15 @@ func LoadParserDump(filepath string) (*ParserResults, error) { } func DumpTree(parserResults ParserResults, bucketPour BucketPourInfo, opts DumpOpts) { - //note : we can use line -> time as the unique identifier (of acquisition) + // note : we can use line -> time as the unique identifier (of acquisition) state := make(map[time.Time]map[string]map[string]ParserResult) assoc := make(map[time.Time]string, 0) parser_order := make(map[string][]string) for stage, parsers := range parserResults { - //let's process parsers in the order according to idx + // let's process parsers in the order according to idx parser_order[stage] = make([]string, len(parsers)) + for pname, parser := range parsers { if len(parser) > 0 { parser_order[stage][parser[0].Idx-1] = pname @@ -128,14 +132,14 @@ func DumpTree(parserResults ParserResults, bucketPour BucketPourInfo, opts DumpO continue } - //it might be bucket overflow being reprocessed, skip this + // it might be bucket overflow being reprocessed, skip this if _, ok := state[evt.Line.Time]; !ok { state[evt.Line.Time] = make(map[string]map[string]ParserResult) assoc[evt.Line.Time] = evt.Line.Raw } - //there is a trick : to know if an event successfully exit the parsers, we check if it reached the pour() phase - //we thus use a fake stage "buckets" and a fake parser "OK" to know if it entered + // there is a trick : to know if an event successfully exit the parsers, we check if it reached the pour() phase + // we thus use a fake stage "buckets" and a fake parser "OK" to know if it entered if _, ok := state[evt.Line.Time]["buckets"]; !ok { state[evt.Line.Time]["buckets"] = make(map[string]ParserResult) } @@ -148,7 +152,7 @@ func DumpTree(parserResults ParserResults, bucketPour BucketPourInfo, opts DumpO red := color.New(color.FgRed).SprintFunc() green := color.New(color.FgGreen).SprintFunc() whitelistReason := "" - //get each line 
+ // get each line for tstamp, rawstr := range assoc { if opts.SkipOk { if _, ok := state[tstamp]["buckets"]["OK"]; ok { @@ -161,8 +165,8 @@ func DumpTree(parserResults ParserResults, bucketPour BucketPourInfo, opts DumpO skeys := make([]string, 0, len(state[tstamp])) for k := range state[tstamp] { - //there is a trick : to know if an event successfully exit the parsers, we check if it reached the pour() phase - //we thus use a fake stage "buckets" and a fake parser "OK" to know if it entered + // there is a trick : to know if an event successfully exit the parsers, we check if it reached the pour() phase + // we thus use a fake stage "buckets" and a fake parser "OK" to know if it entered if k == "buckets" { continue } @@ -216,6 +220,7 @@ func DumpTree(parserResults ParserResults, bucketPour BucketPourInfo, opts DumpO whitelistReason = parsers[parser].Evt.WhitelistReason } } + updated++ case "delete": deleted++ @@ -277,7 +282,7 @@ func DumpTree(parserResults ParserResults, bucketPour BucketPourInfo, opts DumpO sep = "├" } - //did the event enter the bucket pour phase ? + // did the event enter the bucket pour phase ? 
if _, ok := state[tstamp]["buckets"]["OK"]; ok { fmt.Printf("\t%s-------- parser success %s\n", sep, emoji.GreenCircle) } else if whitelistReason != "" { @@ -286,7 +291,7 @@ func DumpTree(parserResults ParserResults, bucketPour BucketPourInfo, opts DumpO fmt.Printf("\t%s-------- parser failure %s\n", sep, emoji.RedCircle) } - //now print bucket info + // now print bucket info if len(state[tstamp]["buckets"]) > 0 { fmt.Printf("\t├ Scenarios\n") } @@ -294,8 +299,8 @@ func DumpTree(parserResults ParserResults, bucketPour BucketPourInfo, opts DumpO bnames := make([]string, 0, len(state[tstamp]["buckets"])) for k := range state[tstamp]["buckets"] { - //there is a trick : to know if an event successfully exit the parsers, we check if it reached the pour() phase - //we thus use a fake stage "buckets" and a fake parser "OK" to know if it entered + // there is a trick : to know if an event successfully exit the parsers, we check if it reached the pour() phase + // we thus use a fake stage "buckets" and a fake parser "OK" to know if it entered if k == "OK" { continue } diff --git a/pkg/emoji/emoji.go b/pkg/emoji/emoji.go new file mode 100644 index 00000000000..51295a85411 --- /dev/null +++ b/pkg/emoji/emoji.go @@ -0,0 +1,14 @@ +package emoji + +const ( + CheckMarkButton = "\u2705" // ✅ + CheckMark = "\u2714\ufe0f" // ✔️ + CrossMark = "\u274c" // ❌ + GreenCircle = "\U0001f7e2" // 🟢 + House = "\U0001f3e0" // 🏠 + Package = "\U0001f4e6" // 📦 + Prohibited = "\U0001f6ab" // 🚫 + QuestionMark = "\u2753" // ❓ + RedCircle = "\U0001f534" // 🔴 + Warning = "\u26a0\ufe0f" // ⚠️ +) From 8e9e091656f2a72a37e21c173ed0c2015e97e726 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 26 Feb 2024 13:44:40 +0100 Subject: [PATCH 039/318] systemd: check configuration before attempting reload (#2861) --- config/crowdsec.service | 1 + debian/crowdsec.service | 1 + rpm/SOURCES/crowdsec.unit.patch | 2 ++ 3 files changed, 4 insertions(+) diff --git 
a/config/crowdsec.service b/config/crowdsec.service index 147cae4946e..65a8d30bc5f 100644 --- a/config/crowdsec.service +++ b/config/crowdsec.service @@ -8,6 +8,7 @@ Environment=LC_ALL=C LANG=C ExecStartPre=/usr/local/bin/crowdsec -c /etc/crowdsec/config.yaml -t -error ExecStart=/usr/local/bin/crowdsec -c /etc/crowdsec/config.yaml #ExecStartPost=/bin/sleep 0.1 +ExecReload=/usr/local/bin/crowdsec -c /etc/crowdsec/config.yaml -t -error ExecReload=/bin/kill -HUP $MAINPID Restart=always RestartSec=60 diff --git a/debian/crowdsec.service b/debian/crowdsec.service index b65558f70d3..c1a5e403745 100644 --- a/debian/crowdsec.service +++ b/debian/crowdsec.service @@ -8,6 +8,7 @@ Environment=LC_ALL=C LANG=C ExecStartPre=/usr/bin/crowdsec -c /etc/crowdsec/config.yaml -t -error ExecStart=/usr/bin/crowdsec -c /etc/crowdsec/config.yaml #ExecStartPost=/bin/sleep 0.1 +ExecReload=/usr/bin/crowdsec -c /etc/crowdsec/config.yaml -t -error ExecReload=/bin/kill -HUP $MAINPID Restart=always RestartSec=60 diff --git a/rpm/SOURCES/crowdsec.unit.patch b/rpm/SOURCES/crowdsec.unit.patch index af9fe5c31e3..5173beb2710 100644 --- a/rpm/SOURCES/crowdsec.unit.patch +++ b/rpm/SOURCES/crowdsec.unit.patch @@ -9,5 +9,7 @@ +ExecStartPre=/usr/bin/crowdsec -c /etc/crowdsec/config.yaml -t -error +ExecStart=/usr/bin/crowdsec -c /etc/crowdsec/config.yaml #ExecStartPost=/bin/sleep 0.1 +-ExecReload=/usr/local/bin/crowdsec -c /etc/crowdsec/config.yaml -t -error ++ExecReload=/usr/bin/crowdsec -c /etc/crowdsec/config.yaml -t -error ExecReload=/bin/kill -HUP $MAINPID Restart=always From 41b43733b03d9fa3609c2ff333461bfa49ae587f Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 1 Mar 2024 10:52:35 +0100 Subject: [PATCH 040/318] fix: log stack trace while computing metrics (#2865) --- cmd/crowdsec/metrics.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cmd/crowdsec/metrics.go b/cmd/crowdsec/metrics.go index 1199af0fe16..563bb56bfc9 100644 --- a/cmd/crowdsec/metrics.go 
+++ b/cmd/crowdsec/metrics.go @@ -102,6 +102,8 @@ var globalPourHistogram = prometheus.NewHistogramVec( func computeDynamicMetrics(next http.Handler, dbClient *database.Client) http.HandlerFunc { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // catch panics here because they are not handled by servePrometheus + defer trace.CatchPanic("crowdsec/computeDynamicMetrics") //update cache metrics (stash) cache.UpdateCacheMetrics() //update cache metrics (regexp) From e7ecea764e99f873e1644b409d8eebec8e1c2630 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 4 Mar 2024 14:22:53 +0100 Subject: [PATCH 041/318] pkg/csconfig: use yaml.v3; deprecate yaml.v2 for new code (#2867) * pkg/csconfig: use yaml.v3; deprecate yaml.v2 for new code * yaml.v3: handle empty files * Lint whitespace, errors --- .golangci.yml | 50 ++++++++++++++++++++++++++++++++ pkg/csconfig/api.go | 33 ++++++++++++++------- pkg/csconfig/api_test.go | 12 ++++++-- pkg/csconfig/config.go | 17 +++++++---- pkg/csconfig/config_test.go | 2 +- pkg/csconfig/console.go | 12 ++++++-- pkg/csconfig/crowdsec_service.go | 11 +++---- pkg/csconfig/database.go | 9 +++++- pkg/csconfig/profiles.go | 23 ++++++++++----- pkg/csconfig/simulation.go | 23 +++++++++++++-- pkg/csconfig/simulation_test.go | 4 +-- 11 files changed, 153 insertions(+), 43 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 29332447b61..396da2141f1 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -72,6 +72,56 @@ linters-settings: deny: - pkg: "github.com/pkg/errors" desc: "errors.Wrap() is deprecated in favor of fmt.Errorf()" + yaml: + files: + - "!**/cmd/crowdsec-cli/alerts.go" + - "!**/cmd/crowdsec-cli/capi.go" + - "!**/cmd/crowdsec-cli/config_show.go" + - "!**/cmd/crowdsec-cli/hubtest.go" + - "!**/cmd/crowdsec-cli/lapi.go" + - "!**/cmd/crowdsec-cli/simulation.go" + - "!**/cmd/crowdsec/crowdsec.go" + - "!**/cmd/notification-dummy/main.go" + - "!**/cmd/notification-email/main.go" + 
- "!**/cmd/notification-http/main.go" + - "!**/cmd/notification-slack/main.go" + - "!**/cmd/notification-splunk/main.go" + - "!**/pkg/acquisition/acquisition.go" + - "!**/pkg/acquisition/acquisition_test.go" + - "!**/pkg/acquisition/modules/appsec/appsec.go" + - "!**/pkg/acquisition/modules/cloudwatch/cloudwatch.go" + - "!**/pkg/acquisition/modules/docker/docker.go" + - "!**/pkg/acquisition/modules/file/file.go" + - "!**/pkg/acquisition/modules/journalctl/journalctl.go" + - "!**/pkg/acquisition/modules/kafka/kafka.go" + - "!**/pkg/acquisition/modules/kinesis/kinesis.go" + - "!**/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go" + - "!**/pkg/acquisition/modules/loki/loki.go" + - "!**/pkg/acquisition/modules/loki/timestamp_test.go" + - "!**/pkg/acquisition/modules/s3/s3.go" + - "!**/pkg/acquisition/modules/syslog/syslog.go" + - "!**/pkg/acquisition/modules/wineventlog/wineventlog_windows.go" + - "!**/pkg/appsec/appsec.go" + - "!**/pkg/appsec/loader.go" + - "!**/pkg/csplugin/broker.go" + - "!**/pkg/csplugin/broker_test.go" + - "!**/pkg/dumps/bucker_dump.go" + - "!**/pkg/dumps/bucket_dump.go" + - "!**/pkg/dumps/parser_dump.go" + - "!**/pkg/hubtest/coverage.go" + - "!**/pkg/hubtest/hubtest_item.go" + - "!**/pkg/hubtest/parser_assert.go" + - "!**/pkg/hubtest/scenario_assert.go" + - "!**/pkg/leakybucket/buckets_test.go" + - "!**/pkg/leakybucket/manager_load.go" + - "!**/pkg/metabase/metabase.go" + - "!**/pkg/parser/node.go" + - "!**/pkg/parser/node_test.go" + - "!**/pkg/parser/parsing_test.go" + - "!**/pkg/parser/stage.go" + deny: + - pkg: "gopkg.in/yaml.v2" + desc: "yaml.v2 is deprecated for new code in favor of yaml.v3" wsl: # Allow blocks to end with comments diff --git a/pkg/csconfig/api.go b/pkg/csconfig/api.go index de8ee4934a7..7fd1f588897 100644 --- a/pkg/csconfig/api.go +++ b/pkg/csconfig/api.go @@ -1,6 +1,7 @@ package csconfig import ( + "bytes" "crypto/tls" "crypto/x509" "errors" @@ -12,7 +13,7 @@ import ( "time" log "github.com/sirupsen/logrus" - 
"gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" "github.com/crowdsecurity/go-cs-lib/ptr" "github.com/crowdsecurity/go-cs-lib/yamlpatch" @@ -63,7 +64,7 @@ func (a *CTICfg) Load() error { } if a.Key != nil && *a.Key == "" { - return fmt.Errorf("empty cti key") + return errors.New("empty cti key") } if a.Enabled == nil { @@ -92,9 +93,14 @@ func (o *OnlineApiClientCfg) Load() error { return err } - err = yaml.UnmarshalStrict(fcontent, o.Credentials) + dec := yaml.NewDecoder(bytes.NewReader(fcontent)) + dec.KnownFields(true) + + err = dec.Decode(o.Credentials) if err != nil { - return fmt.Errorf("failed unmarshaling api server credentials configuration file '%s': %w", o.CredentialsFilePath, err) + if !errors.Is(err, io.EOF) { + return fmt.Errorf("failed unmarshaling api server credentials configuration file '%s': %w", o.CredentialsFilePath, err) + } } switch { @@ -120,9 +126,14 @@ func (l *LocalApiClientCfg) Load() error { return err } - err = yaml.UnmarshalStrict(fcontent, &l.Credentials) + dec := yaml.NewDecoder(bytes.NewReader(fcontent)) + dec.KnownFields(true) + + err = dec.Decode(&l.Credentials) if err != nil { - return fmt.Errorf("failed unmarshaling api client credential configuration file '%s': %w", l.CredentialsFilePath, err) + if !errors.Is(err, io.EOF) { + return fmt.Errorf("failed unmarshaling api client credential configuration file '%s': %w", l.CredentialsFilePath, err) + } } if l.Credentials == nil || l.Credentials.URL == "" { @@ -136,7 +147,7 @@ func (l *LocalApiClientCfg) Load() error { } if l.Credentials.Login != "" && (l.Credentials.CertPath != "" || l.Credentials.KeyPath != "") { - return fmt.Errorf("user/password authentication and TLS authentication are mutually exclusive") + return errors.New("user/password authentication and TLS authentication are mutually exclusive") } if l.InsecureSkipVerify == nil { @@ -263,7 +274,7 @@ func (c *Config) LoadAPIServer(inCli bool) error { } if c.API.Server.ListenURI == "" { - return fmt.Errorf("no listen_uri specified") 
+ return errors.New("no listen_uri specified") } // inherit log level from common, then api->server @@ -350,7 +361,7 @@ func parseCapiWhitelists(fd io.Reader) (*CapiWhitelist, error) { decoder := yaml.NewDecoder(fd) if err := decoder.Decode(&fromCfg); err != nil { if errors.Is(err, io.EOF) { - return nil, fmt.Errorf("empty file") + return nil, errors.New("empty file") } return nil, err @@ -389,7 +400,7 @@ func (s *LocalApiServerCfg) LoadCapiWhitelists() error { fd, err := os.Open(s.CapiWhitelistsPath) if err != nil { - return fmt.Errorf("while opening capi whitelist file: %s", err) + return fmt.Errorf("while opening capi whitelist file: %w", err) } defer fd.Close() @@ -404,7 +415,7 @@ func (s *LocalApiServerCfg) LoadCapiWhitelists() error { func (c *Config) LoadAPIClient() error { if c.API == nil || c.API.Client == nil || c.API.Client.CredentialsFilePath == "" || c.DisableAgent { - return fmt.Errorf("no API client section in configuration") + return errors.New("no API client section in configuration") } if err := c.API.Client.Load(); err != nil { diff --git a/pkg/csconfig/api_test.go b/pkg/csconfig/api_test.go index e22c78204e7..b6febd4d450 100644 --- a/pkg/csconfig/api_test.go +++ b/pkg/csconfig/api_test.go @@ -9,7 +9,7 @@ import ( log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" "github.com/crowdsecurity/go-cs-lib/cstest" "github.com/crowdsecurity/go-cs-lib/ptr" @@ -68,6 +68,7 @@ func TestLoadLocalApiClientCfg(t *testing.T) { t.Run(tc.name, func(t *testing.T) { err := tc.input.Load() cstest.RequireErrorContains(t, err, tc.expectedErr) + if tc.expectedErr != "" { return } @@ -125,6 +126,7 @@ func TestLoadOnlineApiClientCfg(t *testing.T) { t.Run(tc.name, func(t *testing.T) { err := tc.input.Load() cstest.RequireErrorContains(t, err, tc.expectedErr) + if tc.expectedErr != "" { return } @@ -147,7 +149,11 @@ func TestLoadAPIServer(t *testing.T) { require.NoError(t, 
err) configData := os.ExpandEnv(string(fcontent)) - err = yaml.UnmarshalStrict([]byte(configData), &config) + + dec := yaml.NewDecoder(strings.NewReader(configData)) + dec.KnownFields(true) + + err = dec.Decode(&config) require.NoError(t, err) tests := []struct { @@ -242,6 +248,7 @@ func TestLoadAPIServer(t *testing.T) { t.Run(tc.name, func(t *testing.T) { err := tc.input.LoadAPIServer(false) cstest.RequireErrorContains(t, err, tc.expectedErr) + if tc.expectedErr != "" { return } @@ -305,6 +312,7 @@ func TestParseCapiWhitelists(t *testing.T) { t.Run(tc.name, func(t *testing.T) { wl, err := parseCapiWhitelists(strings.NewReader(tc.input)) cstest.RequireErrorContains(t, err, tc.expectedErr) + if tc.expectedErr != "" { return } diff --git a/pkg/csconfig/config.go b/pkg/csconfig/config.go index 2dc7ecc7d53..0c960803e04 100644 --- a/pkg/csconfig/config.go +++ b/pkg/csconfig/config.go @@ -1,14 +1,16 @@ // Package csconfig contains the configuration structures for crowdsec and cscli. - package csconfig import ( + "errors" "fmt" + "io" "os" "path/filepath" + "strings" log "github.com/sirupsen/logrus" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" "github.com/crowdsecurity/go-cs-lib/csstring" "github.com/crowdsecurity/go-cs-lib/ptr" @@ -57,10 +59,15 @@ func NewConfig(configFile string, disableAgent bool, disableAPI bool, inCli bool DisableAPI: disableAPI, } - err = yaml.UnmarshalStrict([]byte(configData), &cfg) + dec := yaml.NewDecoder(strings.NewReader(configData)) + dec.KnownFields(true) + + err = dec.Decode(&cfg) if err != nil { - // this is actually the "merged" yaml - return nil, "", fmt.Errorf("%s: %w", configFile, err) + if !errors.Is(err, io.EOF) { + // this is actually the "merged" yaml + return nil, "", fmt.Errorf("%s: %w", configFile, err) + } } if cfg.Prometheus == nil { diff --git a/pkg/csconfig/config_test.go b/pkg/csconfig/config_test.go index 4843c2f70f9..56ecc202373 100644 --- a/pkg/csconfig/config_test.go +++ b/pkg/csconfig/config_test.go @@ -5,7 +5,7 @@ import 
( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" "github.com/crowdsecurity/go-cs-lib/cstest" ) diff --git a/pkg/csconfig/console.go b/pkg/csconfig/console.go index 1e8974154ec..01e74a94db4 100644 --- a/pkg/csconfig/console.go +++ b/pkg/csconfig/console.go @@ -5,7 +5,7 @@ import ( "os" log "github.com/sirupsen/logrus" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" "github.com/crowdsecurity/go-cs-lib/ptr" ) @@ -41,6 +41,7 @@ func (c *ConsoleConfig) IsPAPIEnabled() bool { if c == nil || c.ConsoleManagement == nil { return false } + return *c.ConsoleManagement } @@ -48,31 +49,36 @@ func (c *LocalApiServerCfg) LoadConsoleConfig() error { c.ConsoleConfig = &ConsoleConfig{} if _, err := os.Stat(c.ConsoleConfigPath); err != nil && os.IsNotExist(err) { log.Debugf("no console configuration to load") + c.ConsoleConfig.ShareCustomScenarios = ptr.Of(true) c.ConsoleConfig.ShareTaintedScenarios = ptr.Of(true) c.ConsoleConfig.ShareManualDecisions = ptr.Of(false) c.ConsoleConfig.ConsoleManagement = ptr.Of(false) c.ConsoleConfig.ShareContext = ptr.Of(false) + return nil } yamlFile, err := os.ReadFile(c.ConsoleConfigPath) if err != nil { - return fmt.Errorf("reading console config file '%s': %s", c.ConsoleConfigPath, err) + return fmt.Errorf("reading console config file '%s': %w", c.ConsoleConfigPath, err) } + err = yaml.Unmarshal(yamlFile, c.ConsoleConfig) if err != nil { - return fmt.Errorf("unmarshaling console config file '%s': %s", c.ConsoleConfigPath, err) + return fmt.Errorf("unmarshaling console config file '%s': %w", c.ConsoleConfigPath, err) } if c.ConsoleConfig.ShareCustomScenarios == nil { log.Debugf("no share_custom scenarios found, setting to true") c.ConsoleConfig.ShareCustomScenarios = ptr.Of(true) } + if c.ConsoleConfig.ShareTaintedScenarios == nil { log.Debugf("no share_tainted scenarios found, setting to true") c.ConsoleConfig.ShareTaintedScenarios = ptr.Of(true) } + if 
c.ConsoleConfig.ShareManualDecisions == nil { log.Debugf("no share_manual scenarios found, setting to false") c.ConsoleConfig.ShareManualDecisions = ptr.Of(false) diff --git a/pkg/csconfig/crowdsec_service.go b/pkg/csconfig/crowdsec_service.go index 36d38cf7481..7820595b46f 100644 --- a/pkg/csconfig/crowdsec_service.go +++ b/pkg/csconfig/crowdsec_service.go @@ -6,7 +6,7 @@ import ( "path/filepath" log "github.com/sirupsen/logrus" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" "github.com/crowdsecurity/go-cs-lib/ptr" ) @@ -133,19 +133,16 @@ func (c *Config) LoadCrowdsec() error { } if err = c.LoadAPIClient(); err != nil { - return fmt.Errorf("loading api client: %s", err) + return fmt.Errorf("loading api client: %w", err) } return nil } func (c *CrowdsecServiceCfg) DumpContextConfigFile() error { - var out []byte - var err error - // XXX: MakeDirs - - if out, err = yaml.Marshal(c.ContextToSend); err != nil { + out, err := yaml.Marshal(c.ContextToSend) + if err != nil { return fmt.Errorf("while marshaling ConsoleConfig (for %s): %w", c.ConsoleContextPath, err) } diff --git a/pkg/csconfig/database.go b/pkg/csconfig/database.go index 5149b4ae39e..2df2207859d 100644 --- a/pkg/csconfig/database.go +++ b/pkg/csconfig/database.go @@ -1,6 +1,7 @@ package csconfig import ( + "errors" "fmt" "time" @@ -45,6 +46,7 @@ type AuthGCCfg struct { type FlushDBCfg struct { MaxItems *int `yaml:"max_items,omitempty"` + // We could unmarshal as time.Duration, but alert filters right now are a map of strings MaxAge *string `yaml:"max_age,omitempty"` BouncersGC *AuthGCCfg `yaml:"bouncers_autodelete,omitempty"` AgentsGC *AuthGCCfg `yaml:"agents_autodelete,omitempty"` @@ -52,7 +54,7 @@ type FlushDBCfg struct { func (c *Config) LoadDBConfig(inCli bool) error { if c.DbConfig == nil { - return fmt.Errorf("no database configuration provided") + return errors.New("no database configuration provided") } if c.Cscli != nil { @@ -86,6 +88,7 @@ func (c *Config) LoadDBConfig(inCli bool) error { func (d 
*DatabaseCfg) ConnectionString() string { connString := "" + switch d.Type { case "sqlite": var sqliteConnectionStringParameters string @@ -94,6 +97,7 @@ func (d *DatabaseCfg) ConnectionString() string { } else { sqliteConnectionStringParameters = "_busy_timeout=100000&_fk=1" } + connString = fmt.Sprintf("file:%s?%s", d.DbPath, sqliteConnectionStringParameters) case "mysql": if d.isSocketConfig() { @@ -108,6 +112,7 @@ func (d *DatabaseCfg) ConnectionString() string { connString = fmt.Sprintf("host=%s port=%d user=%s dbname=%s password=%s sslmode=%s", d.Host, d.Port, d.User, d.DbName, d.Password, d.Sslmode) } } + return connString } @@ -121,8 +126,10 @@ func (d *DatabaseCfg) ConnectionDialect() (string, string, error) { if d.Type != "pgx" { log.Debugf("database type '%s' is deprecated, switching to 'pgx' instead", d.Type) } + return "pgx", dialect.Postgres, nil } + return "", "", fmt.Errorf("unknown database type '%s'", d.Type) } diff --git a/pkg/csconfig/profiles.go b/pkg/csconfig/profiles.go index ad3779ed12f..6fbb8ed8b21 100644 --- a/pkg/csconfig/profiles.go +++ b/pkg/csconfig/profiles.go @@ -6,7 +6,7 @@ import ( "fmt" "io" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" "github.com/crowdsecurity/go-cs-lib/yamlpatch" @@ -23,43 +23,50 @@ import ( type ProfileCfg struct { Name string `yaml:"name,omitempty"` Debug *bool `yaml:"debug,omitempty"` - Filters []string `yaml:"filters,omitempty"` //A list of OR'ed expressions. the models.Alert object + Filters []string `yaml:"filters,omitempty"` // A list of OR'ed expressions. 
the models.Alert object Decisions []models.Decision `yaml:"decisions,omitempty"` DurationExpr string `yaml:"duration_expr,omitempty"` - OnSuccess string `yaml:"on_success,omitempty"` //continue or break - OnFailure string `yaml:"on_failure,omitempty"` //continue or break - OnError string `yaml:"on_error,omitempty"` //continue, break, error, report, apply, ignore + OnSuccess string `yaml:"on_success,omitempty"` // continue or break + OnFailure string `yaml:"on_failure,omitempty"` // continue or break + OnError string `yaml:"on_error,omitempty"` // continue, break, error, report, apply, ignore Notifications []string `yaml:"notifications,omitempty"` } func (c *LocalApiServerCfg) LoadProfiles() error { if c.ProfilesPath == "" { - return fmt.Errorf("empty profiles path") + return errors.New("empty profiles path") } patcher := yamlpatch.NewPatcher(c.ProfilesPath, ".local") + fcontent, err := patcher.PrependedPatchContent() if err != nil { return err } + reader := bytes.NewReader(fcontent) dec := yaml.NewDecoder(reader) - dec.SetStrict(true) + dec.KnownFields(true) + for { t := ProfileCfg{} + err = dec.Decode(&t) if err != nil { if errors.Is(err, io.EOF) { break } + return fmt.Errorf("while decoding %s: %w", c.ProfilesPath, err) } + c.Profiles = append(c.Profiles, &t) } if len(c.Profiles) == 0 { - return fmt.Errorf("zero profiles loaded for LAPI") + return errors.New("zero profiles loaded for LAPI") } + return nil } diff --git a/pkg/csconfig/simulation.go b/pkg/csconfig/simulation.go index 0d09aa478ff..bf121ef56f9 100644 --- a/pkg/csconfig/simulation.go +++ b/pkg/csconfig/simulation.go @@ -1,10 +1,13 @@ package csconfig import ( + "bytes" + "errors" "fmt" + "io" "path/filepath" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" "github.com/crowdsecurity/go-cs-lib/yamlpatch" ) @@ -20,37 +23,51 @@ func (s *SimulationConfig) IsSimulated(scenario string) bool { if s.Simulation != nil && *s.Simulation { simulated = true } + for _, excluded := range s.Exclusions { if excluded == 
scenario { simulated = !simulated break } } + return simulated } func (c *Config) LoadSimulation() error { simCfg := SimulationConfig{} + if c.ConfigPaths.SimulationFilePath == "" { c.ConfigPaths.SimulationFilePath = filepath.Clean(c.ConfigPaths.ConfigDir + "/simulation.yaml") } patcher := yamlpatch.NewPatcher(c.ConfigPaths.SimulationFilePath, ".local") + rcfg, err := patcher.MergedPatchContent() if err != nil { return err } - if err := yaml.UnmarshalStrict(rcfg, &simCfg); err != nil { - return fmt.Errorf("while unmarshaling simulation file '%s' : %s", c.ConfigPaths.SimulationFilePath, err) + + dec := yaml.NewDecoder(bytes.NewReader(rcfg)) + dec.KnownFields(true) + + if err := dec.Decode(&simCfg); err != nil { + if !errors.Is(err, io.EOF) { + return fmt.Errorf("while unmarshaling simulation file '%s': %w", c.ConfigPaths.SimulationFilePath, err) + } } + if simCfg.Simulation == nil { simCfg.Simulation = new(bool) } + if c.Crowdsec != nil { c.Crowdsec.SimulationConfig = &simCfg } + if c.Cscli != nil { c.Cscli.SimulationConfig = &simCfg } + return nil } diff --git a/pkg/csconfig/simulation_test.go b/pkg/csconfig/simulation_test.go index 01f05e3975a..71b09ee397c 100644 --- a/pkg/csconfig/simulation_test.go +++ b/pkg/csconfig/simulation_test.go @@ -60,7 +60,7 @@ func TestSimulationLoading(t *testing.T) { }, Crowdsec: &CrowdsecServiceCfg{}, }, - expectedErr: "while unmarshaling simulation file './testdata/config.yaml' : yaml: unmarshal errors", + expectedErr: "while unmarshaling simulation file './testdata/config.yaml': yaml: unmarshal errors", }, { name: "basic bad file content", @@ -71,7 +71,7 @@ func TestSimulationLoading(t *testing.T) { }, Crowdsec: &CrowdsecServiceCfg{}, }, - expectedErr: "while unmarshaling simulation file './testdata/config.yaml' : yaml: unmarshal errors", + expectedErr: "while unmarshaling simulation file './testdata/config.yaml': yaml: unmarshal errors", }, } From d8877a71fc1faac18539cd29fa736deccaf4ea92 Mon Sep 17 00:00:00 2001 From: mmetc 
<92726601+mmetc@users.noreply.github.com> Date: Tue, 5 Mar 2024 14:56:14 +0100 Subject: [PATCH 042/318] lp metrics: collect datasources and console options (#2870) --- cmd/crowdsec/crowdsec.go | 31 ++++++++++++++++++------------- cmd/crowdsec/main.go | 19 +++++++++++-------- cmd/crowdsec/serve.go | 14 +++++++------- pkg/csconfig/console.go | 29 +++++++++++++++++++++++++++++ test/bats/01_crowdsec.bats | 2 +- test/bats/01_crowdsec_lapi.bats | 2 +- 6 files changed, 67 insertions(+), 30 deletions(-) diff --git a/cmd/crowdsec/crowdsec.go b/cmd/crowdsec/crowdsec.go index d4cd2d3cf74..0d7d454edf2 100644 --- a/cmd/crowdsec/crowdsec.go +++ b/cmd/crowdsec/crowdsec.go @@ -23,39 +23,42 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) -func initCrowdsec(cConfig *csconfig.Config, hub *cwhub.Hub) (*parser.Parsers, error) { +// initCrowdsec prepares the log processor service +func initCrowdsec(cConfig *csconfig.Config, hub *cwhub.Hub) (*parser.Parsers, []acquisition.DataSource, error) { var err error if err = alertcontext.LoadConsoleContext(cConfig, hub); err != nil { - return nil, fmt.Errorf("while loading context: %w", err) + return nil, nil, fmt.Errorf("while loading context: %w", err) } // Start loading configs csParsers := parser.NewParsers(hub) if csParsers, err = parser.LoadParsers(cConfig, csParsers); err != nil { - return nil, fmt.Errorf("while loading parsers: %w", err) + return nil, nil, fmt.Errorf("while loading parsers: %w", err) } if err := LoadBuckets(cConfig, hub); err != nil { - return nil, fmt.Errorf("while loading scenarios: %w", err) + return nil, nil, fmt.Errorf("while loading scenarios: %w", err) } if err := appsec.LoadAppsecRules(hub); err != nil { - return nil, fmt.Errorf("while loading appsec rules: %w", err) + return nil, nil, fmt.Errorf("while loading appsec rules: %w", err) } - if err := LoadAcquisition(cConfig); err != nil { - return nil, fmt.Errorf("while loading acquisition config: %w", err) + datasources, err := LoadAcquisition(cConfig) + 
if err != nil { + return nil, nil, fmt.Errorf("while loading acquisition config: %w", err) } - return csParsers, nil + return csParsers, datasources, nil } -func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.Hub) error { +// runCrowdsec starts the log processor service +func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.Hub, datasources []acquisition.DataSource) error { inputEventChan = make(chan types.Event) inputLineChan = make(chan types.Event) - //start go-routines for parsing, buckets pour and outputs. + // start go-routines for parsing, buckets pour and outputs. parserWg := &sync.WaitGroup{} parsersTomb.Go(func() error { @@ -65,7 +68,8 @@ func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.H parsersTomb.Go(func() error { defer trace.CatchPanic("crowdsec/runParse") - if err := runParse(inputLineChan, inputEventChan, *parsers.Ctx, parsers.Nodes); err != nil { //this error will never happen as parser.Parse is not able to return errors + if err := runParse(inputLineChan, inputEventChan, *parsers.Ctx, parsers.Nodes); err != nil { + // this error will never happen as parser.Parse is not able to return errors log.Fatalf("starting parse error : %s", err) return err } @@ -161,7 +165,8 @@ func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.H return nil } -func serveCrowdsec(parsers *parser.Parsers, cConfig *csconfig.Config, hub *cwhub.Hub, agentReady chan bool) { +// serveCrowdsec wraps the log processor service +func serveCrowdsec(parsers *parser.Parsers, cConfig *csconfig.Config, hub *cwhub.Hub, datasources []acquisition.DataSource, agentReady chan bool) { crowdsecTomb.Go(func() error { defer trace.CatchPanic("crowdsec/serveCrowdsec") @@ -171,7 +176,7 @@ func serveCrowdsec(parsers *parser.Parsers, cConfig *csconfig.Config, hub *cwhub log.Debugf("running agent after %s ms", time.Since(crowdsecT0)) agentReady <- true - if err := runCrowdsec(cConfig, parsers, 
hub); err != nil { + if err := runCrowdsec(cConfig, parsers, hub, datasources); err != nil { log.Fatalf("unable to start crowdsec routines: %s", err) } }() diff --git a/cmd/crowdsec/main.go b/cmd/crowdsec/main.go index 2040141bb3e..7f3070b5f29 100644 --- a/cmd/crowdsec/main.go +++ b/cmd/crowdsec/main.go @@ -1,6 +1,7 @@ package main import ( + "errors" "flag" "fmt" _ "net/http/pprof" @@ -10,7 +11,6 @@ import ( "strings" "time" - "github.com/pkg/errors" log "github.com/sirupsen/logrus" "gopkg.in/tomb.v2" @@ -95,7 +95,7 @@ func LoadBuckets(cConfig *csconfig.Config, hub *cwhub.Hub) error { holders, outputEventChan, err = leakybucket.LoadBuckets(cConfig.Crowdsec, hub, files, &bucketsTomb, buckets, flags.OrderEvent) if err != nil { - return fmt.Errorf("scenario loading failed: %v", err) + return fmt.Errorf("scenario loading failed: %w", err) } if cConfig.Prometheus != nil && cConfig.Prometheus.Enabled { @@ -107,7 +107,7 @@ func LoadBuckets(cConfig *csconfig.Config, hub *cwhub.Hub) error { return nil } -func LoadAcquisition(cConfig *csconfig.Config) error { +func LoadAcquisition(cConfig *csconfig.Config) ([]acquisition.DataSource, error) { var err error if flags.SingleFileType != "" && flags.OneShotDSN != "" { @@ -116,20 +116,20 @@ func LoadAcquisition(cConfig *csconfig.Config) error { dataSources, err = acquisition.LoadAcquisitionFromDSN(flags.OneShotDSN, flags.Labels, flags.Transform) if err != nil { - return errors.Wrapf(err, "failed to configure datasource for %s", flags.OneShotDSN) + return nil, fmt.Errorf("failed to configure datasource for %s: %w", flags.OneShotDSN, err) } } else { dataSources, err = acquisition.LoadAcquisitionFromFile(cConfig.Crowdsec) if err != nil { - return err + return nil, err } } if len(dataSources) == 0 { - return fmt.Errorf("no datasource enabled") + return nil, errors.New("no datasource enabled") } - return nil + return dataSources, nil } var ( @@ -272,7 +272,7 @@ func LoadConfig(configFile string, disableAgent bool, disableAPI bool, 
quiet boo } if cConfig.DisableAPI && cConfig.DisableAgent { - return nil, errors.New("You must run at least the API Server or crowdsec") + return nil, errors.New("you must run at least the API Server or crowdsec") } if flags.OneShotDSN != "" && flags.SingleFileType == "" { @@ -360,11 +360,14 @@ func main() { if err != nil { log.Fatalf("could not create CPU profile: %s", err) } + log.Infof("CPU profile will be written to %s", flags.CpuProfile) + if err := pprof.StartCPUProfile(f); err != nil { f.Close() log.Fatalf("could not start CPU profile: %s", err) } + defer f.Close() defer pprof.StopCPUProfile() } diff --git a/cmd/crowdsec/serve.go b/cmd/crowdsec/serve.go index 22f65b927a0..c8ccd4d5d70 100644 --- a/cmd/crowdsec/serve.go +++ b/cmd/crowdsec/serve.go @@ -86,7 +86,7 @@ func reloadHandler(sig os.Signal) (*csconfig.Config, error) { return nil, fmt.Errorf("while loading hub index: %w", err) } - csParsers, err := initCrowdsec(cConfig, hub) + csParsers, datasources, err := initCrowdsec(cConfig, hub) if err != nil { return nil, fmt.Errorf("unable to init crowdsec: %w", err) } @@ -103,7 +103,7 @@ func reloadHandler(sig os.Signal) (*csconfig.Config, error) { } agentReady := make(chan bool, 1) - serveCrowdsec(csParsers, cConfig, hub, agentReady) + serveCrowdsec(csParsers, cConfig, hub, datasources, agentReady) } log.Printf("Reload is finished") @@ -230,7 +230,7 @@ func drainChan(c chan types.Event) { for { select { case _, ok := <-c: - if !ok { //closed + if !ok { // closed return } default: @@ -256,8 +256,8 @@ func HandleSignals(cConfig *csconfig.Config) error { exitChan := make(chan error) - //Always try to stop CPU profiling to avoid passing flags around - //It's a noop if profiling is not enabled + // Always try to stop CPU profiling to avoid passing flags around + // It's a noop if profiling is not enabled defer pprof.StopCPUProfile() go func() { @@ -369,14 +369,14 @@ func Serve(cConfig *csconfig.Config, agentReady chan bool) error { return fmt.Errorf("while loading 
hub index: %w", err) } - csParsers, err := initCrowdsec(cConfig, hub) + csParsers, datasources, err := initCrowdsec(cConfig, hub) if err != nil { return fmt.Errorf("crowdsec init: %w", err) } // if it's just linting, we're done if !flags.TestMode { - serveCrowdsec(csParsers, cConfig, hub, agentReady) + serveCrowdsec(csParsers, cConfig, hub, datasources, agentReady) } else { agentReady <- true } diff --git a/pkg/csconfig/console.go b/pkg/csconfig/console.go index 01e74a94db4..4c14f5f7d49 100644 --- a/pkg/csconfig/console.go +++ b/pkg/csconfig/console.go @@ -37,6 +37,35 @@ type ConsoleConfig struct { ShareContext *bool `yaml:"share_context"` } +func (c *ConsoleConfig) EnabledOptions() []string { + ret := []string{} + if c == nil { + return ret + } + + if c.ShareCustomScenarios != nil && *c.ShareCustomScenarios { + ret = append(ret, SEND_CUSTOM_SCENARIOS) + } + + if c.ShareTaintedScenarios != nil && *c.ShareTaintedScenarios { + ret = append(ret, SEND_TAINTED_SCENARIOS) + } + + if c.ShareManualDecisions != nil && *c.ShareManualDecisions { + ret = append(ret, SEND_MANUAL_SCENARIOS) + } + + if c.ConsoleManagement != nil && *c.ConsoleManagement { + ret = append(ret, CONSOLE_MANAGEMENT) + } + + if c.ShareContext != nil && *c.ShareContext { + ret = append(ret, SEND_CONTEXT) + } + + return ret +} + func (c *ConsoleConfig) IsPAPIEnabled() bool { if c == nil || c.ConsoleManagement == nil { return false diff --git a/test/bats/01_crowdsec.bats b/test/bats/01_crowdsec.bats index a585930e34c..7051b4d33a3 100644 --- a/test/bats/01_crowdsec.bats +++ b/test/bats/01_crowdsec.bats @@ -38,7 +38,7 @@ teardown() { @test "crowdsec (no api and no agent)" { rune -0 wait-for \ - --err "You must run at least the API Server or crowdsec" \ + --err "you must run at least the API Server or crowdsec" \ "${CROWDSEC}" -no-api -no-cs } diff --git a/test/bats/01_crowdsec_lapi.bats b/test/bats/01_crowdsec_lapi.bats index 4819d724fea..233340e500f 100644 --- a/test/bats/01_crowdsec_lapi.bats +++ 
b/test/bats/01_crowdsec_lapi.bats @@ -28,7 +28,7 @@ teardown() { @test "lapi (.api.server.enable=false)" { rune -0 config_set '.api.server.enable=false' rune -1 "${CROWDSEC}" -no-cs - assert_stderr --partial "You must run at least the API Server or crowdsec" + assert_stderr --partial "you must run at least the API Server or crowdsec" } @test "lapi (no .api.server.listen_uri)" { From 5356ccc6cd138faa0f41fabaa5733bb94d9fb017 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 6 Mar 2024 13:42:57 +0100 Subject: [PATCH 043/318] cron: spread server load when upgrading hub and data files (#2873) --- config/crowdsec.cron.daily | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/config/crowdsec.cron.daily b/config/crowdsec.cron.daily index 1c110df38fc..9c488d29884 100644 --- a/config/crowdsec.cron.daily +++ b/config/crowdsec.cron.daily @@ -2,12 +2,13 @@ test -x /usr/bin/cscli || exit 0 +# splay hub upgrade and crowdsec reload +sleep "$(seq 1 300 | shuf -n 1)" + /usr/bin/cscli --error hub update upgraded=$(/usr/bin/cscli --error hub upgrade) if [ -n "$upgraded" ]; then - # splay initial metrics push - sleep $(seq 1 90 | shuf -n 1) systemctl reload crowdsec fi From e611d01c90f3e57d591f3875c2e6c629cba9c68a Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 6 Mar 2024 14:27:05 +0100 Subject: [PATCH 044/318] cscli: hide hashed api keys (#2874) * cscli: hide hashed api keys * lint --- docker/test/tests/test_bouncer.py | 3 --- pkg/database/bouncers.go | 32 +++++++++++++++++++----------- pkg/database/ent/bouncer.go | 5 ++--- pkg/database/ent/schema/bouncer.go | 2 +- 4 files changed, 23 insertions(+), 19 deletions(-) diff --git a/docker/test/tests/test_bouncer.py b/docker/test/tests/test_bouncer.py index 1324c3bd38c..98b86de858c 100644 --- a/docker/test/tests/test_bouncer.py +++ b/docker/test/tests/test_bouncer.py @@ -36,8 +36,6 @@ def test_register_bouncer_env(crowdsec, flavor): 
bouncer1, bouncer2 = j assert bouncer1['name'] == 'bouncer1name' assert bouncer2['name'] == 'bouncer2name' - assert bouncer1['api_key'] == hex512('bouncer1key') - assert bouncer2['api_key'] == hex512('bouncer2key') # add a second bouncer at runtime res = cs.cont.exec_run('cscli bouncers add bouncer3name -k bouncer3key') @@ -48,7 +46,6 @@ def test_register_bouncer_env(crowdsec, flavor): assert len(j) == 3 bouncer3 = j[2] assert bouncer3['name'] == 'bouncer3name' - assert bouncer3['api_key'] == hex512('bouncer3key') # remove all bouncers res = cs.cont.exec_run('cscli bouncers delete bouncer1name bouncer2name bouncer3name') diff --git a/pkg/database/bouncers.go b/pkg/database/bouncers.go index 496b9b6cc9c..2cc6b9dcb47 100644 --- a/pkg/database/bouncers.go +++ b/pkg/database/bouncers.go @@ -33,6 +33,7 @@ func (c *Client) ListBouncers() ([]*ent.Bouncer, error) { if err != nil { return nil, errors.Wrapf(QueryFail, "listing bouncers: %s", err) } + return result, nil } @@ -48,8 +49,10 @@ func (c *Client) CreateBouncer(name string, ipAddr string, apiKey string, authTy if ent.IsConstraintError(err) { return nil, fmt.Errorf("bouncer %s already exists", name) } - return nil, fmt.Errorf("unable to create bouncer: %s", err) + + return nil, fmt.Errorf("unable to create bouncer: %w", err) } + return bouncer, nil } @@ -63,7 +66,7 @@ func (c *Client) DeleteBouncer(name string) error { } if nbDeleted == 0 { - return fmt.Errorf("bouncer doesn't exist") + return errors.New("bouncer doesn't exist") } return nil @@ -74,36 +77,41 @@ func (c *Client) BulkDeleteBouncers(bouncers []*ent.Bouncer) (int, error) { for i, b := range bouncers { ids[i] = b.ID } + nbDeleted, err := c.Ent.Bouncer.Delete().Where(bouncer.IDIn(ids...)).Exec(c.CTX) if err != nil { - return nbDeleted, fmt.Errorf("unable to delete bouncers: %s", err) + return nbDeleted, fmt.Errorf("unable to delete bouncers: %w", err) } + return nbDeleted, nil } -func (c *Client) UpdateBouncerLastPull(lastPull time.Time, ID int) error { - 
_, err := c.Ent.Bouncer.UpdateOneID(ID). +func (c *Client) UpdateBouncerLastPull(lastPull time.Time, id int) error { + _, err := c.Ent.Bouncer.UpdateOneID(id). SetLastPull(lastPull). Save(c.CTX) if err != nil { - return fmt.Errorf("unable to update machine last pull in database: %s", err) + return fmt.Errorf("unable to update machine last pull in database: %w", err) } + return nil } -func (c *Client) UpdateBouncerIP(ipAddr string, ID int) error { - _, err := c.Ent.Bouncer.UpdateOneID(ID).SetIPAddress(ipAddr).Save(c.CTX) +func (c *Client) UpdateBouncerIP(ipAddr string, id int) error { + _, err := c.Ent.Bouncer.UpdateOneID(id).SetIPAddress(ipAddr).Save(c.CTX) if err != nil { - return fmt.Errorf("unable to update bouncer ip address in database: %s", err) + return fmt.Errorf("unable to update bouncer ip address in database: %w", err) } + return nil } -func (c *Client) UpdateBouncerTypeAndVersion(bType string, version string, ID int) error { - _, err := c.Ent.Bouncer.UpdateOneID(ID).SetVersion(version).SetType(bType).Save(c.CTX) +func (c *Client) UpdateBouncerTypeAndVersion(bType string, version string, id int) error { + _, err := c.Ent.Bouncer.UpdateOneID(id).SetVersion(version).SetType(bType).Save(c.CTX) if err != nil { - return fmt.Errorf("unable to update bouncer type and version in database: %s", err) + return fmt.Errorf("unable to update bouncer type and version in database: %w", err) } + return nil } diff --git a/pkg/database/ent/bouncer.go b/pkg/database/ent/bouncer.go index fe189c3817e..203f49a432d 100644 --- a/pkg/database/ent/bouncer.go +++ b/pkg/database/ent/bouncer.go @@ -24,7 +24,7 @@ type Bouncer struct { // Name holds the value of the "name" field. Name string `json:"name"` // APIKey holds the value of the "api_key" field. - APIKey string `json:"api_key"` + APIKey string `json:"-"` // Revoked holds the value of the "revoked" field. Revoked bool `json:"revoked"` // IPAddress holds the value of the "ip_address" field. 
@@ -193,8 +193,7 @@ func (b *Bouncer) String() string { builder.WriteString("name=") builder.WriteString(b.Name) builder.WriteString(", ") - builder.WriteString("api_key=") - builder.WriteString(b.APIKey) + builder.WriteString("api_key=") builder.WriteString(", ") builder.WriteString("revoked=") builder.WriteString(fmt.Sprintf("%v", b.Revoked)) diff --git a/pkg/database/ent/schema/bouncer.go b/pkg/database/ent/schema/bouncer.go index c3081291254..986a1bf3ba8 100644 --- a/pkg/database/ent/schema/bouncer.go +++ b/pkg/database/ent/schema/bouncer.go @@ -21,7 +21,7 @@ func (Bouncer) Fields() []ent.Field { Default(types.UtcNow). UpdateDefault(types.UtcNow).Nillable().Optional().StructTag(`json:"updated_at"`), field.String("name").Unique().StructTag(`json:"name"`), - field.String("api_key").StructTag(`json:"api_key"`), // hash of api_key + field.String("api_key").Sensitive(), // hash of api_key field.Bool("revoked").StructTag(`json:"revoked"`), field.String("ip_address").Default("").Optional().StructTag(`json:"ip_address"`), field.String("type").Optional().StructTag(`json:"type"`), From 98560d0cf56d9c56366065b9a40a915844238468 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 7 Mar 2024 12:29:10 +0100 Subject: [PATCH 045/318] bin/crowdsec: avoid writing errors twice when log_media=stdout (#2876) * bin/crowdsec: avoid writing errors twice when log_media=stdout simpler, correct hook usage * lint --- cmd/crowdsec/api.go | 2 +- cmd/crowdsec/fatalhook.go | 28 +++++++++++++++++++ cmd/crowdsec/hook.go | 43 ------------------------------ cmd/crowdsec/main.go | 21 ++++++++++----- cmd/crowdsec/metrics.go | 6 ++--- cmd/crowdsec/output.go | 8 +++--- cmd/crowdsec/parse.go | 2 +- cmd/crowdsec/pour.go | 19 ++++++++----- cmd/crowdsec/run_in_svc.go | 4 +-- cmd/crowdsec/run_in_svc_windows.go | 4 +-- 10 files changed, 68 insertions(+), 69 deletions(-) create mode 100644 cmd/crowdsec/fatalhook.go delete mode 100644 cmd/crowdsec/hook.go diff --git 
a/cmd/crowdsec/api.go b/cmd/crowdsec/api.go index 4ac5c3ce96f..995345a25e4 100644 --- a/cmd/crowdsec/api.go +++ b/cmd/crowdsec/api.go @@ -1,11 +1,11 @@ package main import ( + "errors" "fmt" "runtime" "time" - "github.com/pkg/errors" log "github.com/sirupsen/logrus" "github.com/crowdsecurity/go-cs-lib/trace" diff --git a/cmd/crowdsec/fatalhook.go b/cmd/crowdsec/fatalhook.go new file mode 100644 index 00000000000..84a57406a21 --- /dev/null +++ b/cmd/crowdsec/fatalhook.go @@ -0,0 +1,28 @@ +package main + +import ( + "io" + + log "github.com/sirupsen/logrus" +) + +// FatalHook is used to log fatal messages to stderr when the rest goes to a file +type FatalHook struct { + Writer io.Writer + LogLevels []log.Level +} + +func (hook *FatalHook) Fire(entry *log.Entry) error { + line, err := entry.String() + if err != nil { + return err + } + + _, err = hook.Writer.Write([]byte(line)) + + return err +} + +func (hook *FatalHook) Levels() []log.Level { + return hook.LogLevels +} diff --git a/cmd/crowdsec/hook.go b/cmd/crowdsec/hook.go deleted file mode 100644 index 28515d9e474..00000000000 --- a/cmd/crowdsec/hook.go +++ /dev/null @@ -1,43 +0,0 @@ -package main - -import ( - "io" - "os" - - log "github.com/sirupsen/logrus" -) - -type ConditionalHook struct { - Writer io.Writer - LogLevels []log.Level - Enabled bool -} - -func (hook *ConditionalHook) Fire(entry *log.Entry) error { - if hook.Enabled { - line, err := entry.String() - if err != nil { - return err - } - - _, err = hook.Writer.Write([]byte(line)) - - return err - } - - return nil -} - -func (hook *ConditionalHook) Levels() []log.Level { - return hook.LogLevels -} - -// The primal logging hook is set up before parsing config.yaml. -// Once config.yaml is parsed, the primal hook is disabled if the -// configured logger is writing to stderr. Otherwise it's used to -// report fatal errors and panics to stderr in addition to the log file. 
-var primalHook = &ConditionalHook{ - Writer: os.Stderr, - LogLevels: []log.Level{log.FatalLevel, log.PanicLevel}, - Enabled: true, -} diff --git a/cmd/crowdsec/main.go b/cmd/crowdsec/main.go index 7f3070b5f29..70f7d48dce4 100644 --- a/cmd/crowdsec/main.go +++ b/cmd/crowdsec/main.go @@ -72,7 +72,7 @@ type Flags struct { DisableCAPI bool Transform string OrderEvent bool - CpuProfile string + CPUProfile string } type labelsMap map[string]string @@ -181,7 +181,7 @@ func (f *Flags) Parse() { } flag.StringVar(&dumpFolder, "dump-data", "", "dump parsers/buckets raw outputs") - flag.StringVar(&f.CpuProfile, "cpu-profile", "", "write cpu profile to file") + flag.StringVar(&f.CPUProfile, "cpu-profile", "", "write cpu profile to file") flag.Parse() } @@ -249,7 +249,12 @@ func LoadConfig(configFile string, disableAgent bool, disableAPI bool, quiet boo return nil, err } - primalHook.Enabled = (cConfig.Common.LogMedia != "stdout") + if cConfig.Common.LogMedia != "stdout" { + log.AddHook(&FatalHook{ + Writer: os.Stderr, + LogLevels: []log.Level{log.FatalLevel, log.PanicLevel}, + }) + } if err := csconfig.LoadFeatureFlagsFile(configFile, log.StandardLogger()); err != nil { return nil, err @@ -323,7 +328,9 @@ func LoadConfig(configFile string, disableAgent bool, disableAPI bool, quiet boo var crowdsecT0 time.Time func main() { - log.AddHook(primalHook) + // The initial log level is INFO, even if the user provided an -error or -warning flag + // because we need feature flags before parsing cli flags + log.SetFormatter(&log.TextFormatter{TimestampFormat: time.RFC3339, FullTimestamp: true}) if err := fflag.RegisterAllFeatures(); err != nil { log.Fatalf("failed to register features: %s", err) @@ -355,13 +362,13 @@ func main() { os.Exit(0) } - if flags.CpuProfile != "" { - f, err := os.Create(flags.CpuProfile) + if flags.CPUProfile != "" { + f, err := os.Create(flags.CPUProfile) if err != nil { log.Fatalf("could not create CPU profile: %s", err) } - log.Infof("CPU profile will be 
written to %s", flags.CpuProfile) + log.Infof("CPU profile will be written to %s", flags.CPUProfile) if err := pprof.StartCPUProfile(f); err != nil { f.Close() diff --git a/cmd/crowdsec/metrics.go b/cmd/crowdsec/metrics.go index 563bb56bfc9..aed43db00c8 100644 --- a/cmd/crowdsec/metrics.go +++ b/cmd/crowdsec/metrics.go @@ -104,12 +104,12 @@ func computeDynamicMetrics(next http.Handler, dbClient *database.Client) http.Ha return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { // catch panics here because they are not handled by servePrometheus defer trace.CatchPanic("crowdsec/computeDynamicMetrics") - //update cache metrics (stash) + // update cache metrics (stash) cache.UpdateCacheMetrics() - //update cache metrics (regexp) + // update cache metrics (regexp) exprhelpers.UpdateRegexpCacheMetrics() - //decision metrics are only relevant for LAPI + // decision metrics are only relevant for LAPI if dbClient == nil { next.ServeHTTP(w, r) return diff --git a/cmd/crowdsec/output.go b/cmd/crowdsec/output.go index c4a2c0b6ac1..ac05b502e52 100644 --- a/cmd/crowdsec/output.go +++ b/cmd/crowdsec/output.go @@ -27,7 +27,7 @@ func dedupAlerts(alerts []types.RuntimeAlert) ([]*models.Alert, error) { } for k, src := range alert.Sources { - refsrc := *alert.Alert //copy + refsrc := *alert.Alert // copy log.Tracef("source[%s]", k) @@ -81,7 +81,7 @@ LOOP: cacheMutex.Unlock() if err := PushAlerts(cachecopy, client); err != nil { log.Errorf("while pushing to api : %s", err) - //just push back the events to the queue + // just push back the events to the queue cacheMutex.Lock() cache = append(cache, cachecopy...) 
cacheMutex.Unlock() @@ -110,8 +110,8 @@ LOOP: return fmt.Errorf("postoverflow failed: %w", err) } log.Printf("%s", *event.Overflow.Alert.Message) - //if the Alert is nil, it's to signal bucket is ready for GC, don't track this - //dump after postoveflow processing to avoid missing whitelist info + // if the Alert is nil, it's to signal bucket is ready for GC, don't track this + // dump after postoveflow processing to avoid missing whitelist info if dumpStates && event.Overflow.Alert != nil { if bucketOverflows == nil { bucketOverflows = make([]types.Event, 0) diff --git a/cmd/crowdsec/parse.go b/cmd/crowdsec/parse.go index c62eeb5869d..53c9ee65d4f 100644 --- a/cmd/crowdsec/parse.go +++ b/cmd/crowdsec/parse.go @@ -11,7 +11,6 @@ import ( ) func runParse(input chan types.Event, output chan types.Event, parserCTX parser.UnixParserCtx, nodes []parser.Node) error { - LOOP: for { select { @@ -56,5 +55,6 @@ LOOP: output <- parsed } } + return nil } diff --git a/cmd/crowdsec/pour.go b/cmd/crowdsec/pour.go index 3f717e3975d..388c7a6c1b3 100644 --- a/cmd/crowdsec/pour.go +++ b/cmd/crowdsec/pour.go @@ -4,27 +4,30 @@ import ( "fmt" "time" + "github.com/prometheus/client_golang/prometheus" + log "github.com/sirupsen/logrus" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" leaky "github.com/crowdsecurity/crowdsec/pkg/leakybucket" "github.com/crowdsecurity/crowdsec/pkg/types" - "github.com/prometheus/client_golang/prometheus" - log "github.com/sirupsen/logrus" ) func runPour(input chan types.Event, holders []leaky.BucketFactory, buckets *leaky.Buckets, cConfig *csconfig.Config) error { count := 0 + for { - //bucket is now ready + // bucket is now ready select { case <-bucketsTomb.Dying(): log.Infof("Bucket routine exiting") return nil case parsed := <-input: startTime := time.Now() + count++ if count%5000 == 0 { log.Infof("%d existing buckets", leaky.LeakyRoutineCount) - //when in forensics mode, garbage collect buckets + // when in forensics mode, garbage collect buckets if 
cConfig.Crowdsec.BucketsGCEnabled { if parsed.MarshaledTime != "" { z := &time.Time{} @@ -32,26 +35,30 @@ func runPour(input chan types.Event, holders []leaky.BucketFactory, buckets *lea log.Warningf("Failed to unmarshal time from event '%s' : %s", parsed.MarshaledTime, err) } else { log.Warning("Starting buckets garbage collection ...") + if err = leaky.GarbageCollectBuckets(*z, buckets); err != nil { - return fmt.Errorf("failed to start bucket GC : %s", err) + return fmt.Errorf("failed to start bucket GC : %w", err) } } } } } - //here we can bucketify with parsed + // here we can bucketify with parsed poured, err := leaky.PourItemToHolders(parsed, holders, buckets) if err != nil { log.Errorf("bucketify failed for: %v", parsed) continue } + elapsed := time.Since(startTime) globalPourHistogram.With(prometheus.Labels{"type": parsed.Line.Module, "source": parsed.Line.Src}).Observe(elapsed.Seconds()) + if poured { globalBucketPourOk.Inc() } else { globalBucketPourKo.Inc() } + if len(parsed.MarshaledTime) != 0 { if err := lastProcessedItem.UnmarshalText([]byte(parsed.MarshaledTime)); err != nil { log.Warningf("failed to unmarshal time from event : %s", err) diff --git a/cmd/crowdsec/run_in_svc.go b/cmd/crowdsec/run_in_svc.go index 5a8bc9a6cd3..58f4cdf005d 100644 --- a/cmd/crowdsec/run_in_svc.go +++ b/cmd/crowdsec/run_in_svc.go @@ -23,8 +23,8 @@ func StartRunSvc() error { defer trace.CatchPanic("crowdsec/StartRunSvc") - //Always try to stop CPU profiling to avoid passing flags around - //It's a noop if profiling is not enabled + // Always try to stop CPU profiling to avoid passing flags around + // It's a noop if profiling is not enabled defer pprof.StopCPUProfile() if cConfig, err = LoadConfig(flags.ConfigFile, flags.DisableAgent, flags.DisableAPI, false); err != nil { diff --git a/cmd/crowdsec/run_in_svc_windows.go b/cmd/crowdsec/run_in_svc_windows.go index 7845e9c58b5..c0aa18d7fc6 100644 --- a/cmd/crowdsec/run_in_svc_windows.go +++ 
b/cmd/crowdsec/run_in_svc_windows.go @@ -20,8 +20,8 @@ func StartRunSvc() error { defer trace.CatchPanic("crowdsec/StartRunSvc") - //Always try to stop CPU profiling to avoid passing flags around - //It's a noop if profiling is not enabled + // Always try to stop CPU profiling to avoid passing flags around + // It's a noop if profiling is not enabled defer pprof.StopCPUProfile() isRunninginService, err := svc.IsWindowsService() From 5731491b4e0948e5011e47a378a14f9a86e46b40 Mon Sep 17 00:00:00 2001 From: blotus Date: Thu, 7 Mar 2024 14:04:50 +0100 Subject: [PATCH 046/318] Auto detect if reading logs or storing sqlite db on a network share (#2241) --- pkg/acquisition/modules/file/file.go | 44 +++++++++-- pkg/csconfig/api_test.go | 1 + pkg/csconfig/database.go | 36 ++++++++- pkg/csconfig/database_test.go | 7 +- pkg/types/getfstype.go | 112 +++++++++++++++++++++++++++ pkg/types/getfstype_windows.go | 53 +++++++++++++ pkg/types/utils.go | 10 +++ 7 files changed, 251 insertions(+), 12 deletions(-) create mode 100644 pkg/types/getfstype.go create mode 100644 pkg/types/getfstype_windows.go diff --git a/pkg/acquisition/modules/file/file.go b/pkg/acquisition/modules/file/file.go index 4ea9466d457..9ab418a8442 100644 --- a/pkg/acquisition/modules/file/file.go +++ b/pkg/acquisition/modules/file/file.go @@ -38,9 +38,9 @@ type FileConfiguration struct { Filenames []string ExcludeRegexps []string `yaml:"exclude_regexps"` Filename string - ForceInotify bool `yaml:"force_inotify"` - MaxBufferSize int `yaml:"max_buffer_size"` - PollWithoutInotify bool `yaml:"poll_without_inotify"` + ForceInotify bool `yaml:"force_inotify"` + MaxBufferSize int `yaml:"max_buffer_size"` + PollWithoutInotify *bool `yaml:"poll_without_inotify"` configuration.DataSourceCommonCfg `yaml:",inline"` } @@ -330,7 +330,22 @@ func (f *FileSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) er continue } - tail, err := tail.TailFile(file, tail.Config{ReOpen: true, Follow: true, Poll: 
f.config.PollWithoutInotify, Location: &tail.SeekInfo{Offset: 0, Whence: io.SeekEnd}, Logger: log.NewEntry(log.StandardLogger())}) + inotifyPoll := true + if f.config.PollWithoutInotify != nil { + inotifyPoll = *f.config.PollWithoutInotify + } else { + networkFS, fsType, err := types.IsNetworkFS(file) + if err != nil { + f.logger.Warningf("Could not get fs type for %s : %s", file, err) + } + f.logger.Debugf("fs for %s is network: %t (%s)", file, networkFS, fsType) + if networkFS { + f.logger.Warnf("Disabling inotify poll on %s as it is on a network share. You can manually set poll_without_inotify to true to make this message disappear, or to false to enforce inotify poll", file) + inotifyPoll = false + } + } + + tail, err := tail.TailFile(file, tail.Config{ReOpen: true, Follow: true, Poll: inotifyPoll, Location: &tail.SeekInfo{Offset: 0, Whence: io.SeekEnd}, Logger: log.NewEntry(log.StandardLogger())}) if err != nil { f.logger.Errorf("Could not start tailing file %s : %s", file, err) continue @@ -413,8 +428,27 @@ func (f *FileSource) monitorNewFiles(out chan types.Event, t *tomb.Tomb) error { f.logger.Errorf("unable to close %s : %s", event.Name, err) continue } + + inotifyPoll := true + if f.config.PollWithoutInotify != nil { + inotifyPoll = *f.config.PollWithoutInotify + } else { + if f.config.PollWithoutInotify != nil { + inotifyPoll = *f.config.PollWithoutInotify + } else { + networkFS, fsType, err := types.IsNetworkFS(event.Name) + if err != nil { + f.logger.Warningf("Could not get fs type for %s : %s", event.Name, err) + } + f.logger.Debugf("fs for %s is network: %t (%s)", event.Name, networkFS, fsType) + if networkFS { + inotifyPoll = false + } + } + } + //Slightly different parameters for Location, as we want to read the first lines of the newly created file - tail, err := tail.TailFile(event.Name, tail.Config{ReOpen: true, Follow: true, Poll: f.config.PollWithoutInotify, Location: &tail.SeekInfo{Offset: 0, Whence: io.SeekStart}}) + tail, err := 
tail.TailFile(event.Name, tail.Config{ReOpen: true, Follow: true, Poll: inotifyPoll, Location: &tail.SeekInfo{Offset: 0, Whence: io.SeekStart}}) if err != nil { logger.Errorf("Could not start tailing file %s : %s", event.Name, err) break diff --git a/pkg/csconfig/api_test.go b/pkg/csconfig/api_test.go index b6febd4d450..463b7c1b2ec 100644 --- a/pkg/csconfig/api_test.go +++ b/pkg/csconfig/api_test.go @@ -194,6 +194,7 @@ func TestLoadAPIServer(t *testing.T) { DbPath: "./testdata/test.db", Type: "sqlite", MaxOpenConns: ptr.Of(DEFAULT_MAX_OPEN_CONNS), + UseWal: ptr.Of(true), // autodetected DecisionBulkSize: defaultDecisionBulkSize, }, ConsoleConfigPath: DefaultConfigPath("console.yaml"), diff --git a/pkg/csconfig/database.go b/pkg/csconfig/database.go index 2df2207859d..a7bc57eefdc 100644 --- a/pkg/csconfig/database.go +++ b/pkg/csconfig/database.go @@ -3,12 +3,15 @@ package csconfig import ( "errors" "fmt" + "path/filepath" "time" "entgo.io/ent/dialect" log "github.com/sirupsen/logrus" "github.com/crowdsecurity/go-cs-lib/ptr" + + "github.com/crowdsecurity/crowdsec/pkg/types" ) const ( @@ -69,6 +72,35 @@ func (c *Config) LoadDBConfig(inCli bool) error { c.DbConfig.MaxOpenConns = ptr.Of(DEFAULT_MAX_OPEN_CONNS) } + if !inCli && c.DbConfig.Type == "sqlite" { + if c.DbConfig.UseWal == nil { + dbDir := filepath.Dir(c.DbConfig.DbPath) + isNetwork, fsType, err := types.IsNetworkFS(dbDir) + if err != nil { + log.Warnf("unable to determine if database is on network filesystem: %s", err) + log.Warning("You are using sqlite without WAL, this can have a performance impact. If you do not store the database in a network share, set db_config.use_wal to true. 
Set explicitly to false to disable this warning.") + return nil + } + if isNetwork { + log.Debugf("database is on network filesystem (%s), setting useWal to false", fsType) + c.DbConfig.UseWal = ptr.Of(false) + } else { + log.Debugf("database is on local filesystem (%s), setting useWal to true", fsType) + c.DbConfig.UseWal = ptr.Of(true) + } + } else if *c.DbConfig.UseWal { + dbDir := filepath.Dir(c.DbConfig.DbPath) + isNetwork, fsType, err := types.IsNetworkFS(dbDir) + if err != nil { + log.Warnf("unable to determine if database is on network filesystem: %s", err) + return nil + } + if isNetwork { + log.Warnf("database seems to be stored on a network share (%s), but useWal is set to true. Proceed at your own risk.", fsType) + } + } + } + if c.DbConfig.DecisionBulkSize == 0 { log.Tracef("No decision_bulk_size value provided, using default value of %d", defaultDecisionBulkSize) c.DbConfig.DecisionBulkSize = defaultDecisionBulkSize @@ -79,10 +111,6 @@ func (c *Config) LoadDBConfig(inCli bool) error { c.DbConfig.DecisionBulkSize = maxDecisionBulkSize } - if !inCli && c.DbConfig.Type == "sqlite" && c.DbConfig.UseWal == nil { - log.Warning("You are using sqlite without WAL, this can have a performance impact. If you do not store the database in a network share, set db_config.use_wal to true. 
Set explicitly to false to disable this warning.") - } - return nil } diff --git a/pkg/csconfig/database_test.go b/pkg/csconfig/database_test.go index a946025799d..c7741baf038 100644 --- a/pkg/csconfig/database_test.go +++ b/pkg/csconfig/database_test.go @@ -30,9 +30,10 @@ func TestLoadDBConfig(t *testing.T) { }, }, expected: &DatabaseCfg{ - Type: "sqlite", - DbPath: "./testdata/test.db", - MaxOpenConns: ptr.Of(10), + Type: "sqlite", + DbPath: "./testdata/test.db", + MaxOpenConns: ptr.Of(10), + UseWal: ptr.Of(true), DecisionBulkSize: defaultDecisionBulkSize, }, }, diff --git a/pkg/types/getfstype.go b/pkg/types/getfstype.go new file mode 100644 index 00000000000..4a54fc9481e --- /dev/null +++ b/pkg/types/getfstype.go @@ -0,0 +1,112 @@ +//go:build !windows + +package types + +import ( + "fmt" + "syscall" +) + +// Generated with `man statfs | grep _MAGIC | awk '{split(tolower($1),a,"_"); print $2 ": \"" a[1] "\","}'` +// ext2/3/4 duplicates removed to just have ext4 +// XIAFS removed as well +var fsTypeMapping map[int]string = map[int]string{ + 0xadf5: "adfs", + 0xadff: "affs", + 0x5346414f: "afs", + 0x09041934: "anon", + 0x0187: "autofs", + 0x62646576: "bdevfs", + 0x42465331: "befs", + 0x1badface: "bfs", + 0x42494e4d: "binfmtfs", + 0xcafe4a11: "bpf", + 0x9123683e: "btrfs", + 0x73727279: "btrfs", + 0x27e0eb: "cgroup", + 0x63677270: "cgroup2", + 0xff534d42: "cifs", + 0x73757245: "coda", + 0x012ff7b7: "coh", + 0x28cd3d45: "cramfs", + 0x64626720: "debugfs", + 0x1373: "devfs", + 0x1cd1: "devpts", + 0xf15f: "ecryptfs", + 0xde5e81e4: "efivarfs", + 0x00414a53: "efs", + 0x137d: "ext", + 0xef51: "ext2", + 0xef53: "ext4", + 0xf2f52010: "f2fs", + 0x65735546: "fuse", + 0xbad1dea: "futexfs", + 0x4244: "hfs", + 0x00c0ffee: "hostfs", + 0xf995e849: "hpfs", + 0x958458f6: "hugetlbfs", + 0x9660: "isofs", + 0x72b6: "jffs2", + 0x3153464a: "jfs", + 0x137f: "minix", + 0x138f: "minix", + 0x2468: "minix2", + 0x2478: "minix2", + 0x4d5a: "minix3", + 0x19800202: "mqueue", + 0x4d44: "msdos", + 
0x11307854: "mtd", + 0x564c: "ncp", + 0x6969: "nfs", + 0x3434: "nilfs", + 0x6e736673: "nsfs", + 0x5346544e: "ntfs", + 0x7461636f: "ocfs2", + 0x9fa1: "openprom", + 0x794c7630: "overlayfs", + 0x50495045: "pipefs", + 0x9fa0: "proc", + 0x6165676c: "pstorefs", + 0x002f: "qnx4", + 0x68191122: "qnx6", + 0x858458f6: "ramfs", + 0x52654973: "reiserfs", + 0x7275: "romfs", + 0x73636673: "securityfs", + 0xf97cff8c: "selinux", + 0x43415d53: "smack", + 0x517b: "smb", + 0xfe534d42: "smb2", + 0x534f434b: "sockfs", + 0x73717368: "squashfs", + 0x62656572: "sysfs", + 0x012ff7b6: "sysv2", + 0x012ff7b5: "sysv4", + 0x01021994: "tmpfs", + 0x74726163: "tracefs", + 0x15013346: "udf", + 0x00011954: "ufs", + 0x9fa2: "usbdevice", + 0x01021997: "v9fs", + 0xa501fcf5: "vxfs", + 0xabba1974: "xenfs", + 0x012ff7b4: "xenix", + 0x58465342: "xfs", +} + +func GetFSType(path string) (string, error) { + var buf syscall.Statfs_t + + err := syscall.Statfs(path, &buf) + + if err != nil { + return "", err + } + + fsType, ok := fsTypeMapping[int(buf.Type)] + if !ok { + return "", fmt.Errorf("unknown fstype %d", buf.Type) + } + + return fsType, nil +} diff --git a/pkg/types/getfstype_windows.go b/pkg/types/getfstype_windows.go new file mode 100644 index 00000000000..03d8fffd48d --- /dev/null +++ b/pkg/types/getfstype_windows.go @@ -0,0 +1,53 @@ +package types + +import ( + "path/filepath" + "syscall" + "unsafe" +) + +func GetFSType(path string) (string, error) { + kernel32, err := syscall.LoadLibrary("kernel32.dll") + if err != nil { + return "", err + } + defer syscall.FreeLibrary(kernel32) + + getVolumeInformation, err := syscall.GetProcAddress(kernel32, "GetVolumeInformationW") + if err != nil { + return "", err + } + + // Convert relative path to absolute path + absPath, err := filepath.Abs(path) + if err != nil { + return "", err + } + + // Get the root path of the volume + volumeRoot := filepath.VolumeName(absPath) + "\\" + + volumeRootPtr, _ := syscall.UTF16PtrFromString(volumeRoot) + + var ( + 
fileSystemNameBuffer = make([]uint16, 260) + nFileSystemNameSize = uint32(len(fileSystemNameBuffer)) + ) + + ret, _, err := syscall.SyscallN(getVolumeInformation, + uintptr(unsafe.Pointer(volumeRootPtr)), + 0, + 0, + 0, + 0, + 0, + uintptr(unsafe.Pointer(&fileSystemNameBuffer[0])), + uintptr(nFileSystemNameSize), + 0) + + if ret == 0 { + return "", err + } + + return syscall.UTF16ToString(fileSystemNameBuffer), nil +} diff --git a/pkg/types/utils.go b/pkg/types/utils.go index e42c36d8aeb..712d44ba12d 100644 --- a/pkg/types/utils.go +++ b/pkg/types/utils.go @@ -3,6 +3,7 @@ package types import ( "fmt" "path/filepath" + "strings" "time" log "github.com/sirupsen/logrus" @@ -67,3 +68,12 @@ func ConfigureLogger(clog *log.Logger) error { func UtcNow() time.Time { return time.Now().UTC() } + +func IsNetworkFS(path string) (bool, string, error) { + fsType, err := GetFSType(path) + if err != nil { + return false, "", err + } + fsType = strings.ToLower(fsType) + return fsType == "nfs" || fsType == "cifs" || fsType == "smb" || fsType == "smb2", fsType, nil +} From 8108e4156d1f564d7acc950e9b49488da5021c17 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 7 Mar 2024 14:25:25 +0100 Subject: [PATCH 047/318] CI: "make generate" target; use ent 0.12.5 (#2871) * CI: "make generate" target; pin tool versions * use ent 0.12.5 * fix make help * fix model generation target; re-run swagger --- Makefile | 4 ++ go.mod | 2 +- go.sum | 6 +- pkg/database/ent/alert_update.go | 16 ++++++ pkg/database/ent/bouncer_update.go | 48 ++++++++++++++++ pkg/database/ent/client.go | 11 +++- pkg/database/ent/configitem_update.go | 32 +++++++++++ pkg/database/ent/decision_update.go | 80 +++++++++++++++++++++++++++ pkg/database/ent/event_update.go | 32 +++++++++++ pkg/database/ent/generate.go | 2 +- pkg/database/ent/lock_update.go | 16 ++++++ pkg/database/ent/machine_update.go | 48 ++++++++++++++++ pkg/database/ent/meta_update.go | 32 +++++++++++ 
pkg/database/ent/runtime/runtime.go | 4 +- pkg/models/add_alerts_request.go | 5 ++ pkg/models/alert.go | 11 ++++ pkg/models/generate.go | 4 ++ pkg/models/get_alerts_response.go | 5 ++ pkg/models/get_decisions_response.go | 5 ++ pkg/models/meta.go | 5 ++ pkg/models/metrics.go | 10 ++++ 21 files changed, 367 insertions(+), 11 deletions(-) create mode 100644 pkg/models/generate.go diff --git a/Makefile b/Makefile index 5d656165fa8..3f271c54ca4 100644 --- a/Makefile +++ b/Makefile @@ -202,6 +202,10 @@ cscli: goversion ## Build cscli crowdsec: goversion ## Build crowdsec @$(MAKE) -C $(CROWDSEC_FOLDER) build $(MAKE_FLAGS) +.PHONY: generate +generate: ## Generate code for the database and APIs + $(GO) generate ./pkg/database/ent + $(GO) generate ./pkg/models .PHONY: testclean testclean: bats-clean ## Remove test artifacts diff --git a/go.mod b/go.mod index c2d6ca2c148..06e0275c82b 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ go 1.21 // toolchain go1.21.3 require ( - entgo.io/ent v0.12.4 + entgo.io/ent v0.12.5 github.com/AlecAivazis/survey/v2 v2.3.7 github.com/Masterminds/semver/v3 v3.2.1 github.com/Masterminds/sprig/v3 v3.2.3 diff --git a/go.sum b/go.sum index 7e860300089..35ab5813fca 100644 --- a/go.sum +++ b/go.sum @@ -2,8 +2,8 @@ ariga.io/atlas v0.14.1-0.20230918065911-83ad451a4935 h1:JnYs/y8RJ3+MiIUp+3RgyyeO ariga.io/atlas v0.14.1-0.20230918065911-83ad451a4935/go.mod h1:isZrlzJ5cpoCoKFoY9knZug7Lq4pP1cm8g3XciLZ0Pw= bitbucket.org/creachadair/stringset v0.0.9 h1:L4vld9nzPt90UZNrXjNelTshD74ps4P5NGs3Iq6yN3o= bitbucket.org/creachadair/stringset v0.0.9/go.mod h1:t+4WcQ4+PXTa8aQdNKe40ZP6iwesoMFWAxPGd3UGjyY= -entgo.io/ent v0.12.4 h1:LddPnAyxls/O7DTXZvUGDj0NZIdGSu317+aoNLJWbD8= -entgo.io/ent v0.12.4/go.mod h1:Y3JVAjtlIk8xVZYSn3t3mf8xlZIn5SAOXZQxD6kKI+Q= +entgo.io/ent v0.12.5 h1:KREM5E4CSoej4zeGa88Ou/gfturAnpUv0mzAjch1sj4= +entgo.io/ent v0.12.5/go.mod h1:Y3JVAjtlIk8xVZYSn3t3mf8xlZIn5SAOXZQxD6kKI+Q= github.com/AlecAivazis/survey/v2 v2.3.7 
h1:6I/u8FvytdGsgonrYsVn2t8t4QiRnh6QSTqkkhIiSjQ= github.com/AlecAivazis/survey/v2 v2.3.7/go.mod h1:xUTIdE4KCOIjsBAE1JYsUPoCqYdZ1reCfTwbto0Fduo= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= @@ -540,8 +540,6 @@ github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= -github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= -github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 h1:rc3tiVYb5z54aKaDfakKn0dDjIyPpTtszkjuMzyt7ec= diff --git a/pkg/database/ent/alert_update.go b/pkg/database/ent/alert_update.go index 0e41ba18109..f8a4d108527 100644 --- a/pkg/database/ent/alert_update.go +++ b/pkg/database/ent/alert_update.go @@ -62,6 +62,14 @@ func (au *AlertUpdate) SetScenario(s string) *AlertUpdate { return au } +// SetNillableScenario sets the "scenario" field if the given value is not nil. +func (au *AlertUpdate) SetNillableScenario(s *string) *AlertUpdate { + if s != nil { + au.SetScenario(*s) + } + return au +} + // SetBucketId sets the "bucketId" field. func (au *AlertUpdate) SetBucketId(s string) *AlertUpdate { au.mutation.SetBucketId(s) @@ -1029,6 +1037,14 @@ func (auo *AlertUpdateOne) SetScenario(s string) *AlertUpdateOne { return auo } +// SetNillableScenario sets the "scenario" field if the given value is not nil. 
+func (auo *AlertUpdateOne) SetNillableScenario(s *string) *AlertUpdateOne { + if s != nil { + auo.SetScenario(*s) + } + return auo +} + // SetBucketId sets the "bucketId" field. func (auo *AlertUpdateOne) SetBucketId(s string) *AlertUpdateOne { auo.mutation.SetBucketId(s) diff --git a/pkg/database/ent/bouncer_update.go b/pkg/database/ent/bouncer_update.go index f7e71eb315e..b3f5e1a5540 100644 --- a/pkg/database/ent/bouncer_update.go +++ b/pkg/database/ent/bouncer_update.go @@ -58,18 +58,42 @@ func (bu *BouncerUpdate) SetName(s string) *BouncerUpdate { return bu } +// SetNillableName sets the "name" field if the given value is not nil. +func (bu *BouncerUpdate) SetNillableName(s *string) *BouncerUpdate { + if s != nil { + bu.SetName(*s) + } + return bu +} + // SetAPIKey sets the "api_key" field. func (bu *BouncerUpdate) SetAPIKey(s string) *BouncerUpdate { bu.mutation.SetAPIKey(s) return bu } +// SetNillableAPIKey sets the "api_key" field if the given value is not nil. +func (bu *BouncerUpdate) SetNillableAPIKey(s *string) *BouncerUpdate { + if s != nil { + bu.SetAPIKey(*s) + } + return bu +} + // SetRevoked sets the "revoked" field. func (bu *BouncerUpdate) SetRevoked(b bool) *BouncerUpdate { bu.mutation.SetRevoked(b) return bu } +// SetNillableRevoked sets the "revoked" field if the given value is not nil. +func (bu *BouncerUpdate) SetNillableRevoked(b *bool) *BouncerUpdate { + if b != nil { + bu.SetRevoked(*b) + } + return bu +} + // SetIPAddress sets the "ip_address" field. func (bu *BouncerUpdate) SetIPAddress(s string) *BouncerUpdate { bu.mutation.SetIPAddress(s) @@ -333,18 +357,42 @@ func (buo *BouncerUpdateOne) SetName(s string) *BouncerUpdateOne { return buo } +// SetNillableName sets the "name" field if the given value is not nil. +func (buo *BouncerUpdateOne) SetNillableName(s *string) *BouncerUpdateOne { + if s != nil { + buo.SetName(*s) + } + return buo +} + // SetAPIKey sets the "api_key" field. 
func (buo *BouncerUpdateOne) SetAPIKey(s string) *BouncerUpdateOne { buo.mutation.SetAPIKey(s) return buo } +// SetNillableAPIKey sets the "api_key" field if the given value is not nil. +func (buo *BouncerUpdateOne) SetNillableAPIKey(s *string) *BouncerUpdateOne { + if s != nil { + buo.SetAPIKey(*s) + } + return buo +} + // SetRevoked sets the "revoked" field. func (buo *BouncerUpdateOne) SetRevoked(b bool) *BouncerUpdateOne { buo.mutation.SetRevoked(b) return buo } +// SetNillableRevoked sets the "revoked" field if the given value is not nil. +func (buo *BouncerUpdateOne) SetNillableRevoked(b *bool) *BouncerUpdateOne { + if b != nil { + buo.SetRevoked(*b) + } + return buo +} + // SetIPAddress sets the "ip_address" field. func (buo *BouncerUpdateOne) SetIPAddress(s string) *BouncerUpdateOne { buo.mutation.SetIPAddress(s) diff --git a/pkg/database/ent/client.go b/pkg/database/ent/client.go index 006d52ef9ba..5318109ed42 100644 --- a/pkg/database/ent/client.go +++ b/pkg/database/ent/client.go @@ -50,9 +50,7 @@ type Client struct { // NewClient creates a new client configured with the given options. func NewClient(opts ...Option) *Client { - cfg := config{log: log.Println, hooks: &hooks{}, inters: &inters{}} - cfg.options(opts...) - client := &Client{config: cfg} + client := &Client{config: newConfig(opts...)} client.init() return client } @@ -87,6 +85,13 @@ type ( Option func(*config) ) +// newConfig creates a new config for the client. +func newConfig(opts ...Option) config { + cfg := config{log: log.Println, hooks: &hooks{}, inters: &inters{}} + cfg.options(opts...) + return cfg +} + // options applies the options on the config object. 
func (c *config) options(opts ...Option) { for _, opt := range opts { diff --git a/pkg/database/ent/configitem_update.go b/pkg/database/ent/configitem_update.go index 0db3a0b5233..11fb0755191 100644 --- a/pkg/database/ent/configitem_update.go +++ b/pkg/database/ent/configitem_update.go @@ -58,12 +58,28 @@ func (ciu *ConfigItemUpdate) SetName(s string) *ConfigItemUpdate { return ciu } +// SetNillableName sets the "name" field if the given value is not nil. +func (ciu *ConfigItemUpdate) SetNillableName(s *string) *ConfigItemUpdate { + if s != nil { + ciu.SetName(*s) + } + return ciu +} + // SetValue sets the "value" field. func (ciu *ConfigItemUpdate) SetValue(s string) *ConfigItemUpdate { ciu.mutation.SetValue(s) return ciu } +// SetNillableValue sets the "value" field if the given value is not nil. +func (ciu *ConfigItemUpdate) SetNillableValue(s *string) *ConfigItemUpdate { + if s != nil { + ciu.SetValue(*s) + } + return ciu +} + // Mutation returns the ConfigItemMutation object of the builder. func (ciu *ConfigItemUpdate) Mutation() *ConfigItemMutation { return ciu.mutation @@ -186,12 +202,28 @@ func (ciuo *ConfigItemUpdateOne) SetName(s string) *ConfigItemUpdateOne { return ciuo } +// SetNillableName sets the "name" field if the given value is not nil. +func (ciuo *ConfigItemUpdateOne) SetNillableName(s *string) *ConfigItemUpdateOne { + if s != nil { + ciuo.SetName(*s) + } + return ciuo +} + // SetValue sets the "value" field. func (ciuo *ConfigItemUpdateOne) SetValue(s string) *ConfigItemUpdateOne { ciuo.mutation.SetValue(s) return ciuo } +// SetNillableValue sets the "value" field if the given value is not nil. +func (ciuo *ConfigItemUpdateOne) SetNillableValue(s *string) *ConfigItemUpdateOne { + if s != nil { + ciuo.SetValue(*s) + } + return ciuo +} + // Mutation returns the ConfigItemMutation object of the builder. 
func (ciuo *ConfigItemUpdateOne) Mutation() *ConfigItemMutation { return ciuo.mutation diff --git a/pkg/database/ent/decision_update.go b/pkg/database/ent/decision_update.go index 1b62cc54c30..182457e9f63 100644 --- a/pkg/database/ent/decision_update.go +++ b/pkg/database/ent/decision_update.go @@ -79,12 +79,28 @@ func (du *DecisionUpdate) SetScenario(s string) *DecisionUpdate { return du } +// SetNillableScenario sets the "scenario" field if the given value is not nil. +func (du *DecisionUpdate) SetNillableScenario(s *string) *DecisionUpdate { + if s != nil { + du.SetScenario(*s) + } + return du +} + // SetType sets the "type" field. func (du *DecisionUpdate) SetType(s string) *DecisionUpdate { du.mutation.SetType(s) return du } +// SetNillableType sets the "type" field if the given value is not nil. +func (du *DecisionUpdate) SetNillableType(s *string) *DecisionUpdate { + if s != nil { + du.SetType(*s) + } + return du +} + // SetStartIP sets the "start_ip" field. func (du *DecisionUpdate) SetStartIP(i int64) *DecisionUpdate { du.mutation.ResetStartIP() @@ -226,18 +242,42 @@ func (du *DecisionUpdate) SetScope(s string) *DecisionUpdate { return du } +// SetNillableScope sets the "scope" field if the given value is not nil. +func (du *DecisionUpdate) SetNillableScope(s *string) *DecisionUpdate { + if s != nil { + du.SetScope(*s) + } + return du +} + // SetValue sets the "value" field. func (du *DecisionUpdate) SetValue(s string) *DecisionUpdate { du.mutation.SetValue(s) return du } +// SetNillableValue sets the "value" field if the given value is not nil. +func (du *DecisionUpdate) SetNillableValue(s *string) *DecisionUpdate { + if s != nil { + du.SetValue(*s) + } + return du +} + // SetOrigin sets the "origin" field. func (du *DecisionUpdate) SetOrigin(s string) *DecisionUpdate { du.mutation.SetOrigin(s) return du } +// SetNillableOrigin sets the "origin" field if the given value is not nil. 
+func (du *DecisionUpdate) SetNillableOrigin(s *string) *DecisionUpdate { + if s != nil { + du.SetOrigin(*s) + } + return du +} + // SetSimulated sets the "simulated" field. func (du *DecisionUpdate) SetSimulated(b bool) *DecisionUpdate { du.mutation.SetSimulated(b) @@ -557,12 +597,28 @@ func (duo *DecisionUpdateOne) SetScenario(s string) *DecisionUpdateOne { return duo } +// SetNillableScenario sets the "scenario" field if the given value is not nil. +func (duo *DecisionUpdateOne) SetNillableScenario(s *string) *DecisionUpdateOne { + if s != nil { + duo.SetScenario(*s) + } + return duo +} + // SetType sets the "type" field. func (duo *DecisionUpdateOne) SetType(s string) *DecisionUpdateOne { duo.mutation.SetType(s) return duo } +// SetNillableType sets the "type" field if the given value is not nil. +func (duo *DecisionUpdateOne) SetNillableType(s *string) *DecisionUpdateOne { + if s != nil { + duo.SetType(*s) + } + return duo +} + // SetStartIP sets the "start_ip" field. func (duo *DecisionUpdateOne) SetStartIP(i int64) *DecisionUpdateOne { duo.mutation.ResetStartIP() @@ -704,18 +760,42 @@ func (duo *DecisionUpdateOne) SetScope(s string) *DecisionUpdateOne { return duo } +// SetNillableScope sets the "scope" field if the given value is not nil. +func (duo *DecisionUpdateOne) SetNillableScope(s *string) *DecisionUpdateOne { + if s != nil { + duo.SetScope(*s) + } + return duo +} + // SetValue sets the "value" field. func (duo *DecisionUpdateOne) SetValue(s string) *DecisionUpdateOne { duo.mutation.SetValue(s) return duo } +// SetNillableValue sets the "value" field if the given value is not nil. +func (duo *DecisionUpdateOne) SetNillableValue(s *string) *DecisionUpdateOne { + if s != nil { + duo.SetValue(*s) + } + return duo +} + // SetOrigin sets the "origin" field. func (duo *DecisionUpdateOne) SetOrigin(s string) *DecisionUpdateOne { duo.mutation.SetOrigin(s) return duo } +// SetNillableOrigin sets the "origin" field if the given value is not nil. 
+func (duo *DecisionUpdateOne) SetNillableOrigin(s *string) *DecisionUpdateOne { + if s != nil { + duo.SetOrigin(*s) + } + return duo +} + // SetSimulated sets the "simulated" field. func (duo *DecisionUpdateOne) SetSimulated(b bool) *DecisionUpdateOne { duo.mutation.SetSimulated(b) diff --git a/pkg/database/ent/event_update.go b/pkg/database/ent/event_update.go index db748101519..a06178f79af 100644 --- a/pkg/database/ent/event_update.go +++ b/pkg/database/ent/event_update.go @@ -59,12 +59,28 @@ func (eu *EventUpdate) SetTime(t time.Time) *EventUpdate { return eu } +// SetNillableTime sets the "time" field if the given value is not nil. +func (eu *EventUpdate) SetNillableTime(t *time.Time) *EventUpdate { + if t != nil { + eu.SetTime(*t) + } + return eu +} + // SetSerialized sets the "serialized" field. func (eu *EventUpdate) SetSerialized(s string) *EventUpdate { eu.mutation.SetSerialized(s) return eu } +// SetNillableSerialized sets the "serialized" field if the given value is not nil. +func (eu *EventUpdate) SetNillableSerialized(s *string) *EventUpdate { + if s != nil { + eu.SetSerialized(*s) + } + return eu +} + // SetAlertEvents sets the "alert_events" field. func (eu *EventUpdate) SetAlertEvents(i int) *EventUpdate { eu.mutation.SetAlertEvents(i) @@ -274,12 +290,28 @@ func (euo *EventUpdateOne) SetTime(t time.Time) *EventUpdateOne { return euo } +// SetNillableTime sets the "time" field if the given value is not nil. +func (euo *EventUpdateOne) SetNillableTime(t *time.Time) *EventUpdateOne { + if t != nil { + euo.SetTime(*t) + } + return euo +} + // SetSerialized sets the "serialized" field. func (euo *EventUpdateOne) SetSerialized(s string) *EventUpdateOne { euo.mutation.SetSerialized(s) return euo } +// SetNillableSerialized sets the "serialized" field if the given value is not nil. 
+func (euo *EventUpdateOne) SetNillableSerialized(s *string) *EventUpdateOne { + if s != nil { + euo.SetSerialized(*s) + } + return euo +} + // SetAlertEvents sets the "alert_events" field. func (euo *EventUpdateOne) SetAlertEvents(i int) *EventUpdateOne { euo.mutation.SetAlertEvents(i) diff --git a/pkg/database/ent/generate.go b/pkg/database/ent/generate.go index 9f3a916c7a4..5f4b39eec90 100644 --- a/pkg/database/ent/generate.go +++ b/pkg/database/ent/generate.go @@ -1,4 +1,4 @@ package ent -//go:generate go run -mod=mod entgo.io/ent/cmd/ent generate ./schema +//go:generate go run -mod=mod entgo.io/ent/cmd/ent@v0.12.5 generate ./schema diff --git a/pkg/database/ent/lock_update.go b/pkg/database/ent/lock_update.go index f4deda6e3a8..dc61dfdfde1 100644 --- a/pkg/database/ent/lock_update.go +++ b/pkg/database/ent/lock_update.go @@ -34,6 +34,14 @@ func (lu *LockUpdate) SetName(s string) *LockUpdate { return lu } +// SetNillableName sets the "name" field if the given value is not nil. +func (lu *LockUpdate) SetNillableName(s *string) *LockUpdate { + if s != nil { + lu.SetName(*s) + } + return lu +} + // SetCreatedAt sets the "created_at" field. func (lu *LockUpdate) SetCreatedAt(t time.Time) *LockUpdate { lu.mutation.SetCreatedAt(t) @@ -121,6 +129,14 @@ func (luo *LockUpdateOne) SetName(s string) *LockUpdateOne { return luo } +// SetNillableName sets the "name" field if the given value is not nil. +func (luo *LockUpdateOne) SetNillableName(s *string) *LockUpdateOne { + if s != nil { + luo.SetName(*s) + } + return luo +} + // SetCreatedAt sets the "created_at" field. 
func (luo *LockUpdateOne) SetCreatedAt(t time.Time) *LockUpdateOne { luo.mutation.SetCreatedAt(t) diff --git a/pkg/database/ent/machine_update.go b/pkg/database/ent/machine_update.go index eb517081174..1f87ac04d6f 100644 --- a/pkg/database/ent/machine_update.go +++ b/pkg/database/ent/machine_update.go @@ -83,18 +83,42 @@ func (mu *MachineUpdate) SetMachineId(s string) *MachineUpdate { return mu } +// SetNillableMachineId sets the "machineId" field if the given value is not nil. +func (mu *MachineUpdate) SetNillableMachineId(s *string) *MachineUpdate { + if s != nil { + mu.SetMachineId(*s) + } + return mu +} + // SetPassword sets the "password" field. func (mu *MachineUpdate) SetPassword(s string) *MachineUpdate { mu.mutation.SetPassword(s) return mu } +// SetNillablePassword sets the "password" field if the given value is not nil. +func (mu *MachineUpdate) SetNillablePassword(s *string) *MachineUpdate { + if s != nil { + mu.SetPassword(*s) + } + return mu +} + // SetIpAddress sets the "ipAddress" field. func (mu *MachineUpdate) SetIpAddress(s string) *MachineUpdate { mu.mutation.SetIpAddress(s) return mu } +// SetNillableIpAddress sets the "ipAddress" field if the given value is not nil. +func (mu *MachineUpdate) SetNillableIpAddress(s *string) *MachineUpdate { + if s != nil { + mu.SetIpAddress(*s) + } + return mu +} + // SetScenarios sets the "scenarios" field. func (mu *MachineUpdate) SetScenarios(s string) *MachineUpdate { mu.mutation.SetScenarios(s) @@ -470,18 +494,42 @@ func (muo *MachineUpdateOne) SetMachineId(s string) *MachineUpdateOne { return muo } +// SetNillableMachineId sets the "machineId" field if the given value is not nil. +func (muo *MachineUpdateOne) SetNillableMachineId(s *string) *MachineUpdateOne { + if s != nil { + muo.SetMachineId(*s) + } + return muo +} + // SetPassword sets the "password" field. 
func (muo *MachineUpdateOne) SetPassword(s string) *MachineUpdateOne { muo.mutation.SetPassword(s) return muo } +// SetNillablePassword sets the "password" field if the given value is not nil. +func (muo *MachineUpdateOne) SetNillablePassword(s *string) *MachineUpdateOne { + if s != nil { + muo.SetPassword(*s) + } + return muo +} + // SetIpAddress sets the "ipAddress" field. func (muo *MachineUpdateOne) SetIpAddress(s string) *MachineUpdateOne { muo.mutation.SetIpAddress(s) return muo } +// SetNillableIpAddress sets the "ipAddress" field if the given value is not nil. +func (muo *MachineUpdateOne) SetNillableIpAddress(s *string) *MachineUpdateOne { + if s != nil { + muo.SetIpAddress(*s) + } + return muo +} + // SetScenarios sets the "scenarios" field. func (muo *MachineUpdateOne) SetScenarios(s string) *MachineUpdateOne { muo.mutation.SetScenarios(s) diff --git a/pkg/database/ent/meta_update.go b/pkg/database/ent/meta_update.go index 8071c4f0df5..a1379faa130 100644 --- a/pkg/database/ent/meta_update.go +++ b/pkg/database/ent/meta_update.go @@ -59,12 +59,28 @@ func (mu *MetaUpdate) SetKey(s string) *MetaUpdate { return mu } +// SetNillableKey sets the "key" field if the given value is not nil. +func (mu *MetaUpdate) SetNillableKey(s *string) *MetaUpdate { + if s != nil { + mu.SetKey(*s) + } + return mu +} + // SetValue sets the "value" field. func (mu *MetaUpdate) SetValue(s string) *MetaUpdate { mu.mutation.SetValue(s) return mu } +// SetNillableValue sets the "value" field if the given value is not nil. +func (mu *MetaUpdate) SetNillableValue(s *string) *MetaUpdate { + if s != nil { + mu.SetValue(*s) + } + return mu +} + // SetAlertMetas sets the "alert_metas" field. func (mu *MetaUpdate) SetAlertMetas(i int) *MetaUpdate { mu.mutation.SetAlertMetas(i) @@ -274,12 +290,28 @@ func (muo *MetaUpdateOne) SetKey(s string) *MetaUpdateOne { return muo } +// SetNillableKey sets the "key" field if the given value is not nil. 
+func (muo *MetaUpdateOne) SetNillableKey(s *string) *MetaUpdateOne { + if s != nil { + muo.SetKey(*s) + } + return muo +} + // SetValue sets the "value" field. func (muo *MetaUpdateOne) SetValue(s string) *MetaUpdateOne { muo.mutation.SetValue(s) return muo } +// SetNillableValue sets the "value" field if the given value is not nil. +func (muo *MetaUpdateOne) SetNillableValue(s *string) *MetaUpdateOne { + if s != nil { + muo.SetValue(*s) + } + return muo +} + // SetAlertMetas sets the "alert_metas" field. func (muo *MetaUpdateOne) SetAlertMetas(i int) *MetaUpdateOne { muo.mutation.SetAlertMetas(i) diff --git a/pkg/database/ent/runtime/runtime.go b/pkg/database/ent/runtime/runtime.go index 2a645f624d7..d10a2fb5459 100644 --- a/pkg/database/ent/runtime/runtime.go +++ b/pkg/database/ent/runtime/runtime.go @@ -5,6 +5,6 @@ package runtime // The schema-stitching logic is generated in github.com/crowdsecurity/crowdsec/pkg/database/ent/runtime.go const ( - Version = "v0.12.4" // Version of ent codegen. - Sum = "h1:LddPnAyxls/O7DTXZvUGDj0NZIdGSu317+aoNLJWbD8=" // Sum of ent codegen. + Version = "v0.12.5" // Version of ent codegen. + Sum = "h1:KREM5E4CSoej4zeGa88Ou/gfturAnpUv0mzAjch1sj4=" // Sum of ent codegen. 
) diff --git a/pkg/models/add_alerts_request.go b/pkg/models/add_alerts_request.go index fd7246be066..a69934ef770 100644 --- a/pkg/models/add_alerts_request.go +++ b/pkg/models/add_alerts_request.go @@ -54,6 +54,11 @@ func (m AddAlertsRequest) ContextValidate(ctx context.Context, formats strfmt.Re for i := 0; i < len(m); i++ { if m[i] != nil { + + if swag.IsZero(m[i]) { // not required + return nil + } + if err := m[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName(strconv.Itoa(i)) diff --git a/pkg/models/alert.go b/pkg/models/alert.go index ec769a1fbb1..895f5ad76e1 100644 --- a/pkg/models/alert.go +++ b/pkg/models/alert.go @@ -399,6 +399,11 @@ func (m *Alert) contextValidateDecisions(ctx context.Context, formats strfmt.Reg for i := 0; i < len(m.Decisions); i++ { if m.Decisions[i] != nil { + + if swag.IsZero(m.Decisions[i]) { // not required + return nil + } + if err := m.Decisions[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("decisions" + "." + strconv.Itoa(i)) @@ -419,6 +424,11 @@ func (m *Alert) contextValidateEvents(ctx context.Context, formats strfmt.Regist for i := 0; i < len(m.Events); i++ { if m.Events[i] != nil { + + if swag.IsZero(m.Events[i]) { // not required + return nil + } + if err := m.Events[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("events" + "." 
+ strconv.Itoa(i)) @@ -469,6 +479,7 @@ func (m *Alert) contextValidateMeta(ctx context.Context, formats strfmt.Registry func (m *Alert) contextValidateSource(ctx context.Context, formats strfmt.Registry) error { if m.Source != nil { + if err := m.Source.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("source") diff --git a/pkg/models/generate.go b/pkg/models/generate.go new file mode 100644 index 00000000000..ccacc409ab5 --- /dev/null +++ b/pkg/models/generate.go @@ -0,0 +1,4 @@ +package models + +//go:generate go run -mod=mod github.com/go-swagger/go-swagger/cmd/swagger@v0.30.5 generate model --spec=./localapi_swagger.yaml --target=../ + diff --git a/pkg/models/get_alerts_response.go b/pkg/models/get_alerts_response.go index 41b9d5afdbd..d4ea36e02c5 100644 --- a/pkg/models/get_alerts_response.go +++ b/pkg/models/get_alerts_response.go @@ -54,6 +54,11 @@ func (m GetAlertsResponse) ContextValidate(ctx context.Context, formats strfmt.R for i := 0; i < len(m); i++ { if m[i] != nil { + + if swag.IsZero(m[i]) { // not required + return nil + } + if err := m[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName(strconv.Itoa(i)) diff --git a/pkg/models/get_decisions_response.go b/pkg/models/get_decisions_response.go index b65b950fc58..19437dc9b38 100644 --- a/pkg/models/get_decisions_response.go +++ b/pkg/models/get_decisions_response.go @@ -54,6 +54,11 @@ func (m GetDecisionsResponse) ContextValidate(ctx context.Context, formats strfm for i := 0; i < len(m); i++ { if m[i] != nil { + + if swag.IsZero(m[i]) { // not required + return nil + } + if err := m[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName(strconv.Itoa(i)) diff --git a/pkg/models/meta.go b/pkg/models/meta.go index 6ad20856d6a..df5ae3c6285 100644 --- a/pkg/models/meta.go +++ b/pkg/models/meta.go @@ -56,6 +56,11 @@ func (m 
Meta) ContextValidate(ctx context.Context, formats strfmt.Registry) erro for i := 0; i < len(m); i++ { if m[i] != nil { + + if swag.IsZero(m[i]) { // not required + return nil + } + if err := m[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName(strconv.Itoa(i)) diff --git a/pkg/models/metrics.go b/pkg/models/metrics.go index 573678d1f84..7fbb91c63e4 100644 --- a/pkg/models/metrics.go +++ b/pkg/models/metrics.go @@ -141,6 +141,11 @@ func (m *Metrics) contextValidateBouncers(ctx context.Context, formats strfmt.Re for i := 0; i < len(m.Bouncers); i++ { if m.Bouncers[i] != nil { + + if swag.IsZero(m.Bouncers[i]) { // not required + return nil + } + if err := m.Bouncers[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("bouncers" + "." + strconv.Itoa(i)) @@ -161,6 +166,11 @@ func (m *Metrics) contextValidateMachines(ctx context.Context, formats strfmt.Re for i := 0; i < len(m.Machines); i++ { if m.Machines[i] != nil { + + if swag.IsZero(m.Machines[i]) { // not required + return nil + } + if err := m.Machines[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("machines" + "." + strconv.Itoa(i)) From 1eab943ec224e63d15fce31ef55961741f9b4077 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 7 Mar 2024 14:36:28 +0100 Subject: [PATCH 048/318] crowdsec: remove warning if prometheus port is taken during cold logs processing (#2857) i.e. 
remove a "Warning: port is already in use" because it's probably LAPI --- cmd/crowdsec/crowdsec.go | 2 +- cmd/crowdsec/main.go | 4 ++++ cmd/crowdsec/metrics.go | 5 ++++- 3 files changed, 9 insertions(+), 2 deletions(-) diff --git a/cmd/crowdsec/crowdsec.go b/cmd/crowdsec/crowdsec.go index 0d7d454edf2..37a12b9d3cb 100644 --- a/cmd/crowdsec/crowdsec.go +++ b/cmd/crowdsec/crowdsec.go @@ -280,7 +280,7 @@ func waitOnTomb() { case <-acquisTomb.Dead(): /*if it's acquisition dying it means that we were in "cat" mode. while shutting down, we need to give time for all buckets to process in flight data*/ - log.Warning("Acquisition is finished, shutting down") + log.Info("Acquisition is finished, shutting down") /* While it might make sense to want to shut-down parser/buckets/etc. as soon as acquisition is finished, we might have some pending buckets: buckets that overflowed, but whose LeakRoutine are still alive because they diff --git a/cmd/crowdsec/main.go b/cmd/crowdsec/main.go index 70f7d48dce4..8950790480a 100644 --- a/cmd/crowdsec/main.go +++ b/cmd/crowdsec/main.go @@ -75,6 +75,10 @@ type Flags struct { CPUProfile string } +func (f *Flags) haveTimeMachine() bool { + return f.OneShotDSN != "" +} + type labelsMap map[string]string func LoadBuckets(cConfig *csconfig.Config, hub *cwhub.Hub) error { diff --git a/cmd/crowdsec/metrics.go b/cmd/crowdsec/metrics.go index aed43db00c8..682968bb75a 100644 --- a/cmd/crowdsec/metrics.go +++ b/cmd/crowdsec/metrics.go @@ -196,6 +196,9 @@ func servePrometheus(config *csconfig.PrometheusCfg, dbClient *database.Client, log.Debugf("serving metrics after %s ms", time.Since(crowdsecT0)) if err := http.ListenAndServe(fmt.Sprintf("%s:%d", config.ListenAddr, config.ListenPort), nil); err != nil { - log.Warningf("prometheus: %s", err) + // in time machine, we most likely have the LAPI using the port + if !flags.haveTimeMachine() { + log.Warningf("prometheus: %s", err) + } } } From 6c5e8afde9ab8f751bd4c35c42e66dd114354279 Mon Sep 17 00:00:00 2001 
From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 8 Mar 2024 10:55:30 +0100 Subject: [PATCH 049/318] pkg/cwhub: download data assets to temporary files to avoid partial fetch (#2879) --- pkg/cwhub/dataset.go | 22 ++++++++++++++++++---- pkg/cwhub/dataset_test.go | 8 ++++---- pkg/cwhub/errors.go | 2 +- pkg/cwhub/hub.go | 3 ++- pkg/cwhub/sync.go | 15 ++++++++------- 5 files changed, 33 insertions(+), 17 deletions(-) diff --git a/pkg/cwhub/dataset.go b/pkg/cwhub/dataset.go index c900752b8b3..4612f357626 100644 --- a/pkg/cwhub/dataset.go +++ b/pkg/cwhub/dataset.go @@ -6,6 +6,7 @@ import ( "io" "net/http" "os" + "path/filepath" "time" "github.com/sirupsen/logrus" @@ -31,19 +32,32 @@ func downloadFile(url string, destPath string) error { return fmt.Errorf("bad http code %d for %s", resp.StatusCode, url) } - file, err := os.Create(destPath) + tmpFile, err := os.CreateTemp(filepath.Dir(destPath), filepath.Base(destPath)+".*.tmp") if err != nil { return err } - defer file.Close() + + tmpFileName := tmpFile.Name() + defer func() { + tmpFile.Close() + os.Remove(tmpFileName) + }() // avoid reading the whole file in memory - _, err = io.Copy(file, resp.Body) + _, err = io.Copy(tmpFile, resp.Body) if err != nil { return err } - if err = file.Sync(); err != nil { + if err = tmpFile.Sync(); err != nil { + return err + } + + if err = tmpFile.Close(); err != nil { + return err + } + + if err = os.Rename(tmpFileName, destPath); err != nil { return err } diff --git a/pkg/cwhub/dataset_test.go b/pkg/cwhub/dataset_test.go index f23f4878285..93d3e3bf01e 100644 --- a/pkg/cwhub/dataset_test.go +++ b/pkg/cwhub/dataset_test.go @@ -16,7 +16,7 @@ func TestDownloadFile(t *testing.T) { httpmock.Activate() defer httpmock.DeactivateAndReset() - //OK + // OK httpmock.RegisterResponder( "GET", "https://example.com/xx", @@ -36,15 +36,15 @@ func TestDownloadFile(t *testing.T) { assert.Equal(t, "example content oneoneone", string(content)) require.NoError(t, err) - //bad uri + // bad uri 
err = downloadFile("https://zz.com", examplePath) require.Error(t, err) - //404 + // 404 err = downloadFile("https://example.com/x", examplePath) require.Error(t, err) - //bad target + // bad target err = downloadFile("https://example.com/xx", "") require.Error(t, err) } diff --git a/pkg/cwhub/errors.go b/pkg/cwhub/errors.go index 789c2eced7b..f1e779b5476 100644 --- a/pkg/cwhub/errors.go +++ b/pkg/cwhub/errors.go @@ -6,7 +6,7 @@ import ( ) var ( - // ErrNilRemoteHub is returned when the remote hub configuration is not provided to the NewHub constructor. + // ErrNilRemoteHub is returned when trying to download with a local-only configuration. ErrNilRemoteHub = errors.New("remote hub configuration is not provided. Please report this issue to the developers") ) diff --git a/pkg/cwhub/hub.go b/pkg/cwhub/hub.go index 21a19bc4526..44e24020d03 100644 --- a/pkg/cwhub/hub.go +++ b/pkg/cwhub/hub.go @@ -3,6 +3,7 @@ package cwhub import ( "bytes" "encoding/json" + "errors" "fmt" "io" "os" @@ -34,7 +35,7 @@ func (h *Hub) GetDataDir() string { // All download operations (including updateIndex) return ErrNilRemoteHub if the remote configuration is not set. 
func NewHub(local *csconfig.LocalHubCfg, remote *RemoteHubCfg, updateIndex bool, logger *logrus.Logger) (*Hub, error) { if local == nil { - return nil, fmt.Errorf("no hub configuration found") + return nil, errors.New("no hub configuration found") } if logger == nil { diff --git a/pkg/cwhub/sync.go b/pkg/cwhub/sync.go index 8ce91dc2193..cb7bf37867c 100644 --- a/pkg/cwhub/sync.go +++ b/pkg/cwhub/sync.go @@ -77,9 +77,9 @@ func (h *Hub) getItemFileInfo(path string, logger *logrus.Logger) (*itemFileInfo if strings.HasPrefix(path, hubDir) { logger.Tracef("in hub dir") - //.../hub/parsers/s00-raw/crowdsec/skip-pretag.yaml - //.../hub/scenarios/crowdsec/ssh_bf.yaml - //.../hub/profiles/crowdsec/linux.yaml + // .../hub/parsers/s00-raw/crowdsec/skip-pretag.yaml + // .../hub/scenarios/crowdsec/ssh_bf.yaml + // .../hub/profiles/crowdsec/linux.yaml if len(subs) < 4 { return nil, fmt.Errorf("path is too short: %s (%d)", path, len(subs)) } @@ -93,13 +93,14 @@ func (h *Hub) getItemFileInfo(path string, logger *logrus.Logger) (*itemFileInfo } } else if strings.HasPrefix(path, installDir) { // we're in install /etc/crowdsec//... 
logger.Tracef("in install dir") + if len(subs) < 3 { return nil, fmt.Errorf("path is too short: %s (%d)", path, len(subs)) } - ///.../config/parser/stage/file.yaml - ///.../config/postoverflow/stage/file.yaml - ///.../config/scenarios/scenar.yaml - ///.../config/collections/linux.yaml //file is empty + // .../config/parser/stage/file.yaml + // .../config/postoverflow/stage/file.yaml + // .../config/scenarios/scenar.yaml + // .../config/collections/linux.yaml //file is empty ret = &itemFileInfo{ inhub: false, fname: subs[len(subs)-1], From 44ec3b9e01ca992b7806c2359f9a9d50636698ea Mon Sep 17 00:00:00 2001 From: blotus Date: Fri, 8 Mar 2024 13:56:59 +0100 Subject: [PATCH 050/318] file acquis: add mutex to protect access to the internal tail map (#2878) --- pkg/acquisition/modules/file/file.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/pkg/acquisition/modules/file/file.go b/pkg/acquisition/modules/file/file.go index 9ab418a8442..a0c22657445 100644 --- a/pkg/acquisition/modules/file/file.go +++ b/pkg/acquisition/modules/file/file.go @@ -11,6 +11,7 @@ import ( "regexp" "strconv" "strings" + "sync" "time" "github.com/fsnotify/fsnotify" @@ -52,6 +53,7 @@ type FileSource struct { logger *log.Entry files []string exclude_regexps []*regexp.Regexp + tailMapMutex *sync.RWMutex } func (f *FileSource) GetUuid() string { @@ -105,6 +107,7 @@ func (f *FileSource) Configure(yamlConfig []byte, logger *log.Entry) error { } f.watchedDirectories = make(map[string]bool) + f.tailMapMutex = &sync.RWMutex{} f.tails = make(map[string]bool) f.watcher, err = fsnotify.NewWatcher() @@ -350,7 +353,9 @@ func (f *FileSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) er f.logger.Errorf("Could not start tailing file %s : %s", file, err) continue } + f.tailMapMutex.Lock() f.tails[file] = true + f.tailMapMutex.Unlock() t.Go(func() error { defer trace.CatchPanic("crowdsec/acquis/file/live/fsnotify") return f.tailFile(out, t, tail) @@ -412,11 +417,14 @@ func (f 
*FileSource) monitorNewFiles(out chan types.Event, t *tomb.Tomb) error { continue } + f.tailMapMutex.RLock() if f.tails[event.Name] { + f.tailMapMutex.RUnlock() //we already have a tail on it, do not start a new one logger.Debugf("Already tailing file %s, not creating a new tail", event.Name) break } + f.tailMapMutex.RUnlock() //cf. https://github.com/crowdsecurity/crowdsec/issues/1168 //do not rely on stat, reclose file immediately as it's opened by Tail fd, err := os.Open(event.Name) @@ -453,7 +461,9 @@ func (f *FileSource) monitorNewFiles(out chan types.Event, t *tomb.Tomb) error { logger.Errorf("Could not start tailing file %s : %s", event.Name, err) break } + f.tailMapMutex.Lock() f.tails[event.Name] = true + f.tailMapMutex.Unlock() t.Go(func() error { defer trace.CatchPanic("crowdsec/acquis/tailfile") return f.tailFile(out, t, tail) From a928b4d001937396f7fd1dea134348a5802af8c2 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 8 Mar 2024 14:22:23 +0100 Subject: [PATCH 051/318] bump dependencies for geoip db / lookup (#2880) --- go.mod | 4 ++-- go.sum | 10 ++++------ 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/go.mod b/go.mod index 06e0275c82b..a2b9c8618c6 100644 --- a/go.mod +++ b/go.mod @@ -63,8 +63,8 @@ require ( github.com/mattn/go-sqlite3 v1.14.16 github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 github.com/nxadm/tail v1.4.8 - github.com/oschwald/geoip2-golang v1.4.0 - github.com/oschwald/maxminddb-golang v1.8.0 + github.com/oschwald/geoip2-golang v1.9.0 + github.com/oschwald/maxminddb-golang v1.12.0 github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.16.0 diff --git a/go.sum b/go.sum index 35ab5813fca..08a454cf0f0 100644 --- a/go.sum +++ b/go.sum @@ -544,11 +544,10 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8 github.com/opencontainers/go-digest v1.0.0/go.mod 
h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 h1:rc3tiVYb5z54aKaDfakKn0dDjIyPpTtszkjuMzyt7ec= github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/oschwald/geoip2-golang v1.4.0 h1:5RlrjCgRyIGDz/mBmPfnAF4h8k0IAcRv9PvrpOfz+Ug= -github.com/oschwald/geoip2-golang v1.4.0/go.mod h1:8QwxJvRImBH+Zl6Aa6MaIcs5YdlZSTKtzmPGzQqi9ng= -github.com/oschwald/maxminddb-golang v1.6.0/go.mod h1:DUJFucBg2cvqx42YmDa/+xHvb0elJtOm3o4aFQ/nb/w= -github.com/oschwald/maxminddb-golang v1.8.0 h1:Uh/DSnGoxsyp/KYbY1AuP0tYEwfs0sCph9p/UMXK/Hk= -github.com/oschwald/maxminddb-golang v1.8.0/go.mod h1:RXZtst0N6+FY/3qCNmZMBApR19cdQj43/NM9VkrNAis= +github.com/oschwald/geoip2-golang v1.9.0 h1:uvD3O6fXAXs+usU+UGExshpdP13GAqp4GBrzN7IgKZc= +github.com/oschwald/geoip2-golang v1.9.0/go.mod h1:BHK6TvDyATVQhKNbQBdrj9eAvuwOMi2zSFXizL3K81Y= +github.com/oschwald/maxminddb-golang v1.12.0 h1:9FnTOD0YOhP7DGxGsq4glzpGy5+w7pq50AS6wALUMYs= +github.com/oschwald/maxminddb-golang v1.12.0/go.mod h1:q0Nob5lTCqyQ8WT6FYgS1L7PXKVVbgiymefNwIjPzgY= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= @@ -821,7 +820,6 @@ golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191224085550-c709ea063b76/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= From e8ff13bc17916c8a1373438aa6e0fb68ac3d3a9f Mon Sep 17 00:00:00 2001 From: blotus Date: Fri, 8 Mar 2024 15:04:36 +0100 Subject: [PATCH 052/318] appsec: get the original UA from headers (#2809) --- pkg/appsec/request.go | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/pkg/appsec/request.go b/pkg/appsec/request.go index 6d472e8afae..0479dea471e 100644 --- a/pkg/appsec/request.go +++ b/pkg/appsec/request.go @@ -17,11 +17,12 @@ import ( ) const ( - URIHeaderName = "X-Crowdsec-Appsec-Uri" - VerbHeaderName = "X-Crowdsec-Appsec-Verb" - HostHeaderName = "X-Crowdsec-Appsec-Host" - IPHeaderName = "X-Crowdsec-Appsec-Ip" - APIKeyHeaderName = "X-Crowdsec-Appsec-Api-Key" + URIHeaderName = "X-Crowdsec-Appsec-Uri" + VerbHeaderName = "X-Crowdsec-Appsec-Verb" + HostHeaderName = "X-Crowdsec-Appsec-Host" + IPHeaderName = "X-Crowdsec-Appsec-Ip" + APIKeyHeaderName = "X-Crowdsec-Appsec-Api-Key" + UserAgentHeaderName = "X-Crowdsec-Appsec-User-Agent" ) type ParsedRequest struct { @@ -311,11 +312,14 @@ func NewParsedRequestFromRequest(r *http.Request, logger *logrus.Entry) (ParsedR logger.Debugf("missing '%s' header", HostHeaderName) } + userAgent := r.Header.Get(UserAgentHeaderName) //This one is optional + // delete those headers before coraza process the request delete(r.Header, IPHeaderName) delete(r.Header, HostHeaderName) delete(r.Header, URIHeaderName) delete(r.Header, VerbHeaderName) + delete(r.Header, UserAgentHeaderName) originalHTTPRequest := r.Clone(r.Context()) originalHTTPRequest.Body = io.NopCloser(bytes.NewBuffer(body)) @@ -323,6 +327,13 @@ func NewParsedRequestFromRequest(r *http.Request, logger *logrus.Entry) 
(ParsedR originalHTTPRequest.RequestURI = clientURI originalHTTPRequest.Method = clientMethod originalHTTPRequest.Host = clientHost + if userAgent != "" { + originalHTTPRequest.Header.Set("User-Agent", userAgent) + r.Header.Set("User-Agent", userAgent) //Override the UA in the original request, as this is what will be used by the waf engine + } else { + //If we don't have a forwarded UA, delete the one that was set by the bouncer + originalHTTPRequest.Header.Del("User-Agent") + } parsedURL, err := url.Parse(clientURI) if err != nil { From 6daaab1789fb4b02a33768f7835e4fd83217b4a7 Mon Sep 17 00:00:00 2001 From: blotus Date: Mon, 11 Mar 2024 10:54:40 +0100 Subject: [PATCH 053/318] support both scope and scopes parameter in decisions filter (#2882) --- pkg/database/decisions.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/database/decisions.go b/pkg/database/decisions.go index c4ea0bb119e..3175a916ff5 100644 --- a/pkg/database/decisions.go +++ b/pkg/database/decisions.go @@ -49,7 +49,7 @@ func BuildDecisionRequestWithFilter(query *ent.DecisionQuery, filter map[string] if err != nil { return nil, errors.Wrapf(InvalidFilter, "invalid contains value : %s", err) } - case "scopes": + case "scopes", "scope": //Swagger mentions both of them, let's just support both to make sure we don't break anything scopes := strings.Split(value[0], ",") for i, scope := range scopes { switch strings.ToLower(scope) { From 49e0735b536f8805d08b943b85ae6961dcb3e0dc Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 11 Mar 2024 13:14:01 +0100 Subject: [PATCH 054/318] cscli tests + fix bouncer/machine prune (#2883) * func tests: "cscli config feature-flags" * func tests: "cscli bouncers list" * func tests + fix: "cscli bouncers/machines prune" * lint --- cmd/crowdsec-cli/bouncers.go | 2 +- cmd/crowdsec-cli/machines.go | 37 ++++++++++++++++++------------------ test/bats/01_cscli.bats | 21 ++++++++++++++++++++ 
test/bats/10_bouncers.bats | 18 +++++++++++++++++- test/bats/30_machines.bats | 14 ++++++++++++++ 5 files changed, 72 insertions(+), 20 deletions(-) diff --git a/cmd/crowdsec-cli/bouncers.go b/cmd/crowdsec-cli/bouncers.go index 35f4320c520..2e0adb9b95f 100644 --- a/cmd/crowdsec-cli/bouncers.go +++ b/cmd/crowdsec-cli/bouncers.go @@ -259,7 +259,7 @@ func (cli *cliBouncers) prune(duration time.Duration, force bool) error { } } - bouncers, err := cli.db.QueryBouncersLastPulltimeLT(time.Now().UTC().Add(duration)) + bouncers, err := cli.db.QueryBouncersLastPulltimeLT(time.Now().UTC().Add(-duration)) if err != nil { return fmt.Errorf("unable to query bouncers: %w", err) } diff --git a/cmd/crowdsec-cli/machines.go b/cmd/crowdsec-cli/machines.go index 7c9b9708c92..df225c06f7f 100644 --- a/cmd/crowdsec-cli/machines.go +++ b/cmd/crowdsec-cli/machines.go @@ -4,6 +4,7 @@ import ( saferand "crypto/rand" "encoding/csv" "encoding/json" + "errors" "fmt" "math/big" "os" @@ -134,7 +135,7 @@ Note: This command requires database direct access, so is intended to be run on } cli.db, err = database.NewClient(cli.cfg().DbConfig) if err != nil { - return fmt.Errorf("unable to create new database client: %s", err) + return fmt.Errorf("unable to create new database client: %w", err) } return nil @@ -155,7 +156,7 @@ func (cli *cliMachines) list() error { machines, err := cli.db.ListMachines() if err != nil { - return fmt.Errorf("unable to list machines: %s", err) + return fmt.Errorf("unable to list machines: %w", err) } switch cli.cfg().Cscli.Output { @@ -166,7 +167,7 @@ func (cli *cliMachines) list() error { enc.SetIndent("", " ") if err := enc.Encode(machines); err != nil { - return fmt.Errorf("failed to marshal") + return errors.New("failed to marshal") } return nil @@ -175,7 +176,7 @@ func (cli *cliMachines) list() error { err := csvwriter.Write([]string{"machine_id", "ip_address", "updated_at", "validated", "version", "auth_type", "last_heartbeat"}) if err != nil { - return 
fmt.Errorf("failed to write header: %s", err) + return fmt.Errorf("failed to write header: %w", err) } for _, m := range machines { @@ -257,12 +258,12 @@ func (cli *cliMachines) add(args []string, machinePassword string, dumpFile stri // create machineID if not specified by user if len(args) == 0 { if !autoAdd { - return fmt.Errorf("please specify a machine name to add, or use --auto") + return errors.New("please specify a machine name to add, or use --auto") } machineID, err = generateID("") if err != nil { - return fmt.Errorf("unable to generate machine id: %s", err) + return fmt.Errorf("unable to generate machine id: %w", err) } } else { machineID = args[0] @@ -281,20 +282,20 @@ func (cli *cliMachines) add(args []string, machinePassword string, dumpFile stri case os.IsNotExist(err) || force: dumpFile = credFile case err != nil: - return fmt.Errorf("unable to stat '%s': %s", credFile, err) + return fmt.Errorf("unable to stat '%s': %w", credFile, err) default: return fmt.Errorf(`credentials file '%s' already exists: please remove it, use "--force" or specify a different file with "-f" ("-f -" for standard output)`, credFile) } } if dumpFile == "" { - return fmt.Errorf(`please specify a file to dump credentials to, with -f ("-f -" for standard output)`) + return errors.New(`please specify a file to dump credentials to, with -f ("-f -" for standard output)`) } // create a password if it's not specified by user if machinePassword == "" && !interactive { if !autoAdd { - return fmt.Errorf("please specify a password with --password or use --auto") + return errors.New("please specify a password with --password or use --auto") } machinePassword = generatePassword(passwordLength) @@ -309,7 +310,7 @@ func (cli *cliMachines) add(args []string, machinePassword string, dumpFile stri _, err = cli.db.CreateMachine(&machineID, &password, "", true, force, types.PasswordAuthType) if err != nil { - return fmt.Errorf("unable to create machine: %s", err) + return fmt.Errorf("unable to 
create machine: %w", err) } fmt.Fprintf(os.Stderr, "Machine '%s' successfully added to the local API.\n", machineID) @@ -320,7 +321,7 @@ func (cli *cliMachines) add(args []string, machinePassword string, dumpFile stri } else if serverCfg != nil && serverCfg.ListenURI != "" { apiURL = "http://" + serverCfg.ListenURI } else { - return fmt.Errorf("unable to dump an api URL. Please provide it in your configuration or with the -u parameter") + return errors.New("unable to dump an api URL. Please provide it in your configuration or with the -u parameter") } } @@ -332,12 +333,12 @@ func (cli *cliMachines) add(args []string, machinePassword string, dumpFile stri apiConfigDump, err := yaml.Marshal(apiCfg) if err != nil { - return fmt.Errorf("unable to marshal api credentials: %s", err) + return fmt.Errorf("unable to marshal api credentials: %w", err) } if dumpFile != "" && dumpFile != "-" { if err = os.WriteFile(dumpFile, apiConfigDump, 0o600); err != nil { - return fmt.Errorf("write api credentials in '%s' failed: %s", dumpFile, err) + return fmt.Errorf("write api credentials in '%s' failed: %w", dumpFile, err) } fmt.Fprintf(os.Stderr, "API credentials written to '%s'.\n", dumpFile) @@ -413,13 +414,13 @@ func (cli *cliMachines) prune(duration time.Duration, notValidOnly bool, force b } if !notValidOnly { - if pending, err := cli.db.QueryLastValidatedHeartbeatLT(time.Now().UTC().Add(duration)); err == nil { + if pending, err := cli.db.QueryLastValidatedHeartbeatLT(time.Now().UTC().Add(-duration)); err == nil { machines = append(machines, pending...) 
} } if len(machines) == 0 { - fmt.Println("no machines to prune") + fmt.Println("No machines to prune.") return nil } @@ -438,7 +439,7 @@ func (cli *cliMachines) prune(duration time.Duration, notValidOnly bool, force b deleted, err := cli.db.BulkDeleteWatchers(machines) if err != nil { - return fmt.Errorf("unable to prune machines: %s", err) + return fmt.Errorf("unable to prune machines: %w", err) } fmt.Fprintf(os.Stderr, "successfully delete %d machines\n", deleted) @@ -479,7 +480,7 @@ cscli machines prune --not-validated-only --force`, func (cli *cliMachines) validate(machineID string) error { if err := cli.db.ValidateMachine(machineID); err != nil { - return fmt.Errorf("unable to validate machine '%s': %s", machineID, err) + return fmt.Errorf("unable to validate machine '%s': %w", machineID, err) } log.Infof("machine '%s' validated successfully", machineID) @@ -495,7 +496,7 @@ func (cli *cliMachines) newValidateCmd() *cobra.Command { Example: `cscli machines validate "machine_name"`, Args: cobra.ExactArgs(1), DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(_ *cobra.Command, args []string) error { return cli.validate(args[0]) }, } diff --git a/test/bats/01_cscli.bats b/test/bats/01_cscli.bats index 60a65b98d58..03f0132ea63 100644 --- a/test/bats/01_cscli.bats +++ b/test/bats/01_cscli.bats @@ -358,3 +358,24 @@ teardown() { rune -0 cscli setup assert_output --partial 'cscli setup [command]' } + +@test "cscli config feature-flags" { + # disabled + rune -0 cscli config feature-flags + assert_line '✗ cscli_setup: Enable cscli setup command (service detection)' + + # enabled in feature.yaml + CONFIG_DIR=$(dirname "$CONFIG_YAML") + echo ' - cscli_setup' >> "$CONFIG_DIR"/feature.yaml + rune -0 cscli config feature-flags + assert_line '✓ cscli_setup: Enable cscli setup command (service detection)' + + # enabled in environment + # shellcheck disable=SC2031 + export CROWDSEC_FEATURE_CSCLI_SETUP="true" + rune -0 cscli config 
feature-flags + assert_line '✓ cscli_setup: Enable cscli setup command (service detection)' + + # there are no retired features + rune -0 cscli config feature-flags --retired +} diff --git a/test/bats/10_bouncers.bats b/test/bats/10_bouncers.bats index 3f6167ff6f7..1ef39ceb05e 100644 --- a/test/bats/10_bouncers.bats +++ b/test/bats/10_bouncers.bats @@ -25,7 +25,13 @@ teardown() { @test "there are 0 bouncers" { rune -0 cscli bouncers list -o json - assert_output "[]" + assert_json '[]' + + rune -0 cscli bouncers list -o human + assert_output --partial "Name" + + rune -0 cscli bouncers list -o raw + assert_output --partial 'name' } @test "we can add one bouncer, and delete it" { @@ -68,3 +74,13 @@ teardown() { rune -1 cscli bouncers delete ciTestBouncer rune -1 cscli bouncers delete foobarbaz } + +@test "cscli bouncers prune" { + rune -0 cscli bouncers prune + assert_output 'No bouncers to prune.' + rune -0 cscli bouncers add ciTestBouncer + + rune -0 cscli bouncers prune + assert_output 'No bouncers to prune.' +} + diff --git a/test/bats/30_machines.bats b/test/bats/30_machines.bats index f32c376e5b0..2a04cc9bc20 100644 --- a/test/bats/30_machines.bats +++ b/test/bats/30_machines.bats @@ -90,3 +90,17 @@ teardown() { rune -0 jq '. | length' <(output) assert_output 1 } + +@test "cscli machines prune" { + rune -0 cscli metrics + + rune -0 cscli machines prune + assert_output 'No machines to prune.' + + rune -0 cscli machines list -o json + rune -0 jq -r '.[-1].machineId' <(output) + rune -0 cscli machines delete "$output" + + rune -0 cscli machines prune + assert_output 'No machines to prune.' 
+} From 1a56a0e0b9fb5c56e4cee6d3267e758d2885bf25 Mon Sep 17 00:00:00 2001 From: Manuel Sabban Date: Tue, 12 Mar 2024 14:33:10 +0100 Subject: [PATCH 055/318] armhf fix for getfstype (#2884) * armhf fix for getfstype --- pkg/types/getfstype.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/pkg/types/getfstype.go b/pkg/types/getfstype.go index 4a54fc9481e..a7ee249cdf1 100644 --- a/pkg/types/getfstype.go +++ b/pkg/types/getfstype.go @@ -4,13 +4,13 @@ package types import ( "fmt" - "syscall" + "golang.org/x/sys/unix" ) // Generated with `man statfs | grep _MAGIC | awk '{split(tolower($1),a,"_"); print $2 ": \"" a[1] "\","}'` // ext2/3/4 duplicates removed to just have ext4 // XIAFS removed as well -var fsTypeMapping map[int]string = map[int]string{ +var fsTypeMapping map[int64]string = map[int64]string{ 0xadf5: "adfs", 0xadff: "affs", 0x5346414f: "afs", @@ -95,15 +95,15 @@ var fsTypeMapping map[int]string = map[int]string{ } func GetFSType(path string) (string, error) { - var buf syscall.Statfs_t + var buf unix.Statfs_t - err := syscall.Statfs(path, &buf) + err := unix.Statfs(path, &buf) if err != nil { return "", err } - fsType, ok := fsTypeMapping[int(buf.Type)] + fsType, ok := fsTypeMapping[buf.Type] if !ok { return "", fmt.Errorf("unknown fstype %d", buf.Type) } From bd785ede15dd745518ba7914aae39f5e53d17a10 Mon Sep 17 00:00:00 2001 From: Manuel Sabban Date: Tue, 12 Mar 2024 17:33:22 +0100 Subject: [PATCH 056/318] Fix armhf (#2886) * armhf compile fix --- pkg/types/getfstype.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pkg/types/getfstype.go b/pkg/types/getfstype.go index a7ee249cdf1..67e018782c1 100644 --- a/pkg/types/getfstype.go +++ b/pkg/types/getfstype.go @@ -103,7 +103,8 @@ func GetFSType(path string) (string, error) { return "", err } - fsType, ok := fsTypeMapping[buf.Type] + fsType, ok := fsTypeMapping[int64(buf.Type)] //nolint:unconvert + if !ok { return "", fmt.Errorf("unknown fstype %d", buf.Type) } 
From b1c09f75127704fea25dacaad1bccb7cddd008f5 Mon Sep 17 00:00:00 2001 From: "Thibault \"bui\" Koechlin" Date: Wed, 13 Mar 2024 14:57:19 +0100 Subject: [PATCH 057/318] acquisition : take prometheus level into account (#2885) * properly take into account the aggregation level of prometheus metrics in acquisition --- cmd/crowdsec/crowdsec.go | 3 +- cmd/crowdsec/main.go | 2 +- cmd/crowdsec/metrics.go | 3 +- pkg/acquisition/acquisition.go | 30 +++++++++++--- pkg/acquisition/acquisition_test.go | 16 ++++---- .../configuration/configuration.go | 11 ++++++ pkg/acquisition/modules/appsec/appsec.go | 5 ++- .../modules/cloudwatch/cloudwatch.go | 22 ++++++++--- .../modules/cloudwatch/cloudwatch_test.go | 5 ++- pkg/acquisition/modules/docker/docker.go | 9 +++-- pkg/acquisition/modules/docker/docker_test.go | 9 +++-- pkg/acquisition/modules/file/file.go | 15 +++++-- pkg/acquisition/modules/file/file_test.go | 9 +++-- .../modules/journalctl/journalctl.go | 16 +++++--- .../modules/journalctl/journalctl_test.go | 15 +++---- pkg/acquisition/modules/kafka/kafka.go | 14 ++++--- pkg/acquisition/modules/kafka/kafka_test.go | 7 ++-- pkg/acquisition/modules/kinesis/kinesis.go | 16 +++++--- .../modules/kinesis/kinesis_test.go | 9 +++-- .../modules/kubernetesaudit/k8s_audit.go | 25 +++++++----- .../modules/kubernetesaudit/k8s_audit_test.go | 5 ++- pkg/acquisition/modules/loki/loki.go | 10 +++-- pkg/acquisition/modules/loki/loki_test.go | 9 +++-- pkg/acquisition/modules/s3/s3.go | 39 ++++++++++++------- pkg/acquisition/modules/s3/s3_test.go | 9 +++-- pkg/acquisition/modules/syslog/syslog.go | 25 +++++++----- pkg/acquisition/modules/syslog/syslog_test.go | 5 ++- .../modules/wineventlog/wineventlog.go | 2 +- .../modules/wineventlog/wineventlog_test.go | 7 ++-- .../wineventlog/wineventlog_windows.go | 18 +++++---- pkg/csconfig/config.go | 3 +- 31 files changed, 245 insertions(+), 128 deletions(-) diff --git a/cmd/crowdsec/crowdsec.go b/cmd/crowdsec/crowdsec.go index 37a12b9d3cb..f604af1dedd 
100644 --- a/cmd/crowdsec/crowdsec.go +++ b/cmd/crowdsec/crowdsec.go @@ -14,6 +14,7 @@ import ( "github.com/crowdsecurity/go-cs-lib/trace" "github.com/crowdsecurity/crowdsec/pkg/acquisition" + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" "github.com/crowdsecurity/crowdsec/pkg/alertcontext" "github.com/crowdsecurity/crowdsec/pkg/appsec" "github.com/crowdsecurity/crowdsec/pkg/csconfig" @@ -147,7 +148,7 @@ func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.H if cConfig.Prometheus != nil && cConfig.Prometheus.Enabled { aggregated := false - if cConfig.Prometheus.Level == "aggregated" { + if cConfig.Prometheus.Level == configuration.CFG_METRICS_AGGREGATE { aggregated = true } diff --git a/cmd/crowdsec/main.go b/cmd/crowdsec/main.go index 8950790480a..5f04e9b99a4 100644 --- a/cmd/crowdsec/main.go +++ b/cmd/crowdsec/main.go @@ -123,7 +123,7 @@ func LoadAcquisition(cConfig *csconfig.Config) ([]acquisition.DataSource, error) return nil, fmt.Errorf("failed to configure datasource for %s: %w", flags.OneShotDSN, err) } } else { - dataSources, err = acquisition.LoadAcquisitionFromFile(cConfig.Crowdsec) + dataSources, err = acquisition.LoadAcquisitionFromFile(cConfig.Crowdsec, cConfig.Prometheus) if err != nil { return nil, err } diff --git a/cmd/crowdsec/metrics.go b/cmd/crowdsec/metrics.go index 682968bb75a..d670051cea0 100644 --- a/cmd/crowdsec/metrics.go +++ b/cmd/crowdsec/metrics.go @@ -12,6 +12,7 @@ import ( "github.com/crowdsecurity/go-cs-lib/trace" "github.com/crowdsecurity/go-cs-lib/version" + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" v1 "github.com/crowdsecurity/crowdsec/pkg/apiserver/controllers/v1" "github.com/crowdsecurity/crowdsec/pkg/cache" "github.com/crowdsecurity/crowdsec/pkg/csconfig" @@ -161,7 +162,7 @@ func registerPrometheus(config *csconfig.PrometheusCfg) { // Registering prometheus // If in aggregated mode, do not register events associated with a source, to keep the cardinality low 
- if config.Level == "aggregated" { + if config.Level == configuration.CFG_METRICS_AGGREGATE { log.Infof("Loading aggregated prometheus collectors") prometheus.MustRegister(globalParserHits, globalParserHitsOk, globalParserHitsKo, globalCsInfo, globalParsingHistogram, globalPourHistogram, diff --git a/pkg/acquisition/acquisition.go b/pkg/acquisition/acquisition.go index 33602936369..677bf664e31 100644 --- a/pkg/acquisition/acquisition.go +++ b/pkg/acquisition/acquisition.go @@ -54,7 +54,7 @@ type DataSource interface { GetMetrics() []prometheus.Collector // Returns pointers to metrics that are managed by the module GetAggregMetrics() []prometheus.Collector // Returns pointers to metrics that are managed by the module (aggregated mode, limits cardinality) UnmarshalConfig([]byte) error // Decode and pre-validate the YAML datasource - anything that can be checked before runtime - Configure([]byte, *log.Entry) error // Complete the YAML datasource configuration and perform runtime checks. + Configure([]byte, *log.Entry, int) error // Complete the YAML datasource configuration and perform runtime checks. ConfigureByDSN(string, map[string]string, *log.Entry, string) error // Configure the datasource GetMode() string // Get the mode (TAIL, CAT or SERVER) GetName() string // Get the name of the module @@ -94,7 +94,7 @@ func GetDataSourceIface(dataSourceType string) DataSource { // if the configuration is not valid it returns an error. // If the datasource can't be run (eg. journalctl not available), it still returns an error which // can be checked for the appropriate action. 
-func DataSourceConfigure(commonConfig configuration.DataSourceCommonCfg) (*DataSource, error) { +func DataSourceConfigure(commonConfig configuration.DataSourceCommonCfg, metricsLevel int) (*DataSource, error) { // we dump it back to []byte, because we want to decode the yaml blob twice: // once to DataSourceCommonCfg, and then later to the dedicated type of the datasource yamlConfig, err := yaml.Marshal(commonConfig) @@ -122,7 +122,7 @@ func DataSourceConfigure(commonConfig configuration.DataSourceCommonCfg) (*DataS return nil, &DataSourceUnavailableError{Name: commonConfig.Source, Err: err} } /* configure the actual datasource */ - if err := dataSrc.Configure(yamlConfig, subLogger); err != nil { + if err := dataSrc.Configure(yamlConfig, subLogger, metricsLevel); err != nil { return nil, fmt.Errorf("failed to configure datasource %s: %w", commonConfig.Source, err) } @@ -180,10 +180,30 @@ func LoadAcquisitionFromDSN(dsn string, labels map[string]string, transformExpr return sources, nil } +func GetMetricsLevelFromPromCfg(prom *csconfig.PrometheusCfg) int { + if prom == nil { + return configuration.METRICS_FULL + + } + if !prom.Enabled { + return configuration.METRICS_NONE + } + if prom.Level == configuration.CFG_METRICS_AGGREGATE { + return configuration.METRICS_AGGREGATE + } + + if prom.Level == configuration.CFG_METRICS_FULL { + return configuration.METRICS_FULL + } + return configuration.METRICS_FULL + +} + // LoadAcquisitionFromFile unmarshals the configuration item and checks its availability -func LoadAcquisitionFromFile(config *csconfig.CrowdsecServiceCfg) ([]DataSource, error) { +func LoadAcquisitionFromFile(config *csconfig.CrowdsecServiceCfg, prom *csconfig.PrometheusCfg) ([]DataSource, error) { var sources []DataSource + metrics_level := GetMetricsLevelFromPromCfg(prom) for _, acquisFile := range config.AcquisitionFiles { log.Infof("loading acquisition file : %s", acquisFile) yamlFile, err := os.Open(acquisFile) @@ -225,7 +245,7 @@ func 
LoadAcquisitionFromFile(config *csconfig.CrowdsecServiceCfg) ([]DataSource, } uniqueId := uuid.NewString() sub.UniqueId = uniqueId - src, err := DataSourceConfigure(sub) + src, err := DataSourceConfigure(sub, metrics_level) if err != nil { var dserr *DataSourceUnavailableError if errors.As(err, &dserr) { diff --git a/pkg/acquisition/acquisition_test.go b/pkg/acquisition/acquisition_test.go index 44b3878e1d0..33e4948552a 100644 --- a/pkg/acquisition/acquisition_test.go +++ b/pkg/acquisition/acquisition_test.go @@ -35,7 +35,7 @@ func (f *MockSource) UnmarshalConfig(cfg []byte) error { return nil } -func (f *MockSource) Configure(cfg []byte, logger *log.Entry) error { +func (f *MockSource) Configure(cfg []byte, logger *log.Entry, metricsLevel int) error { f.logger = logger if err := f.UnmarshalConfig(cfg); err != nil { return err @@ -182,7 +182,7 @@ wowo: ajsajasjas t.Run(tc.TestName, func(t *testing.T) { common := configuration.DataSourceCommonCfg{} yaml.Unmarshal([]byte(tc.String), &common) - ds, err := DataSourceConfigure(common) + ds, err := DataSourceConfigure(common, configuration.METRICS_NONE) cstest.RequireErrorContains(t, err, tc.ExpectedError) if tc.ExpectedError != "" { return @@ -283,7 +283,7 @@ func TestLoadAcquisitionFromFile(t *testing.T) { for _, tc := range tests { tc := tc t.Run(tc.TestName, func(t *testing.T) { - dss, err := LoadAcquisitionFromFile(&tc.Config) + dss, err := LoadAcquisitionFromFile(&tc.Config, nil) cstest.RequireErrorContains(t, err, tc.ExpectedError) if tc.ExpectedError != "" { return @@ -305,7 +305,7 @@ type MockCat struct { logger *log.Entry } -func (f *MockCat) Configure(cfg []byte, logger *log.Entry) error { +func (f *MockCat) Configure(cfg []byte, logger *log.Entry, metricsLevel int) error { f.logger = logger if f.Mode == "" { f.Mode = configuration.CAT_MODE @@ -349,7 +349,7 @@ type MockTail struct { logger *log.Entry } -func (f *MockTail) Configure(cfg []byte, logger *log.Entry) error { +func (f *MockTail) Configure(cfg 
[]byte, logger *log.Entry, metricsLevel int) error { f.logger = logger if f.Mode == "" { f.Mode = configuration.TAIL_MODE @@ -497,8 +497,10 @@ type MockSourceByDSN struct { logger *log.Entry //nolint: unused } -func (f *MockSourceByDSN) UnmarshalConfig(cfg []byte) error { return nil } -func (f *MockSourceByDSN) Configure(cfg []byte, logger *log.Entry) error { return nil } +func (f *MockSourceByDSN) UnmarshalConfig(cfg []byte) error { return nil } +func (f *MockSourceByDSN) Configure(cfg []byte, logger *log.Entry, metricsLevel int) error { + return nil +} func (f *MockSourceByDSN) GetMode() string { return f.Mode } func (f *MockSourceByDSN) OneShotAcquisition(chan types.Event, *tomb.Tomb) error { return nil } func (f *MockSourceByDSN) StreamingAcquisition(chan types.Event, *tomb.Tomb) error { return nil } diff --git a/pkg/acquisition/configuration/configuration.go b/pkg/acquisition/configuration/configuration.go index 5ec1a4ac4c3..3e27da1b9e6 100644 --- a/pkg/acquisition/configuration/configuration.go +++ b/pkg/acquisition/configuration/configuration.go @@ -19,3 +19,14 @@ type DataSourceCommonCfg struct { var TAIL_MODE = "tail" var CAT_MODE = "cat" var SERVER_MODE = "server" // No difference with tail, just a bit more verbose + +const ( + METRICS_NONE = iota + METRICS_AGGREGATE + METRICS_FULL +) + +const ( + CFG_METRICS_AGGREGATE = "aggregated" + CFG_METRICS_FULL = "full" +) diff --git a/pkg/acquisition/modules/appsec/appsec.go b/pkg/acquisition/modules/appsec/appsec.go index a3c8c7dd8ee..f97905406ce 100644 --- a/pkg/acquisition/modules/appsec/appsec.go +++ b/pkg/acquisition/modules/appsec/appsec.go @@ -49,6 +49,7 @@ type AppsecSourceConfig struct { // runtime structure of AppsecSourceConfig type AppsecSource struct { + metricsLevel int config AppsecSourceConfig logger *log.Entry mux *http.ServeMux @@ -149,13 +150,13 @@ func (w *AppsecSource) GetAggregMetrics() []prometheus.Collector { return []prometheus.Collector{AppsecReqCounter, AppsecBlockCounter, 
AppsecRuleHits, AppsecOutbandParsingHistogram, AppsecInbandParsingHistogram, AppsecGlobalParsingHistogram} } -func (w *AppsecSource) Configure(yamlConfig []byte, logger *log.Entry) error { +func (w *AppsecSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLevel int) error { err := w.UnmarshalConfig(yamlConfig) if err != nil { return errors.Wrap(err, "unable to parse appsec configuration") } w.logger = logger - + w.metricsLevel = MetricsLevel w.logger.Tracef("Appsec configuration: %+v", w.config) if w.config.AuthCacheDuration == nil { diff --git a/pkg/acquisition/modules/cloudwatch/cloudwatch.go b/pkg/acquisition/modules/cloudwatch/cloudwatch.go index 89887bef0b8..1ac1465d390 100644 --- a/pkg/acquisition/modules/cloudwatch/cloudwatch.go +++ b/pkg/acquisition/modules/cloudwatch/cloudwatch.go @@ -43,7 +43,8 @@ var linesRead = prometheus.NewCounterVec( // CloudwatchSource is the runtime instance keeping track of N streams within 1 cloudwatch group type CloudwatchSource struct { - Config CloudwatchSourceConfiguration + metricsLevel int + Config CloudwatchSourceConfiguration /*runtime stuff*/ logger *log.Entry t *tomb.Tomb @@ -152,11 +153,12 @@ func (cw *CloudwatchSource) UnmarshalConfig(yamlConfig []byte) error { return nil } -func (cw *CloudwatchSource) Configure(yamlConfig []byte, logger *log.Entry) error { +func (cw *CloudwatchSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLevel int) error { err := cw.UnmarshalConfig(yamlConfig) if err != nil { return err } + cw.metricsLevel = MetricsLevel cw.logger = logger.WithField("group", cw.Config.GroupName) @@ -385,7 +387,9 @@ func (cw *CloudwatchSource) LogStreamManager(in chan LogStreamTailConfig, outCha if !stream.t.Alive() { cw.logger.Debugf("stream %s already exists, but is dead", newStream.StreamName) cw.monitoredStreams = append(cw.monitoredStreams[:idx], cw.monitoredStreams[idx+1:]...) 
- openedStreams.With(prometheus.Labels{"group": newStream.GroupName}).Dec() + if cw.metricsLevel != configuration.METRICS_NONE { + openedStreams.With(prometheus.Labels{"group": newStream.GroupName}).Dec() + } break } shouldCreate = false @@ -395,7 +399,9 @@ func (cw *CloudwatchSource) LogStreamManager(in chan LogStreamTailConfig, outCha //let's start watching this stream if shouldCreate { - openedStreams.With(prometheus.Labels{"group": newStream.GroupName}).Inc() + if cw.metricsLevel != configuration.METRICS_NONE { + openedStreams.With(prometheus.Labels{"group": newStream.GroupName}).Inc() + } newStream.t = tomb.Tomb{} newStream.logger = cw.logger.WithFields(log.Fields{"stream": newStream.StreamName}) cw.logger.Debugf("starting tail of stream %s", newStream.StreamName) @@ -409,7 +415,9 @@ func (cw *CloudwatchSource) LogStreamManager(in chan LogStreamTailConfig, outCha for idx, stream := range cw.monitoredStreams { if !cw.monitoredStreams[idx].t.Alive() { cw.logger.Debugf("remove dead stream %s", stream.StreamName) - openedStreams.With(prometheus.Labels{"group": cw.monitoredStreams[idx].GroupName}).Dec() + if cw.metricsLevel != configuration.METRICS_NONE { + openedStreams.With(prometheus.Labels{"group": cw.monitoredStreams[idx].GroupName}).Dec() + } } else { newMonitoredStreams = append(newMonitoredStreams, stream) } @@ -485,7 +493,9 @@ func (cw *CloudwatchSource) TailLogStream(cfg *LogStreamTailConfig, outChan chan cfg.logger.Warningf("cwLogToEvent error, discarded event : %s", err) } else { cfg.logger.Debugf("pushing message : %s", evt.Line.Raw) - linesRead.With(prometheus.Labels{"group": cfg.GroupName, "stream": cfg.StreamName}).Inc() + if cw.metricsLevel != configuration.METRICS_NONE { + linesRead.With(prometheus.Labels{"group": cfg.GroupName, "stream": cfg.StreamName}).Inc() + } outChan <- evt } } diff --git a/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go b/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go index 5d64755e2e9..11842e61ff2 100644 --- 
a/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go +++ b/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go @@ -13,6 +13,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/cloudwatchlogs" + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" "github.com/crowdsecurity/crowdsec/pkg/types" log "github.com/sirupsen/logrus" "github.com/stretchr/testify/require" @@ -427,7 +428,7 @@ stream_name: test_stream`), dbgLogger.Logger.SetLevel(log.DebugLevel) dbgLogger.Infof("starting test") cw := CloudwatchSource{} - err := cw.Configure(tc.config, dbgLogger) + err := cw.Configure(tc.config, dbgLogger, configuration.METRICS_NONE) cstest.RequireErrorContains(t, err, tc.expectedCfgErr) if tc.expectedCfgErr != "" { @@ -559,7 +560,7 @@ stream_name: test_stream`), dbgLogger := log.New().WithField("test", tc.name) dbgLogger.Logger.SetLevel(log.DebugLevel) cw := CloudwatchSource{} - err := cw.Configure(tc.config, dbgLogger) + err := cw.Configure(tc.config, dbgLogger, configuration.METRICS_NONE) cstest.RequireErrorContains(t, err, tc.expectedCfgErr) if tc.expectedCfgErr != "" { return diff --git a/pkg/acquisition/modules/docker/docker.go b/pkg/acquisition/modules/docker/docker.go index 60f1100b35a..9f1febf2cb7 100644 --- a/pkg/acquisition/modules/docker/docker.go +++ b/pkg/acquisition/modules/docker/docker.go @@ -46,6 +46,7 @@ type DockerConfiguration struct { } type DockerSource struct { + metricsLevel int Config DockerConfiguration runningContainerState map[string]*ContainerConfig compiledContainerName []*regexp.Regexp @@ -128,9 +129,9 @@ func (d *DockerSource) UnmarshalConfig(yamlConfig []byte) error { return nil } -func (d *DockerSource) Configure(yamlConfig []byte, logger *log.Entry) error { +func (d *DockerSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLevel int) error { d.logger = logger - + d.metricsLevel = MetricsLevel err := d.UnmarshalConfig(yamlConfig) if err != nil { return err @@ -325,7 +326,9 @@ func (d 
*DockerSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) er l.Src = containerConfig.Name l.Process = true l.Module = d.GetName() - linesRead.With(prometheus.Labels{"source": containerConfig.Name}).Inc() + if d.metricsLevel != configuration.METRICS_NONE { + linesRead.With(prometheus.Labels{"source": containerConfig.Name}).Inc() + } evt := types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: types.TIMEMACHINE} out <- evt d.logger.Debugf("Sent line to parsing: %+v", evt.Line.Raw) diff --git a/pkg/acquisition/modules/docker/docker_test.go b/pkg/acquisition/modules/docker/docker_test.go index c4d23168a37..6c010f895d3 100644 --- a/pkg/acquisition/modules/docker/docker_test.go +++ b/pkg/acquisition/modules/docker/docker_test.go @@ -13,6 +13,7 @@ import ( "github.com/crowdsecurity/go-cs-lib/cstest" + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" "github.com/crowdsecurity/crowdsec/pkg/types" dockerTypes "github.com/docker/docker/api/types" dockerContainer "github.com/docker/docker/api/types/container" @@ -60,7 +61,7 @@ container_name: for _, test := range tests { f := DockerSource{} - err := f.Configure([]byte(test.config), subLogger) + err := f.Configure([]byte(test.config), subLogger, configuration.METRICS_NONE) cstest.AssertErrorContains(t, err, test.expectedErr) } } @@ -162,7 +163,7 @@ container_name_regexp: for _, ts := range tests { var ( - logger *log.Logger + logger *log.Logger subLogger *log.Entry ) @@ -182,7 +183,7 @@ container_name_regexp: out := make(chan types.Event) dockerSource := DockerSource{} - err := dockerSource.Configure([]byte(ts.config), subLogger) + err := dockerSource.Configure([]byte(ts.config), subLogger, configuration.METRICS_NONE) if err != nil { t.Fatalf("Unexpected error : %s", err) } @@ -304,7 +305,7 @@ func TestOneShot(t *testing.T) { for _, ts := range tests { var ( subLogger *log.Entry - logger *log.Logger + logger *log.Logger ) if ts.expectedOutput != "" { diff --git 
a/pkg/acquisition/modules/file/file.go b/pkg/acquisition/modules/file/file.go index a0c22657445..efc89715296 100644 --- a/pkg/acquisition/modules/file/file.go +++ b/pkg/acquisition/modules/file/file.go @@ -46,6 +46,7 @@ type FileConfiguration struct { } type FileSource struct { + metricsLevel int config FileConfiguration watcher *fsnotify.Watcher watchedDirectories map[string]bool @@ -98,8 +99,9 @@ func (f *FileSource) UnmarshalConfig(yamlConfig []byte) error { return nil } -func (f *FileSource) Configure(yamlConfig []byte, logger *log.Entry) error { +func (f *FileSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLevel int) error { f.logger = logger + f.metricsLevel = MetricsLevel err := f.UnmarshalConfig(yamlConfig) if err != nil { @@ -517,12 +519,19 @@ func (f *FileSource) tailFile(out chan types.Event, t *tomb.Tomb, tail *tail.Tai if line.Text == "" { //skip empty lines continue } - linesRead.With(prometheus.Labels{"source": tail.Filename}).Inc() + if f.metricsLevel != configuration.METRICS_NONE { + linesRead.With(prometheus.Labels{"source": tail.Filename}).Inc() + } + src := tail.Filename + if f.metricsLevel == configuration.METRICS_AGGREGATE { + src = filepath.Base(tail.Filename) + } + l := types.Line{ Raw: trimLine(line.Text), Labels: f.config.Labels, Time: line.Time, - Src: tail.Filename, + Src: src, Process: true, Module: f.GetName(), } diff --git a/pkg/acquisition/modules/file/file_test.go b/pkg/acquisition/modules/file/file_test.go index 410beb4bc85..ad5fe8bfabd 100644 --- a/pkg/acquisition/modules/file/file_test.go +++ b/pkg/acquisition/modules/file/file_test.go @@ -15,6 +15,7 @@ import ( "github.com/crowdsecurity/go-cs-lib/cstest" + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" fileacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/file" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -56,7 +57,7 @@ exclude_regexps: ["as[a-$d"]`, tc := tc t.Run(tc.name, func(t *testing.T) { f := 
fileacquisition.FileSource{} - err := f.Configure([]byte(tc.config), subLogger) + err := f.Configure([]byte(tc.config), subLogger, configuration.METRICS_NONE) cstest.RequireErrorContains(t, err, tc.expectedErr) }) } @@ -222,7 +223,7 @@ filename: test_files/test_delete.log`, tc.setup() } - err := f.Configure([]byte(tc.config), subLogger) + err := f.Configure([]byte(tc.config), subLogger, configuration.METRICS_NONE) cstest.RequireErrorContains(t, err, tc.expectedConfigErr) if tc.expectedConfigErr != "" { return @@ -384,7 +385,7 @@ force_inotify: true`, testPattern), tc.setup() } - err := f.Configure([]byte(tc.config), subLogger) + err := f.Configure([]byte(tc.config), subLogger, configuration.METRICS_NONE) require.NoError(t, err) if tc.afterConfigure != nil { @@ -455,7 +456,7 @@ exclude_regexps: ["\\.gz$"]` }) f := fileacquisition.FileSource{} - if err := f.Configure([]byte(config), subLogger); err != nil { + if err := f.Configure([]byte(config), subLogger, configuration.METRICS_NONE); err != nil { subLogger.Fatalf("unexpected error: %s", err) } diff --git a/pkg/acquisition/modules/journalctl/journalctl.go b/pkg/acquisition/modules/journalctl/journalctl.go index 55091a7b5eb..e8bb5a3edd5 100644 --- a/pkg/acquisition/modules/journalctl/journalctl.go +++ b/pkg/acquisition/modules/journalctl/journalctl.go @@ -26,10 +26,11 @@ type JournalCtlConfiguration struct { } type JournalCtlSource struct { - config JournalCtlConfiguration - logger *log.Entry - src string - args []string + metricsLevel int + config JournalCtlConfiguration + logger *log.Entry + src string + args []string } const journalctlCmd string = "journalctl" @@ -131,7 +132,9 @@ func (j *JournalCtlSource) runJournalCtl(out chan types.Event, t *tomb.Tomb) err l.Src = j.src l.Process = true l.Module = j.GetName() - linesRead.With(prometheus.Labels{"source": j.src}).Inc() + if j.metricsLevel != configuration.METRICS_NONE { + linesRead.With(prometheus.Labels{"source": j.src}).Inc() + } var evt types.Event if 
!j.config.UseTimeMachine { evt = types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: types.LIVE} @@ -194,8 +197,9 @@ func (j *JournalCtlSource) UnmarshalConfig(yamlConfig []byte) error { return nil } -func (j *JournalCtlSource) Configure(yamlConfig []byte, logger *log.Entry) error { +func (j *JournalCtlSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLevel int) error { j.logger = logger + j.metricsLevel = MetricsLevel err := j.UnmarshalConfig(yamlConfig) if err != nil { diff --git a/pkg/acquisition/modules/journalctl/journalctl_test.go b/pkg/acquisition/modules/journalctl/journalctl_test.go index a91fba31b34..9d1f1bb7e0e 100644 --- a/pkg/acquisition/modules/journalctl/journalctl_test.go +++ b/pkg/acquisition/modules/journalctl/journalctl_test.go @@ -10,6 +10,7 @@ import ( "github.com/crowdsecurity/go-cs-lib/cstest" + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" "github.com/crowdsecurity/crowdsec/pkg/types" log "github.com/sirupsen/logrus" "github.com/sirupsen/logrus/hooks/test" @@ -52,7 +53,7 @@ journalctl_filter: for _, test := range tests { f := JournalCtlSource{} - err := f.Configure([]byte(test.config), subLogger) + err := f.Configure([]byte(test.config), subLogger, configuration.METRICS_NONE) cstest.AssertErrorContains(t, err, test.expectedErr) } } @@ -144,9 +145,9 @@ journalctl_filter: } for _, ts := range tests { var ( - logger *log.Logger + logger *log.Logger subLogger *log.Entry - hook *test.Hook + hook *test.Hook ) if ts.expectedOutput != "" { @@ -165,7 +166,7 @@ journalctl_filter: out := make(chan types.Event, 100) j := JournalCtlSource{} - err := j.Configure([]byte(ts.config), subLogger) + err := j.Configure([]byte(ts.config), subLogger, configuration.METRICS_NONE) if err != nil { t.Fatalf("Unexpected error : %s", err) } @@ -218,9 +219,9 @@ journalctl_filter: } for _, ts := range tests { var ( - logger *log.Logger + logger *log.Logger subLogger *log.Entry - hook *test.Hook + hook *test.Hook ) if 
ts.expectedOutput != "" { @@ -239,7 +240,7 @@ journalctl_filter: out := make(chan types.Event) j := JournalCtlSource{} - err := j.Configure([]byte(ts.config), subLogger) + err := j.Configure([]byte(ts.config), subLogger, configuration.METRICS_NONE) if err != nil { t.Fatalf("Unexpected error : %s", err) } diff --git a/pkg/acquisition/modules/kafka/kafka.go b/pkg/acquisition/modules/kafka/kafka.go index 5b6e8fc0d41..f64bb1df306 100644 --- a/pkg/acquisition/modules/kafka/kafka.go +++ b/pkg/acquisition/modules/kafka/kafka.go @@ -52,9 +52,10 @@ type TLSConfig struct { } type KafkaSource struct { - Config KafkaConfiguration - logger *log.Entry - Reader *kafka.Reader + metricsLevel int + Config KafkaConfiguration + logger *log.Entry + Reader *kafka.Reader } func (k *KafkaSource) GetUuid() string { @@ -86,8 +87,9 @@ func (k *KafkaSource) UnmarshalConfig(yamlConfig []byte) error { return err } -func (k *KafkaSource) Configure(yamlConfig []byte, logger *log.Entry) error { +func (k *KafkaSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLevel int) error { k.logger = logger + k.metricsLevel = MetricsLevel k.logger.Debugf("start configuring %s source", dataSourceName) @@ -170,7 +172,9 @@ func (k *KafkaSource) ReadMessage(out chan types.Event) error { Module: k.GetName(), } k.logger.Tracef("line with message read from topic '%s': %+v", k.Config.Topic, l) - linesRead.With(prometheus.Labels{"topic": k.Config.Topic}).Inc() + if k.metricsLevel != configuration.METRICS_NONE { + linesRead.With(prometheus.Labels{"topic": k.Config.Topic}).Inc() + } var evt types.Event if !k.Config.UseTimeMachine { diff --git a/pkg/acquisition/modules/kafka/kafka_test.go b/pkg/acquisition/modules/kafka/kafka_test.go index 92ccd4c7a3f..6eda37a377e 100644 --- a/pkg/acquisition/modules/kafka/kafka_test.go +++ b/pkg/acquisition/modules/kafka/kafka_test.go @@ -15,6 +15,7 @@ import ( "github.com/crowdsecurity/go-cs-lib/cstest" + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" 
"github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -75,7 +76,7 @@ group_id: crowdsec`, }) for _, test := range tests { k := KafkaSource{} - err := k.Configure([]byte(test.config), subLogger) + err := k.Configure([]byte(test.config), subLogger, configuration.METRICS_NONE) cstest.AssertErrorContains(t, err, test.expectedErr) } } @@ -169,7 +170,7 @@ func TestStreamingAcquisition(t *testing.T) { source: kafka brokers: - localhost:9092 -topic: crowdsecplaintext`), subLogger) +topic: crowdsecplaintext`), subLogger, configuration.METRICS_NONE) if err != nil { t.Fatalf("could not configure kafka source : %s", err) } @@ -245,7 +246,7 @@ tls: client_cert: ./testdata/kafkaClient.certificate.pem client_key: ./testdata/kafkaClient.key ca_cert: ./testdata/snakeoil-ca-1.crt - `), subLogger) + `), subLogger, configuration.METRICS_NONE) if err != nil { t.Fatalf("could not configure kafka source : %s", err) } diff --git a/pkg/acquisition/modules/kinesis/kinesis.go b/pkg/acquisition/modules/kinesis/kinesis.go index e2cc7996349..a86816244f6 100644 --- a/pkg/acquisition/modules/kinesis/kinesis.go +++ b/pkg/acquisition/modules/kinesis/kinesis.go @@ -38,6 +38,7 @@ type KinesisConfiguration struct { } type KinesisSource struct { + metricsLevel int Config KinesisConfiguration logger *log.Entry kClient *kinesis.Kinesis @@ -149,8 +150,9 @@ func (k *KinesisSource) UnmarshalConfig(yamlConfig []byte) error { return nil } -func (k *KinesisSource) Configure(yamlConfig []byte, logger *log.Entry) error { +func (k *KinesisSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLevel int) error { k.logger = logger + k.metricsLevel = MetricsLevel err := k.UnmarshalConfig(yamlConfig) if err != nil { @@ -283,11 +285,15 @@ func (k *KinesisSource) RegisterConsumer() (*kinesis.RegisterStreamConsumerOutpu func (k *KinesisSource) ParseAndPushRecords(records []*kinesis.Record, out chan types.Event, logger *log.Entry, shardId string) { for _, record := range records { if k.Config.StreamARN != "" { - 
linesReadShards.With(prometheus.Labels{"stream": k.Config.StreamARN, "shard": shardId}).Inc() - linesRead.With(prometheus.Labels{"stream": k.Config.StreamARN}).Inc() + if k.metricsLevel != configuration.METRICS_NONE { + linesReadShards.With(prometheus.Labels{"stream": k.Config.StreamARN, "shard": shardId}).Inc() + linesRead.With(prometheus.Labels{"stream": k.Config.StreamARN}).Inc() + } } else { - linesReadShards.With(prometheus.Labels{"stream": k.Config.StreamName, "shard": shardId}).Inc() - linesRead.With(prometheus.Labels{"stream": k.Config.StreamName}).Inc() + if k.metricsLevel != configuration.METRICS_NONE { + linesReadShards.With(prometheus.Labels{"stream": k.Config.StreamName, "shard": shardId}).Inc() + linesRead.With(prometheus.Labels{"stream": k.Config.StreamName}).Inc() + } } var data []CloudwatchSubscriptionLogEvent var err error diff --git a/pkg/acquisition/modules/kinesis/kinesis_test.go b/pkg/acquisition/modules/kinesis/kinesis_test.go index 662d6040e0f..a4e4f2f7378 100644 --- a/pkg/acquisition/modules/kinesis/kinesis_test.go +++ b/pkg/acquisition/modules/kinesis/kinesis_test.go @@ -17,6 +17,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/kinesis" + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" "github.com/crowdsecurity/crowdsec/pkg/types" log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" @@ -143,7 +144,7 @@ stream_arn: arn:aws:kinesis:eu-west-1:123456789012:stream/my-stream`, }) for _, test := range tests { f := KinesisSource{} - err := f.Configure([]byte(test.config), subLogger) + err := f.Configure([]byte(test.config), subLogger, configuration.METRICS_NONE) cstest.AssertErrorContains(t, err, test.expectedErr) } } @@ -172,7 +173,7 @@ stream_name: stream-1-shard`, config := fmt.Sprintf(test.config, endpoint) err := f.Configure([]byte(config), log.WithFields(log.Fields{ "type": "kinesis", - })) + }), configuration.METRICS_NONE) if err != 
nil { t.Fatalf("Error configuring source: %s", err) } @@ -218,7 +219,7 @@ stream_name: stream-2-shards`, config := fmt.Sprintf(test.config, endpoint) err := f.Configure([]byte(config), log.WithFields(log.Fields{ "type": "kinesis", - })) + }), configuration.METRICS_NONE) if err != nil { t.Fatalf("Error configuring source: %s", err) } @@ -267,7 +268,7 @@ from_subscription: true`, config := fmt.Sprintf(test.config, endpoint) err := f.Configure([]byte(config), log.WithFields(log.Fields{ "type": "kinesis", - })) + }), configuration.METRICS_NONE) if err != nil { t.Fatalf("Error configuring source: %s", err) } diff --git a/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go b/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go index ee44bd01ae2..7d27f9e0390 100644 --- a/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go +++ b/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go @@ -28,12 +28,13 @@ type KubernetesAuditConfiguration struct { } type KubernetesAuditSource struct { - config KubernetesAuditConfiguration - logger *log.Entry - mux *http.ServeMux - server *http.Server - outChan chan types.Event - addr string + metricsLevel int + config KubernetesAuditConfiguration + logger *log.Entry + mux *http.ServeMux + server *http.Server + outChan chan types.Event + addr string } var eventCount = prometheus.NewCounterVec( @@ -93,8 +94,9 @@ func (ka *KubernetesAuditSource) UnmarshalConfig(yamlConfig []byte) error { return nil } -func (ka *KubernetesAuditSource) Configure(config []byte, logger *log.Entry) error { +func (ka *KubernetesAuditSource) Configure(config []byte, logger *log.Entry, MetricsLevel int) error { ka.logger = logger + ka.metricsLevel = MetricsLevel err := ka.UnmarshalConfig(config) if err != nil { @@ -161,7 +163,10 @@ func (ka *KubernetesAuditSource) Dump() interface{} { } func (ka *KubernetesAuditSource) webhookHandler(w http.ResponseWriter, r *http.Request) { - requestCount.WithLabelValues(ka.addr).Inc() + + if ka.metricsLevel != 
configuration.METRICS_NONE { + requestCount.WithLabelValues(ka.addr).Inc() + } if r.Method != http.MethodPost { w.WriteHeader(http.StatusMethodNotAllowed) return @@ -185,7 +190,9 @@ func (ka *KubernetesAuditSource) webhookHandler(w http.ResponseWriter, r *http.R remoteIP := strings.Split(r.RemoteAddr, ":")[0] for _, auditEvent := range auditEvents.Items { - eventCount.WithLabelValues(ka.addr).Inc() + if ka.metricsLevel != configuration.METRICS_NONE { + eventCount.WithLabelValues(ka.addr).Inc() + } bytesEvent, err := json.Marshal(auditEvent) if err != nil { ka.logger.Errorf("Error marshaling audit event: %s", err) diff --git a/pkg/acquisition/modules/kubernetesaudit/k8s_audit_test.go b/pkg/acquisition/modules/kubernetesaudit/k8s_audit_test.go index c3502c95685..331822ecf5b 100644 --- a/pkg/acquisition/modules/kubernetesaudit/k8s_audit_test.go +++ b/pkg/acquisition/modules/kubernetesaudit/k8s_audit_test.go @@ -6,6 +6,7 @@ import ( "testing" "time" + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" "github.com/crowdsecurity/crowdsec/pkg/types" log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" @@ -81,7 +82,7 @@ webhook_path: /k8s-audit`, require.NoError(t, err) - err = f.Configure([]byte(test.config), subLogger) + err = f.Configure([]byte(test.config), subLogger, configuration.METRICS_NONE) require.NoError(t, err) f.StreamingAcquisition(out, tb) @@ -253,7 +254,7 @@ webhook_path: /k8s-audit`, f := KubernetesAuditSource{} err := f.UnmarshalConfig([]byte(test.config)) require.NoError(t, err) - err = f.Configure([]byte(test.config), subLogger) + err = f.Configure([]byte(test.config), subLogger, configuration.METRICS_NONE) require.NoError(t, err) diff --git a/pkg/acquisition/modules/loki/loki.go b/pkg/acquisition/modules/loki/loki.go index 555deefe25a..3625c689516 100644 --- a/pkg/acquisition/modules/loki/loki.go +++ b/pkg/acquisition/modules/loki/loki.go @@ -57,7 +57,8 @@ type LokiConfiguration struct { } type LokiSource struct { - 
Config LokiConfiguration + metricsLevel int + Config LokiConfiguration Client *lokiclient.LokiClient @@ -118,9 +119,10 @@ func (l *LokiSource) UnmarshalConfig(yamlConfig []byte) error { return nil } -func (l *LokiSource) Configure(config []byte, logger *log.Entry) error { +func (l *LokiSource) Configure(config []byte, logger *log.Entry, MetricsLevel int) error { l.Config = LokiConfiguration{} l.logger = logger + l.metricsLevel = MetricsLevel err := l.UnmarshalConfig(config) if err != nil { return err @@ -302,7 +304,9 @@ func (l *LokiSource) readOneEntry(entry lokiclient.Entry, labels map[string]stri ll.Process = true ll.Module = l.GetName() - linesRead.With(prometheus.Labels{"source": l.Config.URL}).Inc() + if l.metricsLevel != configuration.METRICS_NONE { + linesRead.With(prometheus.Labels{"source": l.Config.URL}).Inc() + } expectMode := types.LIVE if l.Config.UseTimeMachine { expectMode = types.TIMEMACHINE diff --git a/pkg/acquisition/modules/loki/loki_test.go b/pkg/acquisition/modules/loki/loki_test.go index 8511d5445af..9ac3ccbd321 100644 --- a/pkg/acquisition/modules/loki/loki_test.go +++ b/pkg/acquisition/modules/loki/loki_test.go @@ -20,6 +20,7 @@ import ( "github.com/crowdsecurity/go-cs-lib/cstest" + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/loki" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -130,7 +131,7 @@ query: > for _, test := range tests { t.Run(test.testName, func(t *testing.T) { lokiSource := loki.LokiSource{} - err := lokiSource.Configure([]byte(test.config), subLogger) + err := lokiSource.Configure([]byte(test.config), subLogger, configuration.METRICS_NONE) cstest.AssertErrorContains(t, err, test.expectedErr) if test.password != "" { @@ -346,7 +347,7 @@ since: 1h "type": "loki", }) lokiSource := loki.LokiSource{} - err := lokiSource.Configure([]byte(ts.config), subLogger) + err := lokiSource.Configure([]byte(ts.config), subLogger, configuration.METRICS_NONE) 
if err != nil { t.Fatalf("Unexpected error : %s", err) @@ -436,7 +437,7 @@ query: > lokiTomb := tomb.Tomb{} lokiSource := loki.LokiSource{} - err := lokiSource.Configure([]byte(ts.config), subLogger) + err := lokiSource.Configure([]byte(ts.config), subLogger, configuration.METRICS_NONE) if err != nil { t.Fatalf("Unexpected error : %s", err) } @@ -514,7 +515,7 @@ query: > title := time.Now().String() lokiSource := loki.LokiSource{} - err := lokiSource.Configure([]byte(config), subLogger) + err := lokiSource.Configure([]byte(config), subLogger, configuration.METRICS_NONE) if err != nil { t.Fatalf("Unexpected error : %s", err) } diff --git a/pkg/acquisition/modules/s3/s3.go b/pkg/acquisition/modules/s3/s3.go index 651d40d3d50..98b2e685cd0 100644 --- a/pkg/acquisition/modules/s3/s3.go +++ b/pkg/acquisition/modules/s3/s3.go @@ -47,15 +47,16 @@ type S3Configuration struct { } type S3Source struct { - Config S3Configuration - logger *log.Entry - s3Client s3iface.S3API - sqsClient sqsiface.SQSAPI - readerChan chan S3Object - t *tomb.Tomb - out chan types.Event - ctx aws.Context - cancel context.CancelFunc + MetricsLevel int + Config S3Configuration + logger *log.Entry + s3Client s3iface.S3API + sqsClient sqsiface.SQSAPI + readerChan chan S3Object + t *tomb.Tomb + out chan types.Event + ctx aws.Context + cancel context.CancelFunc } type S3Object struct { @@ -345,7 +346,9 @@ func (s *S3Source) sqsPoll() error { logger.Tracef("SQS output: %v", out) logger.Debugf("Received %d messages from SQS", len(out.Messages)) for _, message := range out.Messages { - sqsMessagesReceived.WithLabelValues(s.Config.SQSName).Inc() + if s.MetricsLevel != configuration.METRICS_NONE { + sqsMessagesReceived.WithLabelValues(s.Config.SQSName).Inc() + } bucket, key, err := s.extractBucketAndPrefix(message.Body) if err != nil { logger.Errorf("Error while parsing SQS message: %s", err) @@ -426,14 +429,20 @@ func (s *S3Source) readFile(bucket string, key string) error { default: text := scanner.Text() 
logger.Tracef("Read line %s", text) - linesRead.WithLabelValues(bucket).Inc() + if s.MetricsLevel != configuration.METRICS_NONE { + linesRead.WithLabelValues(bucket).Inc() + } l := types.Line{} l.Raw = text l.Labels = s.Config.Labels l.Time = time.Now().UTC() l.Process = true l.Module = s.GetName() - l.Src = bucket + "/" + key + if s.MetricsLevel == configuration.METRICS_FULL { + l.Src = bucket + "/" + key + } else if s.MetricsLevel == configuration.METRICS_AGGREGATE { + l.Src = bucket + } var evt types.Event if !s.Config.UseTimeMachine { evt = types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: types.LIVE} @@ -446,7 +455,9 @@ func (s *S3Source) readFile(bucket string, key string) error { if err := scanner.Err(); err != nil { return fmt.Errorf("failed to read object %s/%s: %s", bucket, key, err) } - objectsRead.WithLabelValues(bucket).Inc() + if s.MetricsLevel != configuration.METRICS_NONE { + objectsRead.WithLabelValues(bucket).Inc() + } return nil } @@ -505,7 +516,7 @@ func (s *S3Source) UnmarshalConfig(yamlConfig []byte) error { return nil } -func (s *S3Source) Configure(yamlConfig []byte, logger *log.Entry) error { +func (s *S3Source) Configure(yamlConfig []byte, logger *log.Entry, metricsLevel int) error { err := s.UnmarshalConfig(yamlConfig) if err != nil { return err diff --git a/pkg/acquisition/modules/s3/s3_test.go b/pkg/acquisition/modules/s3/s3_test.go index 02423b1392c..e94521d187f 100644 --- a/pkg/acquisition/modules/s3/s3_test.go +++ b/pkg/acquisition/modules/s3/s3_test.go @@ -14,6 +14,7 @@ import ( "github.com/aws/aws-sdk-go/service/s3/s3iface" "github.com/aws/aws-sdk-go/service/sqs" "github.com/aws/aws-sdk-go/service/sqs/sqsiface" + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" "github.com/crowdsecurity/crowdsec/pkg/types" log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" @@ -66,7 +67,7 @@ sqs_name: foobar for _, test := range tests { t.Run(test.name, func(t *testing.T) { f := S3Source{} - err := 
f.Configure([]byte(test.config), nil) + err := f.Configure([]byte(test.config), nil, configuration.METRICS_NONE) if err == nil { t.Fatalf("expected error, got none") } @@ -111,7 +112,7 @@ polling_method: list t.Run(test.name, func(t *testing.T) { f := S3Source{} logger := log.NewEntry(log.New()) - err := f.Configure([]byte(test.config), logger) + err := f.Configure([]byte(test.config), logger, configuration.METRICS_NONE) if err != nil { t.Fatalf("unexpected error: %s", err.Error()) } @@ -306,7 +307,7 @@ prefix: foo/ f := S3Source{} logger := log.NewEntry(log.New()) logger.Logger.SetLevel(log.TraceLevel) - err := f.Configure([]byte(test.config), logger) + err := f.Configure([]byte(test.config), logger, configuration.METRICS_NONE) if err != nil { t.Fatalf("unexpected error: %s", err.Error()) } @@ -381,7 +382,7 @@ sqs_name: test linesRead := 0 f := S3Source{} logger := log.NewEntry(log.New()) - err := f.Configure([]byte(test.config), logger) + err := f.Configure([]byte(test.config), logger, configuration.METRICS_NONE) if err != nil { t.Fatalf("unexpected error: %s", err.Error()) } diff --git a/pkg/acquisition/modules/syslog/syslog.go b/pkg/acquisition/modules/syslog/syslog.go index 8aed2836816..47940c3294c 100644 --- a/pkg/acquisition/modules/syslog/syslog.go +++ b/pkg/acquisition/modules/syslog/syslog.go @@ -29,10 +29,11 @@ type SyslogConfiguration struct { } type SyslogSource struct { - config SyslogConfiguration - logger *log.Entry - server *syslogserver.SyslogServer - serverTomb *tomb.Tomb + metricsLevel int + config SyslogConfiguration + logger *log.Entry + server *syslogserver.SyslogServer + serverTomb *tomb.Tomb } var linesReceived = prometheus.NewCounterVec( @@ -121,10 +122,10 @@ func (s *SyslogSource) UnmarshalConfig(yamlConfig []byte) error { return nil } -func (s *SyslogSource) Configure(yamlConfig []byte, logger *log.Entry) error { +func (s *SyslogSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLevel int) error { s.logger = logger 
s.logger.Infof("Starting syslog datasource configuration") - + s.metricsLevel = MetricsLevel err := s.UnmarshalConfig(yamlConfig) if err != nil { return err @@ -198,7 +199,9 @@ func (s *SyslogSource) handleSyslogMsg(out chan types.Event, t *tomb.Tomb, c cha logger := s.logger.WithField("client", syslogLine.Client) logger.Tracef("raw: %s", syslogLine) - linesReceived.With(prometheus.Labels{"source": syslogLine.Client}).Inc() + if s.metricsLevel != configuration.METRICS_NONE { + linesReceived.With(prometheus.Labels{"source": syslogLine.Client}).Inc() + } p := rfc3164.NewRFC3164Parser(rfc3164.WithCurrentYear()) err := p.Parse(syslogLine.Message) if err != nil { @@ -211,10 +214,14 @@ func (s *SyslogSource) handleSyslogMsg(out chan types.Event, t *tomb.Tomb, c cha continue } line = s.buildLogFromSyslog(p2.Timestamp, p2.Hostname, p2.Tag, p2.PID, p2.Message) - linesParsed.With(prometheus.Labels{"source": syslogLine.Client, "type": "rfc5424"}).Inc() + if s.metricsLevel != configuration.METRICS_NONE { + linesParsed.With(prometheus.Labels{"source": syslogLine.Client, "type": "rfc5424"}).Inc() + } } else { line = s.buildLogFromSyslog(p.Timestamp, p.Hostname, p.Tag, p.PID, p.Message) - linesParsed.With(prometheus.Labels{"source": syslogLine.Client, "type": "rfc3164"}).Inc() + if s.metricsLevel != configuration.METRICS_NONE { + linesParsed.With(prometheus.Labels{"source": syslogLine.Client, "type": "rfc3164"}).Inc() + } } line = strings.TrimSuffix(line, "\n") diff --git a/pkg/acquisition/modules/syslog/syslog_test.go b/pkg/acquisition/modules/syslog/syslog_test.go index 1d2ba3fb648..ba14c7db053 100644 --- a/pkg/acquisition/modules/syslog/syslog_test.go +++ b/pkg/acquisition/modules/syslog/syslog_test.go @@ -9,6 +9,7 @@ import ( "github.com/crowdsecurity/go-cs-lib/cstest" + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" "github.com/crowdsecurity/crowdsec/pkg/types" log "github.com/sirupsen/logrus" "gopkg.in/tomb.v2" @@ -56,7 +57,7 @@ listen_addr: 10.0.0`, }) 
for _, test := range tests { s := SyslogSource{} - err := s.Configure([]byte(test.config), subLogger) + err := s.Configure([]byte(test.config), subLogger, configuration.METRICS_NONE) cstest.AssertErrorContains(t, err, test.expectedErr) } } @@ -137,7 +138,7 @@ listen_addr: 127.0.0.1`, "type": "syslog", }) s := SyslogSource{} - err := s.Configure([]byte(ts.config), subLogger) + err := s.Configure([]byte(ts.config), subLogger, configuration.METRICS_NONE) if err != nil { t.Fatalf("could not configure syslog source : %s", err) } diff --git a/pkg/acquisition/modules/wineventlog/wineventlog.go b/pkg/acquisition/modules/wineventlog/wineventlog.go index f0eca5d13d7..44035d0a708 100644 --- a/pkg/acquisition/modules/wineventlog/wineventlog.go +++ b/pkg/acquisition/modules/wineventlog/wineventlog.go @@ -23,7 +23,7 @@ func (w *WinEventLogSource) UnmarshalConfig(yamlConfig []byte) error { return nil } -func (w *WinEventLogSource) Configure(yamlConfig []byte, logger *log.Entry) error { +func (w *WinEventLogSource) Configure(yamlConfig []byte, logger *log.Entry, metricsLevel int) error { return nil } diff --git a/pkg/acquisition/modules/wineventlog/wineventlog_test.go b/pkg/acquisition/modules/wineventlog/wineventlog_test.go index 053ba88b52d..c937ceba825 100644 --- a/pkg/acquisition/modules/wineventlog/wineventlog_test.go +++ b/pkg/acquisition/modules/wineventlog/wineventlog_test.go @@ -7,6 +7,7 @@ import ( "testing" "time" + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" "github.com/crowdsecurity/crowdsec/pkg/types" log "github.com/sirupsen/logrus" @@ -58,7 +59,7 @@ xpath_query: test`, }) for _, test := range tests { f := WinEventLogSource{} - err := f.Configure([]byte(test.config), subLogger) + err := f.Configure([]byte(test.config), subLogger, configuration.METRICS_NONE) assert.Contains(t, err.Error(), test.expectedErr) } } @@ -117,7 +118,7 @@ event_level: bla`, }) for _, test := range tests { f := 
WinEventLogSource{} - f.Configure([]byte(test.config), subLogger) + f.Configure([]byte(test.config), subLogger, configuration.METRICS_NONE) q, err := f.buildXpathQuery() if test.expectedErr != "" { if err == nil { @@ -194,7 +195,7 @@ event_ids: to := &tomb.Tomb{} c := make(chan types.Event) f := WinEventLogSource{} - f.Configure([]byte(test.config), subLogger) + f.Configure([]byte(test.config), subLogger, configuration.METRICS_NONE) f.StreamingAcquisition(c, to) time.Sleep(time.Second) lines := test.expectedLines diff --git a/pkg/acquisition/modules/wineventlog/wineventlog_windows.go b/pkg/acquisition/modules/wineventlog/wineventlog_windows.go index ee69dc35cdd..8adbf1e53c5 100644 --- a/pkg/acquisition/modules/wineventlog/wineventlog_windows.go +++ b/pkg/acquisition/modules/wineventlog/wineventlog_windows.go @@ -34,11 +34,12 @@ type WinEventLogConfiguration struct { } type WinEventLogSource struct { - config WinEventLogConfiguration - logger *log.Entry - evtConfig *winlog.SubscribeConfig - query string - name string + metricsLevel int + config WinEventLogConfiguration + logger *log.Entry + evtConfig *winlog.SubscribeConfig + query string + name string } type QueryList struct { @@ -188,7 +189,9 @@ func (w *WinEventLogSource) getEvents(out chan types.Event, t *tomb.Tomb) error continue } for _, event := range renderedEvents { - linesRead.With(prometheus.Labels{"source": w.name}).Inc() + if w.metricsLevel != configuration.METRICS_NONE { + linesRead.With(prometheus.Labels{"source": w.name}).Inc() + } l := types.Line{} l.Raw = event l.Module = w.GetName() @@ -270,8 +273,9 @@ func (w *WinEventLogSource) UnmarshalConfig(yamlConfig []byte) error { return nil } -func (w *WinEventLogSource) Configure(yamlConfig []byte, logger *log.Entry) error { +func (w *WinEventLogSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLevel int) error { w.logger = logger + w.metricsLevel = MetricsLevel err := w.UnmarshalConfig(yamlConfig) if err != nil { diff --git 
a/pkg/csconfig/config.go b/pkg/csconfig/config.go index 0c960803e04..e007e042bd5 100644 --- a/pkg/csconfig/config.go +++ b/pkg/csconfig/config.go @@ -12,6 +12,7 @@ import ( log "github.com/sirupsen/logrus" "gopkg.in/yaml.v3" + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" "github.com/crowdsecurity/go-cs-lib/csstring" "github.com/crowdsecurity/go-cs-lib/ptr" "github.com/crowdsecurity/go-cs-lib/yamlpatch" @@ -118,7 +119,7 @@ func NewDefaultConfig() *Config { } prometheus := PrometheusCfg{ Enabled: true, - Level: "full", + Level: configuration.CFG_METRICS_FULL, } configPaths := ConfigurationPaths{ ConfigDir: DefaultConfigPath("."), From 2a7e8383c88674481102fe87ac9b86b1e87dbcb5 Mon Sep 17 00:00:00 2001 From: "Thibault \"bui\" Koechlin" Date: Wed, 13 Mar 2024 17:20:06 +0100 Subject: [PATCH 058/318] fix #2889 (#2892) * fix #2889 --- pkg/appsec/query_utils.go | 78 +++++++++++++ pkg/appsec/query_utils_test.go | 207 +++++++++++++++++++++++++++++++++ pkg/appsec/request.go | 2 +- 3 files changed, 286 insertions(+), 1 deletion(-) create mode 100644 pkg/appsec/query_utils.go create mode 100644 pkg/appsec/query_utils_test.go diff --git a/pkg/appsec/query_utils.go b/pkg/appsec/query_utils.go new file mode 100644 index 00000000000..0c886e0ea51 --- /dev/null +++ b/pkg/appsec/query_utils.go @@ -0,0 +1,78 @@ +package appsec + +// This file is mostly stolen from net/url package, but with some modifications to allow less strict parsing of query strings + +import ( + "net/url" + "strings" +) + +// parseQuery and parseQuery are copied net/url package, but allow semicolon in values +func ParseQuery(query string) url.Values { + m := make(url.Values) + parseQuery(m, query) + return m +} + +func parseQuery(m url.Values, query string) { + for query != "" { + var key string + key, query, _ = strings.Cut(query, "&") + + if key == "" { + continue + } + key, value, _ := strings.Cut(key, "=") + //for now we'll just ignore the errors, but ideally we want to fire some "internal" 
rules when we see invalid query strings + key = unescape(key) + value = unescape(value) + m[key] = append(m[key], value) + } +} + +func hexDigitToByte(digit byte) (byte, bool) { + switch { + case digit >= '0' && digit <= '9': + return digit - '0', true + case digit >= 'a' && digit <= 'f': + return digit - 'a' + 10, true + case digit >= 'A' && digit <= 'F': + return digit - 'A' + 10, true + default: + return 0, false + } +} + +func unescape(input string) string { + ilen := len(input) + res := strings.Builder{} + res.Grow(ilen) + for i := 0; i < ilen; i++ { + ci := input[i] + if ci == '+' { + res.WriteByte(' ') + continue + } + if ci == '%' { + if i+2 >= ilen { + res.WriteByte(ci) + continue + } + hi, ok := hexDigitToByte(input[i+1]) + if !ok { + res.WriteByte(ci) + continue + } + lo, ok := hexDigitToByte(input[i+2]) + if !ok { + res.WriteByte(ci) + continue + } + res.WriteByte(hi<<4 | lo) + i += 2 + continue + } + res.WriteByte(ci) + } + return res.String() +} diff --git a/pkg/appsec/query_utils_test.go b/pkg/appsec/query_utils_test.go new file mode 100644 index 00000000000..2ad7927968d --- /dev/null +++ b/pkg/appsec/query_utils_test.go @@ -0,0 +1,207 @@ +package appsec + +import ( + "net/url" + "reflect" + "testing" +) + +func TestParseQuery(t *testing.T) { + tests := []struct { + name string + query string + expected url.Values + }{ + { + name: "Simple query", + query: "foo=bar", + expected: url.Values{ + "foo": []string{"bar"}, + }, + }, + { + name: "Multiple values", + query: "foo=bar&foo=baz", + expected: url.Values{ + "foo": []string{"bar", "baz"}, + }, + }, + { + name: "Empty value", + query: "foo=", + expected: url.Values{ + "foo": []string{""}, + }, + }, + { + name: "Empty key", + query: "=bar", + expected: url.Values{ + "": []string{"bar"}, + }, + }, + { + name: "Empty query", + query: "", + expected: url.Values{}, + }, + { + name: "Multiple keys", + query: "foo=bar&baz=qux", + expected: url.Values{ + "foo": []string{"bar"}, + "baz": []string{"qux"}, + }, 
+ }, + { + name: "Multiple keys with empty value", + query: "foo=bar&baz=qux&quux=", + expected: url.Values{ + "foo": []string{"bar"}, + "baz": []string{"qux"}, + "quux": []string{""}, + }, + }, + { + name: "Multiple keys with empty value and empty key", + query: "foo=bar&baz=qux&quux=&=quuz", + expected: url.Values{ + "foo": []string{"bar"}, + "baz": []string{"qux"}, + "quux": []string{""}, + "": []string{"quuz"}, + }, + }, + { + name: "Multiple keys with empty value and empty key and multiple values", + query: "foo=bar&baz=qux&quux=&=quuz&foo=baz", + expected: url.Values{ + "foo": []string{"bar", "baz"}, + "baz": []string{"qux"}, + "quux": []string{""}, + "": []string{"quuz"}, + }, + }, + { + name: "Multiple keys with empty value and empty key and multiple values and escaped characters", + query: "foo=bar&baz=qux&quux=&=quuz&foo=baz&foo=bar%20baz", + expected: url.Values{ + "foo": []string{"bar", "baz", "bar baz"}, + "baz": []string{"qux"}, + "quux": []string{""}, + "": []string{"quuz"}, + }, + }, + { + name: "Multiple keys with empty value and empty key and multiple values and escaped characters and semicolon", + query: "foo=bar&baz=qux&quux=&=quuz&foo=baz&foo=bar%20baz&foo=bar%3Bbaz", + expected: url.Values{ + "foo": []string{"bar", "baz", "bar baz", "bar;baz"}, + "baz": []string{"qux"}, + "quux": []string{""}, + "": []string{"quuz"}, + }, + }, + { + name: "Multiple keys with empty value and empty key and multiple values and escaped characters and semicolon and ampersand", + query: "foo=bar&baz=qux&quux=&=quuz&foo=baz&foo=bar%20baz&foo=bar%3Bbaz&foo=bar%26baz", + expected: url.Values{ + "foo": []string{"bar", "baz", "bar baz", "bar;baz", "bar&baz"}, + "baz": []string{"qux"}, + "quux": []string{""}, + "": []string{"quuz"}, + }, + }, + { + name: "Multiple keys with empty value and empty key and multiple values and escaped characters and semicolon and ampersand and equals", + query: 
"foo=bar&baz=qux&quux=&=quuz&foo=baz&foo=bar%20baz&foo=bar%3Bbaz&foo=bar%26baz&foo=bar%3Dbaz", + expected: url.Values{ + "foo": []string{"bar", "baz", "bar baz", "bar;baz", "bar&baz", "bar=baz"}, + "baz": []string{"qux"}, + "quux": []string{""}, + "": []string{"quuz"}, + }, + }, + { + name: "Multiple keys with empty value and empty key and multiple values and escaped characters and semicolon and ampersand and equals and question mark", + query: "foo=bar&baz=qux&quux=&=quuz&foo=baz&foo=bar%20baz&foo=bar%3Bbaz&foo=bar%26baz&foo=bar%3Dbaz&foo=bar%3Fbaz", + expected: url.Values{ + "foo": []string{"bar", "baz", "bar baz", "bar;baz", "bar&baz", "bar=baz", "bar?baz"}, + "baz": []string{"qux"}, + "quux": []string{""}, + "": []string{"quuz"}, + }, + }, + { + name: "keys with escaped characters", + query: "foo=ba;r&baz=qu;;x&quux=x\\&ww&xx=qu?uz&", + expected: url.Values{ + "foo": []string{"ba;r"}, + "baz": []string{"qu;;x"}, + "quux": []string{"x\\"}, + "ww": []string{""}, + "xx": []string{"qu?uz"}, + }, + }, + { + name: "hexadecimal characters", + query: "foo=bar%20baz", + expected: url.Values{ + "foo": []string{"bar baz"}, + }, + }, + { + name: "hexadecimal characters upper and lower case", + query: "foo=Ba%42%42&bar=w%2f%2F", + expected: url.Values{ + "foo": []string{"BaBB"}, + "bar": []string{"w//"}, + }, + }, + { + name: "hexadecimal characters with invalid characters", + query: "foo=bar%20baz%2", + expected: url.Values{ + "foo": []string{"bar baz%2"}, + }, + }, + { + name: "hexadecimal characters with invalid hex characters", + query: "foo=bar%xx", + expected: url.Values{ + "foo": []string{"bar%xx"}, + }, + }, + { + name: "hexadecimal characters with invalid 2nd hex character", + query: "foo=bar%2x", + expected: url.Values{ + "foo": []string{"bar%2x"}, + }, + }, + { + name: "url +", + query: "foo=bar+x", + expected: url.Values{ + "foo": []string{"bar x"}, + }, + }, + { + name: "url &&", + query: "foo=bar&&lol=bur", + expected: url.Values{ + "foo": []string{"bar"}, + 
"lol": []string{"bur"}, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + res := ParseQuery(test.query) + if !reflect.DeepEqual(res, test.expected) { + t.Fatalf("unexpected result: %v", res) + } + }) + } +} diff --git a/pkg/appsec/request.go b/pkg/appsec/request.go index 0479dea471e..effb1828307 100644 --- a/pkg/appsec/request.go +++ b/pkg/appsec/request.go @@ -367,7 +367,7 @@ func NewParsedRequestFromRequest(r *http.Request, logger *logrus.Entry) (ParsedR URL: r.URL, Proto: r.Proto, Body: body, - Args: parsedURL.Query(), //TODO: Check if there's not potential bypass as it excludes malformed args + Args: ParseQuery(parsedURL.RawQuery), TransferEncoding: r.TransferEncoding, ResponseChannel: make(chan AppsecTempResponse), RemoteAddrNormalized: remoteAddrNormalized, From 6c042f18f0d2760018eb84ba7dc0a86dc38ca386 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 14 Mar 2024 10:43:02 +0100 Subject: [PATCH 059/318] LAPI: local api unix socket support (#2770) --- .gitignore | 5 +- cmd/crowdsec-cli/config_show.go | 1 + cmd/crowdsec-cli/lapi.go | 52 ++++---- cmd/crowdsec-cli/lapi_test.go | 49 ++++++++ cmd/crowdsec-cli/machines.go | 4 +- docker/test/tests/test_tls.py | 5 +- pkg/apiclient/auth_jwt.go | 11 +- pkg/apiclient/client.go | 92 ++++++++++---- pkg/apiclient/client_test.go | 154 ++++++++++++++++++++++- pkg/apiserver/apiserver.go | 119 +++++++++++++----- pkg/apiserver/controllers/v1/alerts.go | 7 +- pkg/apiserver/controllers/v1/utils.go | 20 ++- pkg/apiserver/middlewares/v1/api_key.go | 28 +++-- pkg/apiserver/middlewares/v1/jwt.go | 29 +++-- pkg/csconfig/api.go | 55 +++++++-- test/bats/01_crowdsec_lapi.bats | 10 +- test/bats/01_cscli.bats | 5 +- test/bats/09_socket.bats | 158 ++++++++++++++++++++++++ test/bats/30_machines_tls.bats | 45 ++++++- test/lib/config/config-global | 1 + test/lib/config/config-local | 5 +- 21 files changed, 717 insertions(+), 138 deletions(-) create mode 100644 
cmd/crowdsec-cli/lapi_test.go create mode 100644 test/bats/09_socket.bats diff --git a/.gitignore b/.gitignore index 3054e9eb3c2..6e6624fd282 100644 --- a/.gitignore +++ b/.gitignore @@ -6,7 +6,10 @@ *.dylib *~ .pc + +# IDEs .vscode +.idea # If vendor is included, allow prebuilt (wasm?) libraries. !vendor/**/*.so @@ -34,7 +37,7 @@ test/coverage/* *.swo # Dependencies are not vendored by default, but a tarball is created by "make vendor" -# and provided in the release. Used by freebsd, gentoo, etc. +# and provided in the release. Used by gentoo, etc. vendor/ vendor.tgz diff --git a/cmd/crowdsec-cli/config_show.go b/cmd/crowdsec-cli/config_show.go index 634ca77410e..c277173c387 100644 --- a/cmd/crowdsec-cli/config_show.go +++ b/cmd/crowdsec-cli/config_show.go @@ -100,6 +100,7 @@ API Client: {{- if .API.Server }} Local API Server{{if and .API.Server.Enable (not (ValueBool .API.Server.Enable))}} (disabled){{end}}: - Listen URL : {{.API.Server.ListenURI}} + - Listen Socket : {{.API.Server.ListenSocket}} - Profile File : {{.API.Server.ProfilesPath}} {{- if .API.Server.TLS }} diff --git a/cmd/crowdsec-cli/lapi.go b/cmd/crowdsec-cli/lapi.go index 0bb4a31b72a..13a9d8d7e77 100644 --- a/cmd/crowdsec-cli/lapi.go +++ b/cmd/crowdsec-cli/lapi.go @@ -44,7 +44,9 @@ func (cli *cliLapi) status() error { password := strfmt.Password(cfg.API.Client.Credentials.Password) login := cfg.API.Client.Credentials.Login - apiurl, err := url.Parse(cfg.API.Client.Credentials.URL) + origURL := cfg.API.Client.Credentials.URL + + apiURL, err := url.Parse(origURL) if err != nil { return fmt.Errorf("parsing api url: %w", err) } @@ -59,7 +61,7 @@ func (cli *cliLapi) status() error { return fmt.Errorf("failed to get scenarios: %w", err) } - Client, err = apiclient.NewDefaultClient(apiurl, + Client, err = apiclient.NewDefaultClient(apiURL, LAPIURLPrefix, fmt.Sprintf("crowdsec/%s", version.String()), nil) @@ -74,7 +76,8 @@ func (cli *cliLapi) status() error { } log.Infof("Loaded credentials from %s", 
cfg.API.Client.CredentialsFilePath) - log.Infof("Trying to authenticate with username %s on %s", login, apiurl) + // use the original string because apiURL would print 'http://unix/' + log.Infof("Trying to authenticate with username %s on %s", login, origURL) _, _, err = Client.Auth.AuthenticateWatcher(context.Background(), t) if err != nil { @@ -101,23 +104,7 @@ func (cli *cliLapi) register(apiURL string, outputFile string, machine string) e password := strfmt.Password(generatePassword(passwordLength)) - if apiURL == "" { - if cfg.API.Client == nil || cfg.API.Client.Credentials == nil || cfg.API.Client.Credentials.URL == "" { - return fmt.Errorf("no Local API URL. Please provide it in your configuration or with the -u parameter") - } - - apiURL = cfg.API.Client.Credentials.URL - } - /*URL needs to end with /, but user doesn't care*/ - if !strings.HasSuffix(apiURL, "/") { - apiURL += "/" - } - /*URL needs to start with http://, but user doesn't care*/ - if !strings.HasPrefix(apiURL, "http://") && !strings.HasPrefix(apiURL, "https://") { - apiURL = "http://" + apiURL - } - - apiurl, err := url.Parse(apiURL) + apiurl, err := prepareAPIURL(cfg.API.Client, apiURL) if err != nil { return fmt.Errorf("parsing api url: %w", err) } @@ -173,13 +160,36 @@ func (cli *cliLapi) register(apiURL string, outputFile string, machine string) e return nil } +// prepareAPIURL checks/fixes a LAPI connection url (http, https or socket) and returns an URL struct +func prepareAPIURL(clientCfg *csconfig.LocalApiClientCfg, apiURL string) (*url.URL, error) { + if apiURL == "" { + if clientCfg == nil || clientCfg.Credentials == nil || clientCfg.Credentials.URL == "" { + return nil, errors.New("no Local API URL. 
Please provide it in your configuration or with the -u parameter") + } + + apiURL = clientCfg.Credentials.URL + } + + // URL needs to end with /, but user doesn't care + if !strings.HasSuffix(apiURL, "/") { + apiURL += "/" + } + + // URL needs to start with http://, but user doesn't care + if !strings.HasPrefix(apiURL, "http://") && !strings.HasPrefix(apiURL, "https://") && !strings.HasPrefix(apiURL, "/") { + apiURL = "http://" + apiURL + } + + return url.Parse(apiURL) +} + func (cli *cliLapi) newStatusCmd() *cobra.Command { cmdLapiStatus := &cobra.Command{ Use: "status", Short: "Check authentication to Local API (LAPI)", Args: cobra.MinimumNArgs(0), DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(_ *cobra.Command, _ []string) error { return cli.status() }, } diff --git a/cmd/crowdsec-cli/lapi_test.go b/cmd/crowdsec-cli/lapi_test.go new file mode 100644 index 00000000000..018ecad8118 --- /dev/null +++ b/cmd/crowdsec-cli/lapi_test.go @@ -0,0 +1,49 @@ +package main + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/crowdsecurity/crowdsec/pkg/csconfig" +) + +func TestPrepareAPIURL_NoProtocol(t *testing.T) { + url, err := prepareAPIURL(nil, "localhost:81") + require.NoError(t, err) + assert.Equal(t, "http://localhost:81/", url.String()) +} + +func TestPrepareAPIURL_Http(t *testing.T) { + url, err := prepareAPIURL(nil, "http://localhost:81") + require.NoError(t, err) + assert.Equal(t, "http://localhost:81/", url.String()) +} + +func TestPrepareAPIURL_Https(t *testing.T) { + url, err := prepareAPIURL(nil, "https://localhost:81") + require.NoError(t, err) + assert.Equal(t, "https://localhost:81/", url.String()) +} + +func TestPrepareAPIURL_UnixSocket(t *testing.T) { + url, err := prepareAPIURL(nil, "/path/socket") + require.NoError(t, err) + assert.Equal(t, "/path/socket/", url.String()) +} + +func TestPrepareAPIURL_Empty(t *testing.T) { + _, err := 
prepareAPIURL(nil, "") + require.Error(t, err) +} + +func TestPrepareAPIURL_Empty_ConfigOverride(t *testing.T) { + url, err := prepareAPIURL(&csconfig.LocalApiClientCfg{ + Credentials: &csconfig.ApiCredentialsCfg{ + URL: "localhost:80", + }, + }, "") + require.NoError(t, err) + assert.Equal(t, "http://localhost:80/", url.String()) +} diff --git a/cmd/crowdsec-cli/machines.go b/cmd/crowdsec-cli/machines.go index df225c06f7f..1457fb5a0cc 100644 --- a/cmd/crowdsec-cli/machines.go +++ b/cmd/crowdsec-cli/machines.go @@ -318,8 +318,8 @@ func (cli *cliMachines) add(args []string, machinePassword string, dumpFile stri if apiURL == "" { if clientCfg != nil && clientCfg.Credentials != nil && clientCfg.Credentials.URL != "" { apiURL = clientCfg.Credentials.URL - } else if serverCfg != nil && serverCfg.ListenURI != "" { - apiURL = "http://" + serverCfg.ListenURI + } else if serverCfg.ClientURL() != "" { + apiURL = serverCfg.ClientURL() } else { return errors.New("unable to dump an api URL. Please provide it in your configuration or with the -u parameter") } diff --git a/docker/test/tests/test_tls.py b/docker/test/tests/test_tls.py index 591afe0d303..fe899b000af 100644 --- a/docker/test/tests/test_tls.py +++ b/docker/test/tests/test_tls.py @@ -22,8 +22,7 @@ def test_missing_key_file(crowdsec, flavor): } with crowdsec(flavor=flavor, environment=env, wait_status=Status.EXITED) as cs: - # XXX: this message appears twice, is that normal? 
- cs.wait_for_log("*while starting API server: missing TLS key file*") + cs.wait_for_log("*local API server stopped with error: missing TLS key file*") def test_missing_cert_file(crowdsec, flavor): @@ -35,7 +34,7 @@ def test_missing_cert_file(crowdsec, flavor): } with crowdsec(flavor=flavor, environment=env, wait_status=Status.EXITED) as cs: - cs.wait_for_log("*while starting API server: missing TLS cert file*") + cs.wait_for_log("*local API server stopped with error: missing TLS cert file*") def test_tls_missing_ca(crowdsec, flavor, certs_dir): diff --git a/pkg/apiclient/auth_jwt.go b/pkg/apiclient/auth_jwt.go index 2ead10cf6da..6ee17fa5e72 100644 --- a/pkg/apiclient/auth_jwt.go +++ b/pkg/apiclient/auth_jwt.go @@ -70,9 +70,14 @@ func (t *JWTTransport) refreshJwtToken() error { req.Header.Add("Content-Type", "application/json") + transport := t.Transport + if transport == nil { + transport = http.DefaultTransport + } + client := &http.Client{ Transport: &retryRoundTripper{ - next: http.DefaultTransport, + next: transport, maxAttempts: 5, withBackOff: true, retryStatusCodes: []int{http.StatusTooManyRequests, http.StatusServiceUnavailable, http.StatusGatewayTimeout, http.StatusInternalServerError}, @@ -153,7 +158,7 @@ func (t *JWTTransport) prepareRequest(req *http.Request) (*http.Request, error) req.Header.Add("User-Agent", t.UserAgent) } - req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", t.Token)) + req.Header.Add("Authorization", "Bearer "+t.Token) return req, nil } @@ -166,7 +171,7 @@ func (t *JWTTransport) RoundTrip(req *http.Request) (*http.Response, error) { } if log.GetLevel() >= log.TraceLevel { - //requestToDump := cloneRequest(req) + // requestToDump := cloneRequest(req) dump, _ := httputil.DumpRequest(req, true) log.Tracef("req-jwt: %s", string(dump)) } diff --git a/pkg/apiclient/client.go b/pkg/apiclient/client.go index b487f68a698..e0e521d6a6f 100644 --- a/pkg/apiclient/client.go +++ b/pkg/apiclient/client.go @@ -5,8 +5,10 @@ import ( 
"crypto/tls" "crypto/x509" "fmt" + "net" "net/http" "net/url" + "strings" "github.com/golang-jwt/jwt/v4" @@ -67,12 +69,18 @@ func NewClient(config *Config) (*ApiClient, error) { MachineID: &config.MachineID, Password: &config.Password, Scenarios: config.Scenarios, - URL: config.URL, UserAgent: config.UserAgent, VersionPrefix: config.VersionPrefix, UpdateScenario: config.UpdateScenario, } + transport, baseURL := createTransport(config.URL) + if transport != nil { + t.Transport = transport + } + + t.URL = baseURL + tlsconfig := tls.Config{InsecureSkipVerify: InsecureSkipVerify} tlsconfig.RootCAs = CaCertPool @@ -84,7 +92,7 @@ func NewClient(config *Config) (*ApiClient, error) { ht.TLSClientConfig = &tlsconfig } - c := &ApiClient{client: t.Client(), BaseURL: config.URL, UserAgent: config.UserAgent, URLPrefix: config.VersionPrefix, PapiURL: config.PapiURL} + c := &ApiClient{client: t.Client(), BaseURL: baseURL, UserAgent: config.UserAgent, URLPrefix: config.VersionPrefix, PapiURL: config.PapiURL} c.common.client = c c.Decisions = (*DecisionsService)(&c.common) c.Alerts = (*AlertsService)(&c.common) @@ -98,23 +106,29 @@ func NewClient(config *Config) (*ApiClient, error) { } func NewDefaultClient(URL *url.URL, prefix string, userAgent string, client *http.Client) (*ApiClient, error) { + transport, baseURL := createTransport(URL) + if client == nil { client = &http.Client{} - if ht, ok := http.DefaultTransport.(*http.Transport); ok { - tlsconfig := tls.Config{InsecureSkipVerify: InsecureSkipVerify} - tlsconfig.RootCAs = CaCertPool + if transport != nil { + client.Transport = transport + } else { + if ht, ok := http.DefaultTransport.(*http.Transport); ok { + tlsconfig := tls.Config{InsecureSkipVerify: InsecureSkipVerify} + tlsconfig.RootCAs = CaCertPool - if Cert != nil { - tlsconfig.Certificates = []tls.Certificate{*Cert} - } + if Cert != nil { + tlsconfig.Certificates = []tls.Certificate{*Cert} + } - ht.TLSClientConfig = &tlsconfig - client.Transport = ht + 
ht.TLSClientConfig = &tlsconfig + client.Transport = ht + } } } - c := &ApiClient{client: client, BaseURL: URL, UserAgent: userAgent, URLPrefix: prefix} + c := &ApiClient{client: client, BaseURL: baseURL, UserAgent: userAgent, URLPrefix: prefix} c.common.client = c c.Decisions = (*DecisionsService)(&c.common) c.Alerts = (*AlertsService)(&c.common) @@ -128,18 +142,26 @@ func NewDefaultClient(URL *url.URL, prefix string, userAgent string, client *htt } func RegisterClient(config *Config, client *http.Client) (*ApiClient, error) { + transport, baseURL := createTransport(config.URL) + if client == nil { client = &http.Client{} - } + if transport != nil { + client.Transport = transport + } else { + tlsconfig := tls.Config{InsecureSkipVerify: InsecureSkipVerify} + if Cert != nil { + tlsconfig.RootCAs = CaCertPool + tlsconfig.Certificates = []tls.Certificate{*Cert} + } - tlsconfig := tls.Config{InsecureSkipVerify: InsecureSkipVerify} - if Cert != nil { - tlsconfig.RootCAs = CaCertPool - tlsconfig.Certificates = []tls.Certificate{*Cert} + http.DefaultTransport.(*http.Transport).TLSClientConfig = &tlsconfig + } + } else if client.Transport == nil && transport != nil { + client.Transport = transport } - http.DefaultTransport.(*http.Transport).TLSClientConfig = &tlsconfig - c := &ApiClient{client: client, BaseURL: config.URL, UserAgent: config.UserAgent, URLPrefix: config.VersionPrefix} + c := &ApiClient{client: client, BaseURL: baseURL, UserAgent: config.UserAgent, URLPrefix: config.VersionPrefix} c.common.client = c c.Decisions = (*DecisionsService)(&c.common) c.Alerts = (*AlertsService)(&c.common) @@ -158,11 +180,31 @@ func RegisterClient(config *Config, client *http.Client) (*ApiClient, error) { return c, nil } +func createTransport(url *url.URL) (*http.Transport, *url.URL) { + urlString := url.String() + + // TCP transport + if !strings.HasPrefix(urlString, "/") { + return nil, url + } + + // Unix transport + url.Path = "/" + url.Host = "unix" + url.Scheme = "http" + + 
return &http.Transport{ + DialContext: func(_ context.Context, _, _ string) (net.Conn, error) { + return net.Dial("unix", strings.TrimSuffix(urlString, "/")) + }, + }, url +} + type Response struct { Response *http.Response - //add our pagination stuff - //NextPage int - //... + // add our pagination stuff + // NextPage int + // ... } func newResponse(r *http.Response) *Response { @@ -170,14 +212,14 @@ func newResponse(r *http.Response) *Response { } type ListOpts struct { - //Page int - //PerPage int + // Page int + // PerPage int } type DeleteOpts struct { - //?? + // ?? } type AddOpts struct { - //?? + // ?? } diff --git a/pkg/apiclient/client_test.go b/pkg/apiclient/client_test.go index dc6eae16926..d3296c4b67f 100644 --- a/pkg/apiclient/client_test.go +++ b/pkg/apiclient/client_test.go @@ -3,10 +3,13 @@ package apiclient import ( "context" "fmt" + "net" "net/http" "net/http/httptest" "net/url" + "path" "runtime" + "strings" "testing" log "github.com/sirupsen/logrus" @@ -34,12 +37,50 @@ func setupWithPrefix(urlPrefix string) (*http.ServeMux, string, func()) { apiHandler := http.NewServeMux() apiHandler.Handle(baseURLPath+"/", http.StripPrefix(baseURLPath, mux)) - // server is a test HTTP server used to provide mock API responses. server := httptest.NewServer(apiHandler) return mux, server.URL, server.Close } +// toUNCPath converts a Windows file path to a UNC path. +// This is necessary because the Go http package does not support Windows file paths. 
+func toUNCPath(path string) (string, error) { + colonIdx := strings.Index(path, ":") + if colonIdx == -1 { + return "", fmt.Errorf("invalid path format, missing drive letter: %s", path) + } + + // URL parsing does not like backslashes + remaining := strings.ReplaceAll(path[colonIdx+1:], "\\", "/") + uncPath := "//localhost/" + path[:colonIdx] + "$" + remaining + + return uncPath, nil +} + +func setupUnixSocketWithPrefix(socket string, urlPrefix string) (mux *http.ServeMux, serverURL string, teardown func()) { + var err error + if runtime.GOOS == "windows" { + socket, err = toUNCPath(socket) + if err != nil { + log.Fatalf("converting to UNC path: %s", err) + } + } + + mux = http.NewServeMux() + baseURLPath := "/" + urlPrefix + + apiHandler := http.NewServeMux() + apiHandler.Handle(baseURLPath+"/", http.StripPrefix(baseURLPath, mux)) + + server := httptest.NewUnstartedServer(apiHandler) + l, _ := net.Listen("unix", socket) + _ = server.Listener.Close() + server.Listener = l + server.Start() + + return mux, socket, server.Close +} + func testMethod(t *testing.T, r *http.Request, want string) { t.Helper() assert.Equal(t, want, r.Method) @@ -77,6 +118,49 @@ func TestNewClientOk(t *testing.T) { assert.Equal(t, http.StatusOK, resp.Response.StatusCode) } +func TestNewClientOk_UnixSocket(t *testing.T) { + tmpDir := t.TempDir() + socket := path.Join(tmpDir, "socket") + + mux, urlx, teardown := setupUnixSocketWithPrefix(socket, "v1") + defer teardown() + + apiURL, err := url.Parse(urlx) + if err != nil { + t.Fatalf("parsing api url: %s", apiURL) + } + + client, err := NewClient(&Config{ + MachineID: "test_login", + Password: "test_password", + UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), + URL: apiURL, + VersionPrefix: "v1", + }) + if err != nil { + t.Fatalf("new api client: %s", err) + } + /*mock login*/ + mux.HandleFunc("/watchers/login", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"code": 200, "expire": 
"2030-01-02T15:04:05Z", "token": "oklol"}`)) + }) + + mux.HandleFunc("/alerts", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + w.WriteHeader(http.StatusOK) + }) + + _, resp, err := client.Alerts.List(context.Background(), AlertsListOpts{}) + if err != nil { + t.Fatalf("test Unable to list alerts : %+v", err) + } + + if resp.Response.StatusCode != http.StatusOK { + t.Fatalf("Alerts.List returned status: %d, want %d", resp.Response.StatusCode, http.StatusCreated) + } +} + func TestNewClientKo(t *testing.T) { mux, urlx, teardown := setup() defer teardown() @@ -131,6 +215,33 @@ func TestNewDefaultClient(t *testing.T) { log.Printf("err-> %s", err) } +func TestNewDefaultClient_UnixSocket(t *testing.T) { + tmpDir := t.TempDir() + socket := path.Join(tmpDir, "socket") + + mux, urlx, teardown := setupUnixSocketWithPrefix(socket, "v1") + defer teardown() + + apiURL, err := url.Parse(urlx) + if err != nil { + t.Fatalf("parsing api url: %s", apiURL) + } + + client, err := NewDefaultClient(apiURL, "/v1", "", nil) + if err != nil { + t.Fatalf("new api client: %s", err) + } + + mux.HandleFunc("/alerts", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusUnauthorized) + w.Write([]byte(`{"code": 401, "message" : "brr"}`)) + }) + + _, _, err = client.Alerts.List(context.Background(), AlertsListOpts{}) + assert.Contains(t, err.Error(), `performing request: API error: brr`) + log.Printf("err-> %s", err) +} + func TestNewClientRegisterKO(t *testing.T) { apiURL, err := url.Parse("http://127.0.0.1:4242/") require.NoError(t, err) @@ -143,10 +254,10 @@ func TestNewClientRegisterKO(t *testing.T) { VersionPrefix: "v1", }, &http.Client{}) - if runtime.GOOS != "windows" { - cstest.RequireErrorContains(t, err, "dial tcp 127.0.0.1:4242: connect: connection refused") - } else { + if runtime.GOOS == "windows" { cstest.RequireErrorContains(t, err, " No connection could be made because the target machine actively refused it.") + } else { + 
cstest.RequireErrorContains(t, err, "dial tcp 127.0.0.1:4242: connect: connection refused") } } @@ -178,6 +289,41 @@ func TestNewClientRegisterOK(t *testing.T) { log.Printf("->%T", client) } +func TestNewClientRegisterOK_UnixSocket(t *testing.T) { + log.SetLevel(log.TraceLevel) + + tmpDir := t.TempDir() + socket := path.Join(tmpDir, "socket") + + mux, urlx, teardown := setupUnixSocketWithPrefix(socket, "v1") + defer teardown() + + /*mock login*/ + mux.HandleFunc("/watchers", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "POST") + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + }) + + apiURL, err := url.Parse(urlx) + if err != nil { + t.Fatalf("parsing api url: %s", apiURL) + } + + client, err := RegisterClient(&Config{ + MachineID: "test_login", + Password: "test_password", + UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), + URL: apiURL, + VersionPrefix: "v1", + }, &http.Client{}) + if err != nil { + t.Fatalf("while registering client : %s", err) + } + + log.Printf("->%T", client) +} + func TestNewClientBadAnswer(t *testing.T) { log.SetLevel(log.TraceLevel) diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go index 19a0085d2dc..e42ad9a98fa 100644 --- a/pkg/apiserver/apiserver.go +++ b/pkg/apiserver/apiserver.go @@ -32,6 +32,7 @@ const keyLength = 32 type APIServer struct { URL string + UnixSocket string TLS *csconfig.TLSCfg dbClient *database.Client logFile string @@ -66,7 +67,7 @@ func recoverFromPanic(c *gin.Context) { // because of https://github.com/golang/net/blob/39120d07d75e76f0079fe5d27480bcb965a21e4c/http2/server.go // and because it seems gin doesn't handle those neither, we need to "hand define" some errors to properly catch them if strErr, ok := err.(error); ok { - //stolen from http2/server.go in x/net + // stolen from http2/server.go in x/net var ( errClientDisconnected = errors.New("client disconnected") errClosedBody = errors.New("body 
closed by handler")
@@ -124,10 +125,10 @@ func newGinLogger(config *csconfig.LocalApiServerCfg) (*log.Logger, string, erro
 	logger := &lumberjack.Logger{
 		Filename:   logFile,
-		MaxSize:    500, //megabytes
+		MaxSize:    500, // megabytes
 		MaxBackups: 3,
-		MaxAge:     28,   //days
-		Compress:   true, //disabled by default
+		MaxAge:     28,   // days
+		Compress:   true, // disabled by default
 	}
 
 	if config.LogMaxSize != 0 {
@@ -176,6 +177,13 @@ func NewServer(config *csconfig.LocalApiServerCfg) (*APIServer, error) {
 
 	router.ForwardedByClientIP = false
 
+	// set the remote address of the request to 127.0.0.1 if it comes from a unix socket
+	router.Use(func(c *gin.Context) {
+		if c.Request.RemoteAddr == "@" {
+			c.Request.RemoteAddr = "127.0.0.1:65535"
+		}
+	})
+
 	if config.TrustedProxies != nil && config.UseForwardedForHeaders {
 		if err = router.SetTrustedProxies(*config.TrustedProxies); err != nil {
 			return nil, fmt.Errorf("while setting trusted_proxies: %w", err)
@@ -223,8 +231,8 @@ func NewServer(config *csconfig.LocalApiServerCfg) (*APIServer, error) {
 	}
 
 	var (
-		apiClient *apic
-		papiClient *Papi
+		apiClient  *apic
+		papiClient *Papi
 	)
 
 	controller.AlertsAddChan = nil
@@ -267,6 +275,7 @@ func NewServer(config *csconfig.LocalApiServerCfg) (*APIServer, error) {
 
 	return &APIServer{
 		URL:            config.ListenURI,
+		UnixSocket:     config.ListenSocket,
 		TLS:            config.TLS,
 		logFile:        logFile,
 		dbClient:       dbClient,
@@ -317,11 +326,11 @@ func (s *APIServer) Run(apiReady chan bool) error {
 		return nil
 	})
 
-	//csConfig.API.Server.ConsoleConfig.ShareCustomScenarios
+	// csConfig.API.Server.ConsoleConfig.ShareCustomScenarios
 	if s.apic.apiClient.IsEnrolled() {
 		if s.consoleConfig.IsPAPIEnabled() {
 			if s.papi.URL != "" {
-				log.Infof("Starting PAPI decision receiver")
+				log.Info("Starting PAPI decision receiver")
 				s.papi.pullTomb.Go(func() error {
 					if err := s.papi.Pull(); err != nil {
 						log.Errorf("papi pull: %s", err)
@@ -353,29 +362,31 @@ func (s *APIServer) Run(apiReady chan bool) error {
 		})
 	}
 
-	s.httpServerTomb.Go(func() error { 
s.listenAndServeURL(apiReady); return nil }) + s.httpServerTomb.Go(func() error { + return s.listenAndServeLAPI(apiReady) + }) + + if err := s.httpServerTomb.Wait(); err != nil { + return fmt.Errorf("local API server stopped with error: %w", err) + } return nil } -// listenAndServeURL starts the http server and blocks until it's closed +// listenAndServeLAPI starts the http server and blocks until it's closed // it also updates the URL field with the actual address the server is listening on // it's meant to be run in a separate goroutine -func (s *APIServer) listenAndServeURL(apiReady chan bool) { - serverError := make(chan error, 1) - - go func() { - listener, err := net.Listen("tcp", s.URL) - if err != nil { - serverError <- fmt.Errorf("listening on %s: %w", s.URL, err) - return - } - - s.URL = listener.Addr().String() - log.Infof("CrowdSec Local API listening on %s", s.URL) - apiReady <- true +func (s *APIServer) listenAndServeLAPI(apiReady chan bool) error { + var ( + tcpListener net.Listener + unixListener net.Listener + err error + serverError = make(chan error, 2) + listenerClosed = make(chan struct{}) + ) - if s.TLS != nil && (s.TLS.CertFilePath != "" || s.TLS.KeyFilePath != "") { + startServer := func(listener net.Listener, canTLS bool) { + if canTLS && s.TLS != nil && (s.TLS.CertFilePath != "" || s.TLS.KeyFilePath != "") { if s.TLS.KeyFilePath == "" { serverError <- errors.New("missing TLS key file") return @@ -391,25 +402,71 @@ func (s *APIServer) listenAndServeURL(apiReady chan bool) { err = s.httpServer.Serve(listener) } - if err != nil && err != http.ErrServerClosed { - serverError <- fmt.Errorf("while serving local API: %w", err) + switch { + case errors.Is(err, http.ErrServerClosed): + break + case err != nil: + serverError <- err + } + } + + // Starting TCP listener + go func() { + if s.URL == "" { + return + } + + tcpListener, err = net.Listen("tcp", s.URL) + if err != nil { + serverError <- fmt.Errorf("listening on %s: %w", s.URL, err) + return 
+ } + + log.Infof("CrowdSec Local API listening on %s", s.URL) + startServer(tcpListener, true) + }() + + // Starting Unix socket listener + go func() { + if s.UnixSocket == "" { + return + } + + _ = os.RemoveAll(s.UnixSocket) + + unixListener, err = net.Listen("unix", s.UnixSocket) + if err != nil { + serverError <- fmt.Errorf("while creating unix listener: %w", err) return } + + log.Infof("CrowdSec Local API listening on Unix socket %s", s.UnixSocket) + startServer(unixListener, false) }() + apiReady <- true + select { case err := <-serverError: - log.Fatalf("while starting API server: %s", err) + return err case <-s.httpServerTomb.Dying(): - log.Infof("Shutting down API server") - // do we need a graceful shutdown here? + log.Info("Shutting down API server") + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() if err := s.httpServer.Shutdown(ctx); err != nil { - log.Errorf("while shutting down http server: %s", err) + log.Errorf("while shutting down http server: %v", err) + } + + close(listenerClosed) + case <-listenerClosed: + if s.UnixSocket != "" { + _ = os.RemoveAll(s.UnixSocket) } } + + return nil } func (s *APIServer) Close() { @@ -437,7 +494,7 @@ func (s *APIServer) Shutdown() error { } } - //close io.writer logger given to gin + // close io.writer logger given to gin if pipe, ok := gin.DefaultErrorWriter.(*io.PipeWriter); ok { pipe.Close() } diff --git a/pkg/apiserver/controllers/v1/alerts.go b/pkg/apiserver/controllers/v1/alerts.go index ad183e4ba80..19dbf8d0cfa 100644 --- a/pkg/apiserver/controllers/v1/alerts.go +++ b/pkg/apiserver/controllers/v1/alerts.go @@ -174,7 +174,7 @@ func (c *Controller) CreateAlert(gctx *gin.Context) { // if coming from cscli, alert already has decisions if len(alert.Decisions) != 0 { - //alert already has a decision (cscli decisions add etc.), generate uuid here + // alert already has a decision (cscli decisions add etc.), generate uuid here for _, decision := range alert.Decisions { 
decision.UUID = uuid.NewString() } @@ -323,12 +323,13 @@ func (c *Controller) DeleteAlertByID(gctx *gin.Context) { var err error incomingIP := gctx.ClientIP() - if incomingIP != "127.0.0.1" && incomingIP != "::1" && !networksContainIP(c.TrustedIPs, incomingIP) { + if incomingIP != "127.0.0.1" && incomingIP != "::1" && !networksContainIP(c.TrustedIPs, incomingIP) && !isUnixSocket(gctx) { gctx.JSON(http.StatusForbidden, gin.H{"message": fmt.Sprintf("access forbidden from this IP (%s)", incomingIP)}) return } decisionIDStr := gctx.Param("alert_id") + decisionID, err := strconv.Atoi(decisionIDStr) if err != nil { gctx.JSON(http.StatusBadRequest, gin.H{"message": "alert_id must be valid integer"}) @@ -349,7 +350,7 @@ func (c *Controller) DeleteAlertByID(gctx *gin.Context) { // DeleteAlerts deletes alerts from the database based on the specified filter func (c *Controller) DeleteAlerts(gctx *gin.Context) { incomingIP := gctx.ClientIP() - if incomingIP != "127.0.0.1" && incomingIP != "::1" && !networksContainIP(c.TrustedIPs, incomingIP) { + if incomingIP != "127.0.0.1" && incomingIP != "::1" && !networksContainIP(c.TrustedIPs, incomingIP) && !isUnixSocket(gctx) { gctx.JSON(http.StatusForbidden, gin.H{"message": fmt.Sprintf("access forbidden from this IP (%s)", incomingIP)}) return } diff --git a/pkg/apiserver/controllers/v1/utils.go b/pkg/apiserver/controllers/v1/utils.go index 6f14dd9204e..2fcf8099e67 100644 --- a/pkg/apiserver/controllers/v1/utils.go +++ b/pkg/apiserver/controllers/v1/utils.go @@ -2,7 +2,9 @@ package v1 import ( "errors" + "net" "net/http" + "strings" jwt "github.com/appleboy/gin-jwt/v2" "github.com/gin-gonic/gin" @@ -25,6 +27,14 @@ func getBouncerFromContext(ctx *gin.Context) (*ent.Bouncer, error) { return bouncerInfo, nil } +func isUnixSocket(c *gin.Context) bool { + if localAddr, ok := c.Request.Context().Value(http.LocalAddrContextKey).(net.Addr); ok { + return strings.HasPrefix(localAddr.Network(), "unix") + } + + return false +} + func 
getMachineIDFromContext(ctx *gin.Context) (string, error) { claims := jwt.ExtractClaims(ctx) if claims == nil { @@ -47,8 +57,16 @@ func getMachineIDFromContext(ctx *gin.Context) (string, error) { func (c *Controller) AbortRemoteIf(option bool) gin.HandlerFunc { return func(gctx *gin.Context) { + if !option { + return + } + + if isUnixSocket(gctx) { + return + } + incomingIP := gctx.ClientIP() - if option && incomingIP != "127.0.0.1" && incomingIP != "::1" { + if incomingIP != "127.0.0.1" && incomingIP != "::1" { gctx.JSON(http.StatusForbidden, gin.H{"message": "access forbidden"}) gctx.Abort() } diff --git a/pkg/apiserver/middlewares/v1/api_key.go b/pkg/apiserver/middlewares/v1/api_key.go index 4e273371bfe..4561b8f7789 100644 --- a/pkg/apiserver/middlewares/v1/api_key.go +++ b/pkg/apiserver/middlewares/v1/api_key.go @@ -19,7 +19,7 @@ import ( const ( APIKeyHeader = "X-Api-Key" BouncerContextKey = "bouncer_info" - dummyAPIKeySize = 54 + dummyAPIKeySize = 54 // max allowed by bcrypt 72 = 54 bytes in base64 ) @@ -82,10 +82,10 @@ func (a *APIKey) authTLS(c *gin.Context, logger *log.Entry) *ent.Bouncer { bouncerName := fmt.Sprintf("%s@%s", extractedCN, c.ClientIP()) bouncer, err := a.DbClient.SelectBouncerByName(bouncerName) - //This is likely not the proper way, but isNotFound does not seem to work + // This is likely not the proper way, but isNotFound does not seem to work if err != nil && strings.Contains(err.Error(), "bouncer not found") { - //Because we have a valid cert, automatically create the bouncer in the database if it does not exist - //Set a random API key, but it will never be used + // Because we have a valid cert, automatically create the bouncer in the database if it does not exist + // Set a random API key, but it will never be used apiKey, err := GenerateAPIKey(dummyAPIKeySize) if err != nil { logger.Errorf("error generating mock api key: %s", err) @@ -100,11 +100,11 @@ func (a *APIKey) authTLS(c *gin.Context, logger *log.Entry) *ent.Bouncer { return 
nil } } else if err != nil { - //error while selecting bouncer + // error while selecting bouncer logger.Errorf("while selecting bouncers: %s", err) return nil } else if bouncer.AuthType != types.TlsAuthType { - //bouncer was found in DB + // bouncer was found in DB logger.Errorf("bouncer isn't allowed to auth by TLS") return nil } @@ -139,8 +139,10 @@ func (a *APIKey) MiddlewareFunc() gin.HandlerFunc { return func(c *gin.Context) { var bouncer *ent.Bouncer + clientIP := c.ClientIP() + logger := log.WithFields(log.Fields{ - "ip": c.ClientIP(), + "ip": clientIP, }) if c.Request.TLS != nil && len(c.Request.TLS.PeerCertificates) > 0 { @@ -152,6 +154,7 @@ func (a *APIKey) MiddlewareFunc() gin.HandlerFunc { if bouncer == nil { c.JSON(http.StatusForbidden, gin.H{"message": "access forbidden"}) c.Abort() + return } @@ -160,7 +163,7 @@ func (a *APIKey) MiddlewareFunc() gin.HandlerFunc { }) if bouncer.IPAddress == "" { - if err := a.DbClient.UpdateBouncerIP(c.ClientIP(), bouncer.ID); err != nil { + if err := a.DbClient.UpdateBouncerIP(clientIP, bouncer.ID); err != nil { logger.Errorf("Failed to update ip address for '%s': %s\n", bouncer.Name, err) c.JSON(http.StatusForbidden, gin.H{"message": "access forbidden"}) c.Abort() @@ -169,11 +172,11 @@ func (a *APIKey) MiddlewareFunc() gin.HandlerFunc { } } - //Don't update IP on HEAD request, as it's used by the appsec to check the validity of the API key provided - if bouncer.IPAddress != c.ClientIP() && bouncer.IPAddress != "" && c.Request.Method != http.MethodHead { - log.Warningf("new IP address detected for bouncer '%s': %s (old: %s)", bouncer.Name, c.ClientIP(), bouncer.IPAddress) + // Don't update IP on HEAD request, as it's used by the appsec to check the validity of the API key provided + if bouncer.IPAddress != clientIP && bouncer.IPAddress != "" && c.Request.Method != http.MethodHead { + log.Warningf("new IP address detected for bouncer '%s': %s (old: %s)", bouncer.Name, clientIP, bouncer.IPAddress) - if err := 
a.DbClient.UpdateBouncerIP(c.ClientIP(), bouncer.ID); err != nil { + if err := a.DbClient.UpdateBouncerIP(clientIP, bouncer.ID); err != nil { logger.Errorf("Failed to update ip address for '%s': %s\n", bouncer.Name, err) c.JSON(http.StatusForbidden, gin.H{"message": "access forbidden"}) c.Abort() @@ -199,6 +202,5 @@ func (a *APIKey) MiddlewareFunc() gin.HandlerFunc { } c.Set(BouncerContextKey, bouncer) - c.Next() } } diff --git a/pkg/apiserver/middlewares/v1/jwt.go b/pkg/apiserver/middlewares/v1/jwt.go index 6fe053713bc..735c5f058cb 100644 --- a/pkg/apiserver/middlewares/v1/jwt.go +++ b/pkg/apiserver/middlewares/v1/jwt.go @@ -61,6 +61,7 @@ func (j *JWT) authTLS(c *gin.Context) (*authInput, error) { if j.TlsAuth == nil { c.JSON(http.StatusForbidden, gin.H{"message": "access forbidden"}) c.Abort() + return nil, errors.New("TLS auth is not configured") } @@ -76,7 +77,8 @@ func (j *JWT) authTLS(c *gin.Context) (*authInput, error) { if !validCert { c.JSON(http.StatusForbidden, gin.H{"message": "access forbidden"}) c.Abort() - return nil, fmt.Errorf("failed cert authentication") + + return nil, errors.New("failed cert authentication") } ret.machineID = fmt.Sprintf("%s@%s", extractedCN, c.ClientIP()) @@ -85,9 +87,9 @@ func (j *JWT) authTLS(c *gin.Context) (*authInput, error) { Where(machine.MachineId(ret.machineID)). 
First(j.DbClient.CTX) if ent.IsNotFound(err) { - //Machine was not found, let's create it + // Machine was not found, let's create it log.Infof("machine %s not found, create it", ret.machineID) - //let's use an apikey as the password, doesn't matter in this case (generatePassword is only available in cscli) + // let's use an apikey as the password, doesn't matter in this case (generatePassword is only available in cscli) pwd, err := GenerateAPIKey(dummyAPIKeySize) if err != nil { log.WithFields(log.Fields{ @@ -95,7 +97,7 @@ func (j *JWT) authTLS(c *gin.Context) (*authInput, error) { "cn": extractedCN, }).Errorf("error generating password: %s", err) - return nil, fmt.Errorf("error generating password") + return nil, errors.New("error generating password") } password := strfmt.Password(pwd) @@ -110,6 +112,7 @@ func (j *JWT) authTLS(c *gin.Context) (*authInput, error) { if ret.clientMachine.AuthType != types.TlsAuthType { return nil, fmt.Errorf("machine %s attempted to auth with TLS cert but it is configured to use %s", ret.machineID, ret.clientMachine.AuthType) } + ret.machineID = ret.clientMachine.MachineId } @@ -213,18 +216,20 @@ func (j *JWT) Authenticator(c *gin.Context) (interface{}, error) { } } + clientIP := c.ClientIP() + if auth.clientMachine.IpAddress == "" { - err = j.DbClient.UpdateMachineIP(c.ClientIP(), auth.clientMachine.ID) + err = j.DbClient.UpdateMachineIP(clientIP, auth.clientMachine.ID) if err != nil { log.Errorf("Failed to update ip address for '%s': %s\n", auth.machineID, err) return nil, jwt.ErrFailedAuthentication } } - if auth.clientMachine.IpAddress != c.ClientIP() && auth.clientMachine.IpAddress != "" { - log.Warningf("new IP address detected for machine '%s': %s (old: %s)", auth.clientMachine.MachineId, c.ClientIP(), auth.clientMachine.IpAddress) + if auth.clientMachine.IpAddress != clientIP && auth.clientMachine.IpAddress != "" { + log.Warningf("new IP address detected for machine '%s': %s (old: %s)", auth.clientMachine.MachineId, 
clientIP, auth.clientMachine.IpAddress) - err = j.DbClient.UpdateMachineIP(c.ClientIP(), auth.clientMachine.ID) + err = j.DbClient.UpdateMachineIP(clientIP, auth.clientMachine.ID) if err != nil { log.Errorf("Failed to update ip address for '%s': %s\n", auth.clientMachine.MachineId, err) return nil, jwt.ErrFailedAuthentication @@ -233,13 +238,14 @@ func (j *JWT) Authenticator(c *gin.Context) (interface{}, error) { useragent := strings.Split(c.Request.UserAgent(), "/") if len(useragent) != 2 { - log.Warningf("bad user agent '%s' from '%s'", c.Request.UserAgent(), c.ClientIP()) + log.Warningf("bad user agent '%s' from '%s'", c.Request.UserAgent(), clientIP) return nil, jwt.ErrFailedAuthentication } if err := j.DbClient.UpdateMachineVersion(useragent[1], auth.clientMachine.ID); err != nil { log.Errorf("unable to update machine '%s' version '%s': %s", auth.clientMachine.MachineId, useragent[1], err) - log.Errorf("bad user agent from : %s", c.ClientIP()) + log.Errorf("bad user agent from : %s", clientIP) + return nil, jwt.ErrFailedAuthentication } @@ -323,8 +329,9 @@ func NewJWT(dbClient *database.Client) (*JWT, error) { errInit := ret.MiddlewareInit() if errInit != nil { - return &JWT{}, fmt.Errorf("authMiddleware.MiddlewareInit() Error:" + errInit.Error()) + return &JWT{}, errors.New("authMiddleware.MiddlewareInit() Error:" + errInit.Error()) } + jwtMiddleware.Middleware = ret return jwtMiddleware, nil diff --git a/pkg/csconfig/api.go b/pkg/csconfig/api.go index 7fd1f588897..4d10690735d 100644 --- a/pkg/csconfig/api.go +++ b/pkg/csconfig/api.go @@ -141,12 +141,25 @@ func (l *LocalApiClientCfg) Load() error { } if l.Credentials != nil && l.Credentials.URL != "" { - if !strings.HasSuffix(l.Credentials.URL, "/") { + // don't append a trailing slash if the URL is a unix socket + if strings.HasPrefix(l.Credentials.URL, "http") && !strings.HasSuffix(l.Credentials.URL, "/") { l.Credentials.URL += "/" } } - if l.Credentials.Login != "" && (l.Credentials.CertPath != "" || 
l.Credentials.KeyPath != "") { + // is the configuration asking for client authentication via TLS? + credTLSClientAuth := l.Credentials.CertPath != "" || l.Credentials.KeyPath != "" + + // is the configuration asking for TLS encryption and server authentication? + credTLS := credTLSClientAuth || l.Credentials.CACertPath != "" + + credSocket := strings.HasPrefix(l.Credentials.URL, "/") + + if credTLS && credSocket { + return errors.New("cannot use TLS with a unix socket") + } + + if credTLSClientAuth && l.Credentials.Login != "" { return errors.New("user/password authentication and TLS authentication are mutually exclusive") } @@ -187,10 +200,10 @@ func (l *LocalApiClientCfg) Load() error { return nil } -func (lapiCfg *LocalApiServerCfg) GetTrustedIPs() ([]net.IPNet, error) { +func (c *LocalApiServerCfg) GetTrustedIPs() ([]net.IPNet, error) { trustedIPs := make([]net.IPNet, 0) - for _, ip := range lapiCfg.TrustedIPs { + for _, ip := range c.TrustedIPs { cidr := toValidCIDR(ip) _, ipNet, err := net.ParseCIDR(cidr) @@ -225,6 +238,7 @@ type CapiWhitelist struct { type LocalApiServerCfg struct { Enable *bool `yaml:"enable"` ListenURI string `yaml:"listen_uri,omitempty"` // 127.0.0.1:8080 + ListenSocket string `yaml:"listen_socket,omitempty"` TLS *TLSCfg `yaml:"tls"` DbConfig *DatabaseCfg `yaml:"-"` LogDir string `yaml:"-"` @@ -248,6 +262,22 @@ type LocalApiServerCfg struct { CapiWhitelists *CapiWhitelist `yaml:"-"` } +func (c *LocalApiServerCfg) ClientURL() string { + if c == nil { + return "" + } + + if c.ListenSocket != "" { + return c.ListenSocket + } + + if c.ListenURI != "" { + return "http://" + c.ListenURI + } + + return "" +} + func (c *Config) LoadAPIServer(inCli bool) error { if c.DisableAPI { log.Warning("crowdsec local API is disabled from flag") @@ -255,7 +285,9 @@ func (c *Config) LoadAPIServer(inCli bool) error { if c.API.Server == nil { log.Warning("crowdsec local API is disabled") + c.DisableAPI = true + return nil } @@ -266,6 +298,7 @@ func (c *Config) 
LoadAPIServer(inCli bool) error { if !*c.API.Server.Enable { log.Warning("crowdsec local API is disabled because 'enable' is set to false") + c.DisableAPI = true } @@ -273,8 +306,8 @@ func (c *Config) LoadAPIServer(inCli bool) error { return nil } - if c.API.Server.ListenURI == "" { - return errors.New("no listen_uri specified") + if c.API.Server.ListenURI == "" && c.API.Server.ListenSocket == "" { + return errors.New("no listen_uri or listen_socket specified") } // inherit log level from common, then api->server @@ -393,21 +426,21 @@ func parseCapiWhitelists(fd io.Reader) (*CapiWhitelist, error) { return ret, nil } -func (s *LocalApiServerCfg) LoadCapiWhitelists() error { - if s.CapiWhitelistsPath == "" { +func (c *LocalApiServerCfg) LoadCapiWhitelists() error { + if c.CapiWhitelistsPath == "" { return nil } - fd, err := os.Open(s.CapiWhitelistsPath) + fd, err := os.Open(c.CapiWhitelistsPath) if err != nil { return fmt.Errorf("while opening capi whitelist file: %w", err) } defer fd.Close() - s.CapiWhitelists, err = parseCapiWhitelists(fd) + c.CapiWhitelists, err = parseCapiWhitelists(fd) if err != nil { - return fmt.Errorf("while parsing capi whitelist file '%s': %w", s.CapiWhitelistsPath, err) + return fmt.Errorf("while parsing capi whitelist file '%s': %w", c.CapiWhitelistsPath, err) } return nil diff --git a/test/bats/01_crowdsec_lapi.bats b/test/bats/01_crowdsec_lapi.bats index 233340e500f..1b7940615ed 100644 --- a/test/bats/01_crowdsec_lapi.bats +++ b/test/bats/01_crowdsec_lapi.bats @@ -32,20 +32,20 @@ teardown() { } @test "lapi (no .api.server.listen_uri)" { - rune -0 config_set 'del(.api.server.listen_uri)' + rune -0 config_set 'del(.api.server.listen_socket) | del(.api.server.listen_uri)' rune -1 "${CROWDSEC}" -no-cs - assert_stderr --partial "no listen_uri specified" + assert_stderr --partial "no listen_uri or listen_socket specified" } @test "lapi (bad .api.server.listen_uri)" { - rune -0 config_set '.api.server.listen_uri="127.0.0.1:-80"' + rune -0 
config_set 'del(.api.server.listen_socket) | .api.server.listen_uri="127.0.0.1:-80"' rune -1 "${CROWDSEC}" -no-cs - assert_stderr --partial "while starting API server: listening on 127.0.0.1:-80: listen tcp: address -80: invalid port" + assert_stderr --partial "local API server stopped with error: listening on 127.0.0.1:-80: listen tcp: address -80: invalid port" } @test "lapi (listen on random port)" { config_set '.common.log_media="stdout"' - rune -0 config_set '.api.server.listen_uri="127.0.0.1:0"' + rune -0 config_set 'del(.api.server.listen_socket) | .api.server.listen_uri="127.0.0.1:0"' rune -0 wait-for --err "CrowdSec Local API listening on 127.0.0.1:" "${CROWDSEC}" -no-cs } diff --git a/test/bats/01_cscli.bats b/test/bats/01_cscli.bats index 03f0132ea63..4c7ce7fbc2c 100644 --- a/test/bats/01_cscli.bats +++ b/test/bats/01_cscli.bats @@ -100,10 +100,14 @@ teardown() { # check that LAPI configuration is loaded (human and json, not shows in raw) + sock=$(config_get '.api.server.listen_socket') + rune -0 cscli config show -o human assert_line --regexp ".*- URL +: http://127.0.0.1:8080/" assert_line --regexp ".*- Login +: githubciXXXXXXXXXXXXXXXXXXXXXXXX([a-zA-Z0-9]{16})?" 
assert_line --regexp ".*- Credentials File +: .*/local_api_credentials.yaml" + assert_line --regexp ".*- Listen URL +: 127.0.0.1:8080" + assert_line --regexp ".*- Listen Socket +: $sock" rune -0 cscli config show -o json rune -0 jq -c '.API.Client.Credentials | [.url,.login[0:32]]' <(output) @@ -212,7 +216,6 @@ teardown() { assert_stderr --partial "Loaded credentials from" assert_stderr --partial "Trying to authenticate with username" - assert_stderr --partial " on http://127.0.0.1:8080/" assert_stderr --partial "You can successfully interact with Local API (LAPI)" } diff --git a/test/bats/09_socket.bats b/test/bats/09_socket.bats new file mode 100644 index 00000000000..f770abaad2e --- /dev/null +++ b/test/bats/09_socket.bats @@ -0,0 +1,158 @@ +#!/usr/bin/env bats +# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: + +set -u + +setup_file() { + load "../lib/setup_file.sh" + sockdir=$(TMPDIR="$BATS_FILE_TMPDIR" mktemp -u) + export sockdir + mkdir -p "$sockdir" + socket="$sockdir/crowdsec_api.sock" + export socket + LOCAL_API_CREDENTIALS=$(config_get '.api.client.credentials_path') + export LOCAL_API_CREDENTIALS +} + +teardown_file() { + load "../lib/teardown_file.sh" +} + +setup() { + load "../lib/setup.sh" + load "../lib/bats-file/load.bash" + ./instance-data load + config_set ".api.server.listen_socket=strenv(socket)" +} + +teardown() { + ./instance-crowdsec stop +} + +#---------- + +@test "cscli - connects from existing machine with socket" { + config_set "$LOCAL_API_CREDENTIALS" ".url=strenv(socket)" + + ./instance-crowdsec start + + rune -0 cscli lapi status + assert_stderr --regexp "Trying to authenticate with username .* on $socket" + assert_stderr --partial "You can successfully interact with Local API (LAPI)" +} + +@test "crowdsec - listen on both socket and TCP" { + ./instance-crowdsec start + + rune -0 cscli lapi status + assert_stderr --regexp "Trying to authenticate with username .* on http://127.0.0.1:8080/" + assert_stderr --partial "You can successfully 
interact with Local API (LAPI)" + + config_set "$LOCAL_API_CREDENTIALS" ".url=strenv(socket)" + + rune -0 cscli lapi status + assert_stderr --regexp "Trying to authenticate with username .* on $socket" + assert_stderr --partial "You can successfully interact with Local API (LAPI)" +} + +@test "cscli - authenticate new machine with socket" { + # verify that if a listen_uri and a socket are set, the socket is used + # by default when creating a local machine. + + rune -0 cscli machines delete "$(cscli machines list -o json | jq -r '.[].machineId')" + + # this one should be using the socket + rune -0 cscli machines add --auto --force + + using=$(config_get "$LOCAL_API_CREDENTIALS" ".url") + + assert [ "$using" = "$socket" ] + + # disable the agent because it counts as a first authentication + config_disable_agent + ./instance-crowdsec start + + # the machine does not have an IP yet + + rune -0 cscli machines list -o json + rune -0 jq -r '.[].ipAddress' <(output) + assert_output null + + # upon first authentication, it's assigned to localhost + + rune -0 cscli lapi status + + rune -0 cscli machines list -o json + rune -0 jq -r '.[].ipAddress' <(output) + assert_output 127.0.0.1 +} + +bouncer_http() { + URI="$1" + curl -fs -H "X-Api-Key: $API_KEY" "http://localhost:8080$URI" +} + +bouncer_socket() { + URI="$1" + curl -fs -H "X-Api-Key: $API_KEY" --unix-socket "$socket" "http://localhost$URI" +} + +@test "lapi - connects from existing bouncer with socket" { + ./instance-crowdsec start + API_KEY=$(cscli bouncers add testbouncer -o raw) + export API_KEY + + # the bouncer does not have an IP yet + + rune -0 cscli bouncers list -o json + rune -0 jq -r '.[].ip_address' <(output) + assert_output "" + + # upon first authentication, it's assigned to localhost + + rune -0 bouncer_socket '/v1/decisions' + assert_output 'null' + refute_stderr + + rune -0 cscli bouncers list -o json + rune -0 jq -r '.[].ip_address' <(output) + assert_output "127.0.0.1" + + # we can still use TCP of 
course + + rune -0 bouncer_http '/v1/decisions' + assert_output 'null' + refute_stderr +} + +@test "lapi - listen on socket only" { + config_set "del(.api.server.listen_uri)" + + mkdir -p "$sockdir" + + # agent is not able to connect right now + config_disable_agent + ./instance-crowdsec start + + API_KEY=$(cscli bouncers add testbouncer -o raw) + export API_KEY + + # now we can't + + rune -1 cscli lapi status + assert_stderr --partial "connection refused" + + rune -7 bouncer_http '/v1/decisions' + refute_output + refute_stderr + + # here we can + + config_set "$LOCAL_API_CREDENTIALS" ".url=strenv(socket)" + + rune -0 cscli lapi status + + rune -0 bouncer_socket '/v1/decisions' + assert_output 'null' + refute_stderr +} diff --git a/test/bats/30_machines_tls.bats b/test/bats/30_machines_tls.bats index 311293ca70c..6909c89cb1f 100644 --- a/test/bats/30_machines_tls.bats +++ b/test/bats/30_machines_tls.bats @@ -120,7 +120,50 @@ teardown() { rune -0 jq -c '[. | length, .[0].machineId[0:32], .[0].isValidated, .[0].ipAddress, .[0].auth_type]' <(output) assert_output '[1,"localhost@127.0.0.1",true,"127.0.0.1","tls"]' - cscli machines delete localhost@127.0.0.1 + rune -0 cscli machines delete localhost@127.0.0.1 +} + +@test "a machine can still connect with a unix socket, no TLS" { + sock=$(config_get '.api.server.listen_socket') + export sock + + # an agent is a machine too + config_disable_agent + ./instance-crowdsec start + + rune -0 cscli machines add with-socket --auto --force + rune -0 cscli lapi status + + rune -0 cscli machines list -o json + rune -0 jq -c '[. 
| length, .[0].machineId[0:32], .[0].isValidated, .[0].ipAddress, .[0].auth_type]' <(output) + assert_output '[1,"with-socket",true,"127.0.0.1","password"]' + + # TLS cannot be used with a unix socket + + config_set "${CONFIG_DIR}/local_api_credentials.yaml" ' + .ca_cert_path=strenv(tmpdir) + "/bundle.pem" + ' + + rune -1 cscli lapi status + assert_stderr --partial "loading api client: cannot use TLS with a unix socket" + + config_set "${CONFIG_DIR}/local_api_credentials.yaml" ' + del(.ca_cert_path) | + .key_path=strenv(tmpdir) + "/agent-key.pem" + ' + + rune -1 cscli lapi status + assert_stderr --partial "loading api client: cannot use TLS with a unix socket" + + config_set "${CONFIG_DIR}/local_api_credentials.yaml" ' + del(.key_path) | + .cert_path=strenv(tmpdir) + "/agent.pem" + ' + + rune -1 cscli lapi status + assert_stderr --partial "loading api client: cannot use TLS with a unix socket" + + rune -0 cscli machines delete with-socket } @test "invalid cert for agent" { diff --git a/test/lib/config/config-global b/test/lib/config/config-global index 68346c18875..0caf0591f7d 100755 --- a/test/lib/config/config-global +++ b/test/lib/config/config-global @@ -58,6 +58,7 @@ config_prepare() { # remove trailing slash from CONFIG_DIR # since it's assumed to be missing during the tests yq e -i ' + .api.server.listen_socket="/run/crowdsec.sock" | .config_paths.config_dir |= sub("/$", "") ' "${CONFIG_DIR}/config.yaml" } diff --git a/test/lib/config/config-local b/test/lib/config/config-local index e3b7bc685d4..e5cfaf997be 100755 --- a/test/lib/config/config-local +++ b/test/lib/config/config-local @@ -57,7 +57,6 @@ config_generate() { cp ../config/profiles.yaml \ ../config/simulation.yaml \ - ../config/local_api_credentials.yaml \ ../config/online_api_credentials.yaml \ "${CONFIG_DIR}/" @@ -95,6 +94,7 @@ config_generate() { .db_config.db_path=strenv(DATA_DIR)+"/crowdsec.db" | .db_config.use_wal=true | 
.api.client.credentials_path=strenv(CONFIG_DIR)+"/local_api_credentials.yaml" | + .api.server.listen_socket=strenv(DATA_DIR)+"/crowdsec.sock" | .api.server.profiles_path=strenv(CONFIG_DIR)+"/profiles.yaml" | .api.server.console_path=strenv(CONFIG_DIR)+"/console.yaml" | del(.api.server.online_client) @@ -119,7 +119,8 @@ make_init_data() { ./bin/preload-hub-items - "$CSCLI" --warning machines add githubciXXXXXXXXXXXXXXXXXXXXXXXX --auto --force + # force TCP, the default would be unix socket + "$CSCLI" --warning machines add githubciXXXXXXXXXXXXXXXXXXXXXXXX --url http://127.0.0.1:8080 --auto --force mkdir -p "$LOCAL_INIT_DIR" From 742f5e8cdab3057ae04f68b445d0f9222bbbea40 Mon Sep 17 00:00:00 2001 From: blotus Date: Thu, 14 Mar 2024 14:00:39 +0100 Subject: [PATCH 060/318] [appsec] delete api key header before processing the request (#2890) --- pkg/appsec/request.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/appsec/request.go b/pkg/appsec/request.go index effb1828307..5b77f2285a3 100644 --- a/pkg/appsec/request.go +++ b/pkg/appsec/request.go @@ -320,6 +320,7 @@ func NewParsedRequestFromRequest(r *http.Request, logger *logrus.Entry) (ParsedR delete(r.Header, URIHeaderName) delete(r.Header, VerbHeaderName) delete(r.Header, UserAgentHeaderName) + delete(r.Header, APIKeyHeaderName) originalHTTPRequest := r.Clone(r.Context()) originalHTTPRequest.Body = io.NopCloser(bytes.NewBuffer(body)) From 06bebdeac78184c771fc077fccd9890e3493af63 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 14 Mar 2024 14:01:09 +0100 Subject: [PATCH 061/318] Bump google.golang.org/protobuf from 1.31.0 to 1.33.0 (#2893) Bumps google.golang.org/protobuf from 1.31.0 to 1.33.0. 
--- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index a2b9c8618c6..22d52be1ca8 100644 --- a/go.mod +++ b/go.mod @@ -86,7 +86,7 @@ require ( golang.org/x/sys v0.15.0 golang.org/x/text v0.14.0 google.golang.org/grpc v1.56.3 - google.golang.org/protobuf v1.31.0 + google.golang.org/protobuf v1.33.0 gopkg.in/natefinch/lumberjack.v2 v2.2.1 gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637 gopkg.in/yaml.v2 v2.4.0 diff --git a/go.sum b/go.sum index 08a454cf0f0..07bddaf8967 100644 --- a/go.sum +++ b/go.sum @@ -908,8 +908,8 @@ google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc= google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= -google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= From 7dd86e2b95d73e70d3799223bf4e33dd0cb18036 Mon Sep 17 00:00:00 2001 From: blotus Date: Thu, 14 Mar 2024 14:02:53 +0100 Subject: [PATCH 062/318] add cron as a suggested package (#2799) --- debian/control | 1 + 1 file changed, 1 insertion(+) diff --git a/debian/control b/debian/control index 4673284e7b4..0ee08b71f85 100644 --- a/debian/control +++ b/debian/control @@ -8,3 +8,4 @@ Package: crowdsec 
Architecture: any Description: Crowdsec - An open-source, lightweight agent to detect and respond to bad behaviors. It also automatically benefits from our global community-wide IP reputation database Depends: coreutils +Suggests: cron From caca4032d18ab60a3b5c1437c81c29a3792cded9 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 14 Mar 2024 14:03:43 +0100 Subject: [PATCH 063/318] lapi: log error "can't sinchronize with console" only if papi is enabled (#2896) --- pkg/apiserver/apiserver.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go index e42ad9a98fa..7989cfc1d97 100644 --- a/pkg/apiserver/apiserver.go +++ b/pkg/apiserver/apiserver.go @@ -250,8 +250,8 @@ func NewServer(config *csconfig.LocalApiServerCfg) (*APIServer, error) { controller.AlertsAddChan = apiClient.AlertsAddChan - if apiClient.apiClient.IsEnrolled() { - if config.ConsoleConfig.IsPAPIEnabled() { + if config.ConsoleConfig.IsPAPIEnabled() { + if apiClient.apiClient.IsEnrolled() { log.Info("Machine is enrolled in the console, Loading PAPI Client") papiClient, err = NewPAPI(apiClient, dbClient, config.ConsoleConfig, *config.PapiLogLevel) @@ -260,9 +260,9 @@ func NewServer(config *csconfig.LocalApiServerCfg) (*APIServer, error) { } controller.DecisionDeleteChan = papiClient.Channels.DeleteDecisionChannel + } else { + log.Error("Machine is not enrolled in the console, can't synchronize with the console") } - } else { - log.Errorf("Machine is not enrolled in the console, can't synchronize with the console") } } From a6b0e5838086530294adc640ef4fe731c31b6edb Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 14 Mar 2024 14:04:45 +0100 Subject: [PATCH 064/318] CI: bump github actions (#2895) --- .github/workflows/bats-hub.yml | 4 ++-- .github/workflows/bats-sqlite-coverage.yml | 2 +- .github/workflows/ci-windows-build-msi.yml | 2 +- 
.github/workflows/ci_release-drafter.yml | 2 +- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/go-tests-windows.yml | 4 ++-- .github/workflows/go-tests.yml | 4 ++-- .github/workflows/governance-bot.yaml | 2 +- 8 files changed, 11 insertions(+), 11 deletions(-) diff --git a/.github/workflows/bats-hub.yml b/.github/workflows/bats-hub.yml index 075480485ff..816234adf61 100644 --- a/.github/workflows/bats-hub.yml +++ b/.github/workflows/bats-hub.yml @@ -53,7 +53,7 @@ jobs: run: ./test/bin/collect-hub-coverage >> $GITHUB_ENV - name: "Create Parsers badge" - uses: schneegans/dynamic-badges-action@v1.6.0 + uses: schneegans/dynamic-badges-action@v1.7.0 if: ${{ github.ref == 'refs/heads/master' && github.repository_owner == 'crowdsecurity' }} with: auth: ${{ secrets.GIST_BADGES_SECRET }} @@ -64,7 +64,7 @@ jobs: color: ${{ env.SCENARIO_BADGE_COLOR }} - name: "Create Scenarios badge" - uses: schneegans/dynamic-badges-action@v1.6.0 + uses: schneegans/dynamic-badges-action@v1.7.0 if: ${{ github.ref == 'refs/heads/master' && github.repository_owner == 'crowdsecurity' }} with: auth: ${{ secrets.GIST_BADGES_SECRET }} diff --git a/.github/workflows/bats-sqlite-coverage.yml b/.github/workflows/bats-sqlite-coverage.yml index 436eb0f04a4..8e890e0f8b9 100644 --- a/.github/workflows/bats-sqlite-coverage.yml +++ b/.github/workflows/bats-sqlite-coverage.yml @@ -77,7 +77,7 @@ jobs: if: ${{ always() }} - name: Upload crowdsec coverage to codecov - uses: codecov/codecov-action@v3 + uses: codecov/codecov-action@v4 with: files: ./coverage-bats.out flags: bats diff --git a/.github/workflows/ci-windows-build-msi.yml b/.github/workflows/ci-windows-build-msi.yml index 7c6a6621de4..e116e0d2e24 100644 --- a/.github/workflows/ci-windows-build-msi.yml +++ b/.github/workflows/ci-windows-build-msi.yml @@ -40,7 +40,7 @@ jobs: - name: Build run: make windows_installer BUILD_RE2_WASM=1 - name: Upload MSI - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: path: 
crowdsec*msi name: crowdsec.msi diff --git a/.github/workflows/ci_release-drafter.yml b/.github/workflows/ci_release-drafter.yml index 2ccb6977cfd..0b8c9b386e6 100644 --- a/.github/workflows/ci_release-drafter.yml +++ b/.github/workflows/ci_release-drafter.yml @@ -12,7 +12,7 @@ jobs: runs-on: ubuntu-latest steps: # Drafts your next Release notes as Pull Requests are merged into "master" - - uses: release-drafter/release-drafter@v5 + - uses: release-drafter/release-drafter@v6 with: config-name: release-drafter.yml # (Optional) specify config name to use, relative to .github/. Default: release-drafter.yml diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index bdc16e650f6..7c4e749ece7 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -68,7 +68,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) # - name: Autobuild - # uses: github/codeql-action/autobuild@v2 + # uses: github/codeql-action/autobuild@v3 # ℹ️ Command-line programs to run using the OS shell. 
# 📚 https://git.io/JvXDl diff --git a/.github/workflows/go-tests-windows.yml b/.github/workflows/go-tests-windows.yml index efe16ed66d9..9dfcc089a63 100644 --- a/.github/workflows/go-tests-windows.yml +++ b/.github/workflows/go-tests-windows.yml @@ -48,13 +48,13 @@ jobs: cat out.txt | sed 's/ *coverage:.*of statements in.*//' | richgo testfilter - name: Upload unit coverage to Codecov - uses: codecov/codecov-action@v3 + uses: codecov/codecov-action@v4 with: files: coverage.out flags: unit-windows - name: golangci-lint - uses: golangci/golangci-lint-action@v3 + uses: golangci/golangci-lint-action@v4 with: version: v1.55 args: --issues-exit-code=1 --timeout 10m diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index 865b2782a63..61dadfc0b78 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -149,13 +149,13 @@ jobs: make go-acc | sed 's/ *coverage:.*of statements in.*//' | richgo testfilter - name: Upload unit coverage to Codecov - uses: codecov/codecov-action@v3 + uses: codecov/codecov-action@v4 with: files: coverage.out flags: unit-linux - name: golangci-lint - uses: golangci/golangci-lint-action@v3 + uses: golangci/golangci-lint-action@v4 with: version: v1.55 args: --issues-exit-code=1 --timeout 10m diff --git a/.github/workflows/governance-bot.yaml b/.github/workflows/governance-bot.yaml index 5c08cabf5d1..c9e73e7811a 100644 --- a/.github/workflows/governance-bot.yaml +++ b/.github/workflows/governance-bot.yaml @@ -23,7 +23,7 @@ jobs: runs-on: ubuntu-latest steps: # Semantic versioning, lock to different version: v2, v2.0 or a commit hash. 
- - uses: BirthdayResearch/oss-governance-bot@v3 + - uses: BirthdayResearch/oss-governance-bot@v4 with: # You can use a PAT to post a comment/label/status so that it shows up as a user instead of github-actions github-token: ${{secrets.GITHUB_TOKEN}} # optional, default to '${{ github.token }}' From e9b0f3c54ed76cafa0a453c1417efa5846c99f5a Mon Sep 17 00:00:00 2001 From: Laurence Jones Date: Thu, 14 Mar 2024 15:36:47 +0000 Subject: [PATCH 065/318] wip: fix unix socket error (#2897) --- pkg/appsec/request.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pkg/appsec/request.go b/pkg/appsec/request.go index 5b77f2285a3..a9eb0d372a4 100644 --- a/pkg/appsec/request.go +++ b/pkg/appsec/request.go @@ -342,6 +342,10 @@ func NewParsedRequestFromRequest(r *http.Request, logger *logrus.Entry) (ParsedR } var remoteAddrNormalized string + if r.RemoteAddr == "@" { + r.RemoteAddr = "127.0.0.1:65535" + } + // TODO we need to implement forwrded headers host, _, err := net.SplitHostPort(r.RemoteAddr) if err != nil { log.Errorf("Invalid appsec remote IP source %v: %s", r.RemoteAddr, err.Error()) From fd2bb8927cc8288bb6494f945abcdd67883f0905 Mon Sep 17 00:00:00 2001 From: Manuel Sabban Date: Fri, 15 Mar 2024 14:36:34 +0100 Subject: [PATCH 066/318] Fix rpm build (#2894) * fix rpm build --- rpm/SOURCES/crowdsec.unit.patch | 15 --------------- rpm/SPECS/crowdsec.spec | 6 ++---- 2 files changed, 2 insertions(+), 19 deletions(-) delete mode 100644 rpm/SOURCES/crowdsec.unit.patch diff --git a/rpm/SOURCES/crowdsec.unit.patch b/rpm/SOURCES/crowdsec.unit.patch deleted file mode 100644 index 5173beb2710..00000000000 --- a/rpm/SOURCES/crowdsec.unit.patch +++ /dev/null @@ -1,15 +0,0 @@ ---- config/crowdsec.service-orig 2022-03-24 09:46:16.581681532 +0000 -+++ config/crowdsec.service 2022-03-24 09:46:28.761681532 +0000 -@@ -5,8 +5,8 @@ - [Service] - Type=notify - Environment=LC_ALL=C LANG=C --ExecStartPre=/usr/local/bin/crowdsec -c /etc/crowdsec/config.yaml -t -error 
--ExecStart=/usr/local/bin/crowdsec -c /etc/crowdsec/config.yaml -+ExecStartPre=/usr/bin/crowdsec -c /etc/crowdsec/config.yaml -t -error -+ExecStart=/usr/bin/crowdsec -c /etc/crowdsec/config.yaml - #ExecStartPost=/bin/sleep 0.1 --ExecReload=/usr/local/bin/crowdsec -c /etc/crowdsec/config.yaml -t -error -+ExecReload=/usr/bin/crowdsec -c /etc/crowdsec/config.yaml -t -error - ExecReload=/bin/kill -HUP $MAINPID - Restart=always diff --git a/rpm/SPECS/crowdsec.spec b/rpm/SPECS/crowdsec.spec index f14df932590..60884dfd4e6 100644 --- a/rpm/SPECS/crowdsec.spec +++ b/rpm/SPECS/crowdsec.spec @@ -8,8 +8,7 @@ License: MIT URL: https://crowdsec.net Source0: https://github.com/crowdsecurity/%{name}/archive/v%(echo $VERSION).tar.gz Source1: 80-%{name}.preset -Patch0: crowdsec.unit.patch -Patch1: user.patch +Patch0: user.patch BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) BuildRequires: systemd @@ -32,7 +31,6 @@ Requires: crontabs %setup -q -T -b 0 %patch0 -%patch1 %build sed -i "s#/usr/local/lib/crowdsec/plugins/#%{_libdir}/%{name}/plugins/#g" config/config.yaml @@ -53,7 +51,7 @@ mkdir -p %{buildroot}%{_libdir}/%{name}/plugins/ install -m 755 -D cmd/crowdsec/crowdsec %{buildroot}%{_bindir}/%{name} install -m 755 -D cmd/crowdsec-cli/cscli %{buildroot}%{_bindir}/cscli install -m 755 -D wizard.sh %{buildroot}/usr/share/crowdsec/wizard.sh -install -m 644 -D config/crowdsec.service %{buildroot}%{_unitdir}/%{name}.service +install -m 644 -D debian/crowdsec.service %{buildroot}%{_unitdir}/%{name}.service install -m 644 -D config/patterns/* -t %{buildroot}%{_sysconfdir}/crowdsec/patterns install -m 600 -D config/config.yaml %{buildroot}%{_sysconfdir}/crowdsec install -m 644 -D config/simulation.yaml %{buildroot}%{_sysconfdir}/crowdsec From 2f490881637c28325e7567eb2f840a921137e921 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 18 Mar 2024 11:25:45 +0100 Subject: [PATCH 067/318] file acquisition: don't bubble error when 
tailed file disappears (#2903) * file acquisition: don't bubble error when tailed file disappears * don't call t.Kill() * lint (whitespace) --- pkg/acquisition/modules/file/file.go | 119 +++++++++++++++++++++------ 1 file changed, 96 insertions(+), 23 deletions(-) diff --git a/pkg/acquisition/modules/file/file.go b/pkg/acquisition/modules/file/file.go index efc89715296..ee3c844a556 100644 --- a/pkg/acquisition/modules/file/file.go +++ b/pkg/acquisition/modules/file/file.go @@ -3,6 +3,7 @@ package fileacquisition import ( "bufio" "compress/gzip" + "errors" "fmt" "io" "net/url" @@ -16,7 +17,6 @@ import ( "github.com/fsnotify/fsnotify" "github.com/nxadm/tail" - "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" log "github.com/sirupsen/logrus" "gopkg.in/tomb.v2" @@ -63,6 +63,7 @@ func (f *FileSource) GetUuid() string { func (f *FileSource) UnmarshalConfig(yamlConfig []byte) error { f.config = FileConfiguration{} + err := yaml.UnmarshalStrict(yamlConfig, &f.config) if err != nil { return fmt.Errorf("cannot parse FileAcquisition configuration: %w", err) @@ -77,7 +78,7 @@ func (f *FileSource) UnmarshalConfig(yamlConfig []byte) error { } if len(f.config.Filenames) == 0 { - return fmt.Errorf("no filename or filenames configuration provided") + return errors.New("no filename or filenames configuration provided") } if f.config.Mode == "" { @@ -93,6 +94,7 @@ func (f *FileSource) UnmarshalConfig(yamlConfig []byte) error { if err != nil { return fmt.Errorf("could not compile regexp %s: %w", exclude, err) } + f.exclude_regexps = append(f.exclude_regexps, re) } @@ -123,56 +125,68 @@ func (f *FileSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLeve if f.config.ForceInotify { directory := filepath.Dir(pattern) f.logger.Infof("Force add watch on %s", directory) + if !f.watchedDirectories[directory] { err = f.watcher.Add(directory) if err != nil { f.logger.Errorf("Could not create watch on directory %s : %s", directory, err) continue } + 
f.watchedDirectories[directory] = true } } + files, err := filepath.Glob(pattern) if err != nil { return fmt.Errorf("glob failure: %w", err) } + if len(files) == 0 { f.logger.Warnf("No matching files for pattern %s", pattern) continue } - for _, file := range files { - //check if file is excluded + for _, file := range files { + // check if file is excluded excluded := false + for _, pattern := range f.exclude_regexps { if pattern.MatchString(file) { excluded = true + f.logger.Infof("Skipping file %s as it matches exclude pattern %s", file, pattern) + break } } + if excluded { continue } - if files[0] != pattern && f.config.Mode == configuration.TAIL_MODE { //we have a glob pattern + + if files[0] != pattern && f.config.Mode == configuration.TAIL_MODE { // we have a glob pattern directory := filepath.Dir(file) f.logger.Debugf("Will add watch to directory: %s", directory) - if !f.watchedDirectories[directory] { + if !f.watchedDirectories[directory] { err = f.watcher.Add(directory) if err != nil { f.logger.Errorf("Could not create watch on directory %s : %s", directory, err) continue } + f.watchedDirectories[directory] = true } else { f.logger.Debugf("Watch for directory %s already exists", directory) } } + f.logger.Infof("Adding file %s to datasources", file) f.files = append(f.files, file) } } + return nil } @@ -189,7 +203,7 @@ func (f *FileSource) ConfigureByDSN(dsn string, labels map[string]string, logger args := strings.Split(dsn, "?") if len(args[0]) == 0 { - return fmt.Errorf("empty file:// DSN") + return errors.New("empty file:// DSN") } if len(args) == 2 && len(args[1]) != 0 { @@ -197,25 +211,30 @@ func (f *FileSource) ConfigureByDSN(dsn string, labels map[string]string, logger if err != nil { return fmt.Errorf("could not parse file args: %w", err) } + for key, value := range params { switch key { case "log_level": if len(value) != 1 { return errors.New("expected zero or one value for 'log_level'") } + lvl, err := log.ParseLevel(value[0]) if err != nil { 
return fmt.Errorf("unknown level %s: %w", value[0], err) } + f.logger.Logger.SetLevel(lvl) case "max_buffer_size": if len(value) != 1 { return errors.New("expected zero or one value for 'max_buffer_size'") } + maxBufferSize, err := strconv.Atoi(value[0]) if err != nil { return fmt.Errorf("could not parse max_buffer_size %s: %w", value[0], err) } + f.config.MaxBufferSize = maxBufferSize default: return fmt.Errorf("unknown parameter %s", key) @@ -228,6 +247,7 @@ func (f *FileSource) ConfigureByDSN(dsn string, labels map[string]string, logger f.config.UniqueId = uuid f.logger.Debugf("Will try pattern %s", args[0]) + files, err := filepath.Glob(args[0]) if err != nil { return fmt.Errorf("glob failure: %w", err) @@ -245,6 +265,7 @@ func (f *FileSource) ConfigureByDSN(dsn string, labels map[string]string, logger f.logger.Infof("Adding file %s to filelist", file) f.files = append(f.files, file) } + return nil } @@ -260,22 +281,26 @@ func (f *FileSource) SupportedModes() []string { // OneShotAcquisition reads a set of file and returns when done func (f *FileSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error { f.logger.Debug("In oneshot") + for _, file := range f.files { fi, err := os.Stat(file) if err != nil { return fmt.Errorf("could not stat file %s : %w", file, err) } + if fi.IsDir() { f.logger.Warnf("%s is a directory, ignoring it.", file) continue } + f.logger.Infof("reading %s at once", file) + err = f.readFile(file, out, t) if err != nil { return err } - } + return nil } @@ -300,27 +325,33 @@ func (f *FileSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) er t.Go(func() error { return f.monitorNewFiles(out, t) }) + for _, file := range f.files { - //before opening the file, check if we need to specifically avoid it. (XXX) + // before opening the file, check if we need to specifically avoid it. 
(XXX) skip := false + for _, pattern := range f.exclude_regexps { if pattern.MatchString(file) { f.logger.Infof("file %s matches exclusion pattern %s, skipping", file, pattern.String()) + skip = true + break } } + if skip { continue } - //cf. https://github.com/crowdsecurity/crowdsec/issues/1168 - //do not rely on stat, reclose file immediately as it's opened by Tail + // cf. https://github.com/crowdsecurity/crowdsec/issues/1168 + // do not rely on stat, reclose file immediately as it's opened by Tail fd, err := os.Open(file) if err != nil { f.logger.Errorf("unable to read %s : %s", file, err) continue } + if err := fd.Close(); err != nil { f.logger.Errorf("unable to close %s : %s", file, err) continue @@ -330,6 +361,7 @@ func (f *FileSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) er if err != nil { return fmt.Errorf("could not stat file %s : %w", file, err) } + if fi.IsDir() { f.logger.Warnf("%s is a directory, ignoring it.", file) continue @@ -343,9 +375,12 @@ func (f *FileSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) er if err != nil { f.logger.Warningf("Could not get fs type for %s : %s", file, err) } + f.logger.Debugf("fs for %s is network: %t (%s)", file, networkFS, fsType) + if networkFS { f.logger.Warnf("Disabling inotify poll on %s as it is on a network share. 
You can manually set poll_without_inotify to true to make this message disappear, or to false to enforce inotify poll", file) + inotifyPoll = false } } @@ -355,6 +390,7 @@ func (f *FileSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) er f.logger.Errorf("Could not start tailing file %s : %s", file, err) continue } + f.tailMapMutex.Lock() f.tails[file] = true f.tailMapMutex.Unlock() @@ -363,6 +399,7 @@ func (f *FileSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) er return f.tailFile(out, t, tail) }) } + return nil } @@ -372,6 +409,7 @@ func (f *FileSource) Dump() interface{} { func (f *FileSource) monitorNewFiles(out chan types.Event, t *tomb.Tomb) error { logger := f.logger.WithField("goroutine", "inotify") + for { select { case event, ok := <-f.watcher.Events: @@ -385,36 +423,47 @@ func (f *FileSource) monitorNewFiles(out chan types.Event, t *tomb.Tomb) error { logger.Errorf("Could not stat() new file %s, ignoring it : %s", event.Name, err) continue } + if fi.IsDir() { continue } + logger.Debugf("Detected new file %s", event.Name) + matched := false + for _, pattern := range f.config.Filenames { logger.Debugf("Matching %s with %s", pattern, event.Name) + matched, err = filepath.Match(pattern, event.Name) if err != nil { logger.Errorf("Could not match pattern : %s", err) continue } + if matched { logger.Debugf("Matched %s with %s", pattern, event.Name) break } } + if !matched { continue } - //before opening the file, check if we need to specifically avoid it. (XXX) + // before opening the file, check if we need to specifically avoid it. 
(XXX) skip := false + for _, pattern := range f.exclude_regexps { if pattern.MatchString(event.Name) { f.logger.Infof("file %s matches exclusion pattern %s, skipping", event.Name, pattern.String()) + skip = true + break } } + if skip { continue } @@ -422,13 +471,14 @@ func (f *FileSource) monitorNewFiles(out chan types.Event, t *tomb.Tomb) error { f.tailMapMutex.RLock() if f.tails[event.Name] { f.tailMapMutex.RUnlock() - //we already have a tail on it, do not start a new one + // we already have a tail on it, do not start a new one logger.Debugf("Already tailing file %s, not creating a new tail", event.Name) + break } f.tailMapMutex.RUnlock() - //cf. https://github.com/crowdsecurity/crowdsec/issues/1168 - //do not rely on stat, reclose file immediately as it's opened by Tail + // cf. https://github.com/crowdsecurity/crowdsec/issues/1168 + // do not rely on stat, reclose file immediately as it's opened by Tail fd, err := os.Open(event.Name) if err != nil { f.logger.Errorf("unable to read %s : %s", event.Name, err) @@ -450,19 +500,22 @@ func (f *FileSource) monitorNewFiles(out chan types.Event, t *tomb.Tomb) error { if err != nil { f.logger.Warningf("Could not get fs type for %s : %s", event.Name, err) } + f.logger.Debugf("fs for %s is network: %t (%s)", event.Name, networkFS, fsType) + if networkFS { inotifyPoll = false } } } - //Slightly different parameters for Location, as we want to read the first lines of the newly created file + // Slightly different parameters for Location, as we want to read the first lines of the newly created file tail, err := tail.TailFile(event.Name, tail.Config{ReOpen: true, Follow: true, Poll: inotifyPoll, Location: &tail.SeekInfo{Offset: 0, Whence: io.SeekStart}}) if err != nil { logger.Errorf("Could not start tailing file %s : %s", event.Name, err) break } + f.tailMapMutex.Lock() f.tails[event.Name] = true f.tailMapMutex.Unlock() @@ -475,12 +528,14 @@ func (f *FileSource) monitorNewFiles(out chan types.Event, t *tomb.Tomb) error { if 
!ok { return nil } + logger.Errorf("Error while monitoring folder: %s", err) case <-t.Dying(): err := f.watcher.Close() if err != nil { return fmt.Errorf("could not remove all inotify watches: %w", err) } + return nil } } @@ -489,39 +544,47 @@ func (f *FileSource) monitorNewFiles(out chan types.Event, t *tomb.Tomb) error { func (f *FileSource) tailFile(out chan types.Event, t *tomb.Tomb, tail *tail.Tail) error { logger := f.logger.WithField("tail", tail.Filename) logger.Debugf("-> Starting tail of %s", tail.Filename) + for { select { case <-t.Dying(): logger.Infof("File datasource %s stopping", tail.Filename) + if err := tail.Stop(); err != nil { f.logger.Errorf("error in stop : %s", err) return err } + return nil - case <-tail.Dying(): //our tailer is dying + case <-tail.Dying(): // our tailer is dying err := tail.Err() errMsg := fmt.Sprintf("file reader of %s died", tail.Filename) if err != nil { errMsg = fmt.Sprintf(errMsg+" : %s", err) } + logger.Warningf(errMsg) - t.Kill(fmt.Errorf(errMsg)) - return fmt.Errorf(errMsg) + + return nil case line := <-tail.Lines: if line == nil { logger.Warningf("tail for %s is empty", tail.Filename) continue } + if line.Err != nil { logger.Warningf("fetch error : %v", line.Err) return line.Err } - if line.Text == "" { //skip empty lines + + if line.Text == "" { // skip empty lines continue } + if f.metricsLevel != configuration.METRICS_NONE { linesRead.With(prometheus.Labels{"source": tail.Filename}).Inc() } + src := tail.Filename if f.metricsLevel == configuration.METRICS_AGGREGATE { src = filepath.Base(tail.Filename) @@ -535,7 +598,7 @@ func (f *FileSource) tailFile(out chan types.Event, t *tomb.Tomb, tail *tail.Tai Process: true, Module: f.GetName(), } - //we're tailing, it must be real time logs + // we're tailing, it must be real time logs logger.Debugf("pushing %+v", l) expectMode := types.LIVE @@ -549,12 +612,14 @@ func (f *FileSource) tailFile(out chan types.Event, t *tomb.Tomb, tail *tail.Tai func (f *FileSource) 
readFile(filename string, out chan types.Event, t *tomb.Tomb) error { var scanner *bufio.Scanner + logger := f.logger.WithField("oneshot", filename) fd, err := os.Open(filename) if err != nil { return fmt.Errorf("failed opening %s: %w", filename, err) } + defer fd.Close() if strings.HasSuffix(filename, ".gz") { @@ -563,17 +628,20 @@ func (f *FileSource) readFile(filename string, out chan types.Event, t *tomb.Tom logger.Errorf("Failed to read gz file: %s", err) return fmt.Errorf("failed to read gz %s: %w", filename, err) } + defer gz.Close() scanner = bufio.NewScanner(gz) - } else { scanner = bufio.NewScanner(fd) } + scanner.Split(bufio.ScanLines) + if f.config.MaxBufferSize > 0 { buf := make([]byte, 0, 64*1024) scanner.Buffer(buf, f.config.MaxBufferSize) } + for scanner.Scan() { select { case <-t.Dying(): @@ -583,6 +651,7 @@ func (f *FileSource) readFile(filename string, out chan types.Event, t *tomb.Tom if scanner.Text() == "" { continue } + l := types.Line{ Raw: scanner.Text(), Time: time.Now().UTC(), @@ -594,15 +663,19 @@ func (f *FileSource) readFile(filename string, out chan types.Event, t *tomb.Tom logger.Debugf("line %s", l.Raw) linesRead.With(prometheus.Labels{"source": filename}).Inc() - //we're reading logs at once, it must be time-machine buckets + // we're reading logs at once, it must be time-machine buckets out <- types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: types.TIMEMACHINE} } } + if err := scanner.Err(); err != nil { logger.Errorf("Error while reading file: %s", err) t.Kill(err) + return err } + t.Kill(nil) + return nil } From b411782648a958fab35d307fda27803b9751a8f6 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 19 Mar 2024 10:03:54 +0100 Subject: [PATCH 068/318] CI: use go 1.21.8 (#2906) --- .github/workflows/bats-hub.yml | 2 +- .github/workflows/bats-mysql.yml | 2 +- .github/workflows/bats-postgres.yml | 2 +- .github/workflows/bats-sqlite-coverage.yml | 2 +- 
.github/workflows/ci-windows-build-msi.yml | 2 +- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/go-tests-windows.yml | 2 +- .github/workflows/go-tests.yml | 2 +- .github/workflows/publish-tarball-release.yml | 2 +- Dockerfile | 2 +- Dockerfile.debian | 2 +- azure-pipelines.yml | 2 +- 12 files changed, 12 insertions(+), 12 deletions(-) diff --git a/.github/workflows/bats-hub.yml b/.github/workflows/bats-hub.yml index 816234adf61..bd84389b011 100644 --- a/.github/workflows/bats-hub.yml +++ b/.github/workflows/bats-hub.yml @@ -33,7 +33,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.7" + go-version: "1.21.8" - name: "Install bats dependencies" env: diff --git a/.github/workflows/bats-mysql.yml b/.github/workflows/bats-mysql.yml index 5c019933304..cc90961bfd6 100644 --- a/.github/workflows/bats-mysql.yml +++ b/.github/workflows/bats-mysql.yml @@ -36,7 +36,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.7" + go-version: "1.21.8" - name: "Install bats dependencies" env: diff --git a/.github/workflows/bats-postgres.yml b/.github/workflows/bats-postgres.yml index 0f3c69ccefa..18cbb50a12e 100644 --- a/.github/workflows/bats-postgres.yml +++ b/.github/workflows/bats-postgres.yml @@ -45,7 +45,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.7" + go-version: "1.21.8" - name: "Install bats dependencies" env: diff --git a/.github/workflows/bats-sqlite-coverage.yml b/.github/workflows/bats-sqlite-coverage.yml index 8e890e0f8b9..6780727e968 100644 --- a/.github/workflows/bats-sqlite-coverage.yml +++ b/.github/workflows/bats-sqlite-coverage.yml @@ -28,7 +28,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.7" + go-version: "1.21.8" - name: "Install bats dependencies" env: diff --git a/.github/workflows/ci-windows-build-msi.yml b/.github/workflows/ci-windows-build-msi.yml index e116e0d2e24..ae3a475774d 100644 --- 
a/.github/workflows/ci-windows-build-msi.yml +++ b/.github/workflows/ci-windows-build-msi.yml @@ -35,7 +35,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.7" + go-version: "1.21.8" - name: Build run: make windows_installer BUILD_RE2_WASM=1 diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 7c4e749ece7..68c95ed6446 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -52,7 +52,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.7" + go-version: "1.21.8" cache-dependency-path: "**/go.sum" # Initializes the CodeQL tools for scanning. diff --git a/.github/workflows/go-tests-windows.yml b/.github/workflows/go-tests-windows.yml index 9dfcc089a63..6629250c592 100644 --- a/.github/workflows/go-tests-windows.yml +++ b/.github/workflows/go-tests-windows.yml @@ -34,7 +34,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.7" + go-version: "1.21.8" - name: Build run: | diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index 61dadfc0b78..e0603e0dec3 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -126,7 +126,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.7" + go-version: "1.21.8" - name: Create localstack streams run: | diff --git a/.github/workflows/publish-tarball-release.yml b/.github/workflows/publish-tarball-release.yml index d251677fd46..6fd582da853 100644 --- a/.github/workflows/publish-tarball-release.yml +++ b/.github/workflows/publish-tarball-release.yml @@ -25,7 +25,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.7" + go-version: "1.21.8" - name: Build the binaries run: | diff --git a/Dockerfile b/Dockerfile index 420c521fa58..ed1ac5e28c2 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # vim: set ft=dockerfile: -FROM golang:1.21.7-alpine3.18 
AS build +FROM golang:1.21.8-alpine3.18 AS build ARG BUILD_VERSION diff --git a/Dockerfile.debian b/Dockerfile.debian index 48753e7acdb..fd40bd475bb 100644 --- a/Dockerfile.debian +++ b/Dockerfile.debian @@ -1,5 +1,5 @@ # vim: set ft=dockerfile: -FROM golang:1.21.7-bookworm AS build +FROM golang:1.21.8-bookworm AS build ARG BUILD_VERSION diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 791f41f50ba..6151fe22274 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -27,7 +27,7 @@ stages: - task: GoTool@0 displayName: "Install Go" inputs: - version: '1.21.7' + version: '1.21.8' - pwsh: | choco install -y make From 6de62a1468fef07a74690b878e8d5a451df9f022 Mon Sep 17 00:00:00 2001 From: blotus Date: Tue, 19 Mar 2024 10:22:43 +0100 Subject: [PATCH 069/318] warn if user is using inotify to tail a symlink (#2881) --- pkg/acquisition/modules/file/file.go | 61 +++++++++++++++++----------- 1 file changed, 38 insertions(+), 23 deletions(-) diff --git a/pkg/acquisition/modules/file/file.go b/pkg/acquisition/modules/file/file.go index ee3c844a556..c36672507db 100644 --- a/pkg/acquisition/modules/file/file.go +++ b/pkg/acquisition/modules/file/file.go @@ -367,9 +367,9 @@ func (f *FileSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) er continue } - inotifyPoll := true + pollFile := false if f.config.PollWithoutInotify != nil { - inotifyPoll = *f.config.PollWithoutInotify + pollFile = *f.config.PollWithoutInotify } else { networkFS, fsType, err := types.IsNetworkFS(file) if err != nil { @@ -379,13 +379,23 @@ func (f *FileSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) er f.logger.Debugf("fs for %s is network: %t (%s)", file, networkFS, fsType) if networkFS { - f.logger.Warnf("Disabling inotify poll on %s as it is on a network share. 
You can manually set poll_without_inotify to true to make this message disappear, or to false to enforce inotify poll", file) - - inotifyPoll = false + f.logger.Warnf("Disabling inotify polling on %s as it is on a network share. You can manually set poll_without_inotify to true to make this message disappear, or to false to enforce inotify poll", file) + pollFile = true } } - tail, err := tail.TailFile(file, tail.Config{ReOpen: true, Follow: true, Poll: inotifyPoll, Location: &tail.SeekInfo{Offset: 0, Whence: io.SeekEnd}, Logger: log.NewEntry(log.StandardLogger())}) + filink, err := os.Lstat(file) + + if err != nil { + f.logger.Errorf("Could not lstat() new file %s, ignoring it : %s", file, err) + continue + } + + if filink.Mode()&os.ModeSymlink == os.ModeSymlink && !pollFile { + f.logger.Warnf("File %s is a symlink, but inotify polling is enabled. Crowdsec will not be able to detect rotation. Consider setting poll_without_inotify to true in your configuration", file) + } + + tail, err := tail.TailFile(file, tail.Config{ReOpen: true, Follow: true, Poll: pollFile, Location: &tail.SeekInfo{Offset: 0, Whence: io.SeekEnd}, Logger: log.NewEntry(log.StandardLogger())}) if err != nil { f.logger.Errorf("Could not start tailing file %s : %s", file, err) continue @@ -489,28 +499,33 @@ func (f *FileSource) monitorNewFiles(out chan types.Event, t *tomb.Tomb) error { continue } - inotifyPoll := true + pollFile := false if f.config.PollWithoutInotify != nil { - inotifyPoll = *f.config.PollWithoutInotify + pollFile = *f.config.PollWithoutInotify } else { - if f.config.PollWithoutInotify != nil { - inotifyPoll = *f.config.PollWithoutInotify - } else { - networkFS, fsType, err := types.IsNetworkFS(event.Name) - if err != nil { - f.logger.Warningf("Could not get fs type for %s : %s", event.Name, err) - } - - f.logger.Debugf("fs for %s is network: %t (%s)", event.Name, networkFS, fsType) - - if networkFS { - inotifyPoll = false - } + networkFS, fsType, err := 
types.IsNetworkFS(event.Name) + if err != nil { + f.logger.Warningf("Could not get fs type for %s : %s", event.Name, err) } + f.logger.Debugf("fs for %s is network: %t (%s)", event.Name, networkFS, fsType) + if networkFS { + pollFile = true + } + } + + filink, err := os.Lstat(event.Name) + + if err != nil { + logger.Errorf("Could not lstat() new file %s, ignoring it : %s", event.Name, err) + continue + } + + if filink.Mode()&os.ModeSymlink == os.ModeSymlink && !pollFile { + logger.Warnf("File %s is a symlink, but inotify polling is enabled. Crowdsec will not be able to detect rotation. Consider setting poll_without_inotify to true in your configuration", event.Name) } - // Slightly different parameters for Location, as we want to read the first lines of the newly created file - tail, err := tail.TailFile(event.Name, tail.Config{ReOpen: true, Follow: true, Poll: inotifyPoll, Location: &tail.SeekInfo{Offset: 0, Whence: io.SeekStart}}) + //Slightly different parameters for Location, as we want to read the first lines of the newly created file + tail, err := tail.TailFile(event.Name, tail.Config{ReOpen: true, Follow: true, Poll: pollFile, Location: &tail.SeekInfo{Offset: 0, Whence: io.SeekStart}}) if err != nil { logger.Errorf("Could not start tailing file %s : %s", event.Name, err) break From b63e64ee9f164531ab9ba98ead10a76d21b87c1e Mon Sep 17 00:00:00 2001 From: "Thibault \"bui\" Koechlin" Date: Tue, 19 Mar 2024 10:29:16 +0100 Subject: [PATCH 070/318] Fix locking logic for HA + add list unsubscribe for PAPI (#2904) * add list unsubscribe operation for papi * fix the locking logic for HA --- pkg/apiserver/apic.go | 13 ++++++++----- pkg/apiserver/papi_cmd.go | 37 +++++++++++++++++++++++++++++++++---- pkg/database/lock.go | 26 +++++++++++++++++++------- 3 files changed, 60 insertions(+), 16 deletions(-) diff --git a/pkg/apiserver/apic.go b/pkg/apiserver/apic.go index f57ae685e45..2136edc8b8e 100644 --- a/pkg/apiserver/apic.go +++ b/pkg/apiserver/apic.go @@ -639,6 
+639,14 @@ func (a *apic) PullTop(forcePull bool) error { return nil } + /*defer lock release*/ + defer func() { + log.Debug("Releasing lock for pullCAPI") + if err := a.dbClient.ReleasePullCAPILock(); err != nil { + log.Errorf("while releasing lock: %v", err) + } + }() + log.Infof("Starting community-blocklist update") data, _, err := a.apiClient.Decisions.GetStreamV3(context.Background(), apiclient.DecisionsStreamOpts{Startup: a.startup}) @@ -690,11 +698,6 @@ func (a *apic) PullTop(forcePull bool) error { return fmt.Errorf("while updating blocklists: %w", err) } - log.Debug("Releasing lock for pullCAPI") - if err := a.dbClient.ReleasePullCAPILock(); err != nil { - return fmt.Errorf("while releasing lock: %w", err) - } - return nil } diff --git a/pkg/apiserver/papi_cmd.go b/pkg/apiserver/papi_cmd.go index ba02034882c..fb76223b9a0 100644 --- a/pkg/apiserver/papi_cmd.go +++ b/pkg/apiserver/papi_cmd.go @@ -37,6 +37,10 @@ type forcePull struct { Blocklist *blocklistLink `json:"blocklist,omitempty"` } +type listUnsubscribe struct { + Name string `json:"name"` +} + func DecisionCmd(message *Message, p *Papi, sync bool) error { switch message.Header.OperationCmd { case "delete": @@ -163,13 +167,38 @@ func AlertCmd(message *Message, p *Papi, sync bool) error { func ManagementCmd(message *Message, p *Papi, sync bool) error { if sync { - log.Infof("Ignoring management command from PAPI in sync mode") + p.Logger.Infof("Ignoring management command from PAPI in sync mode") return nil } switch message.Header.OperationCmd { + + case "blocklist_unsubscribe": + data, err := json.Marshal(message.Data) + if err != nil { + return err + } + unsubscribeMsg := listUnsubscribe{} + if err := json.Unmarshal(data, &unsubscribeMsg); err != nil { + return fmt.Errorf("message for '%s' contains bad data format: %s", message.Header.OperationType, err) + } + if unsubscribeMsg.Name == "" { + return fmt.Errorf("message for '%s' contains bad data format: missing blocklist name", 
message.Header.OperationType) + } + p.Logger.Infof("Received blocklist_unsubscribe command from PAPI, unsubscribing from blocklist %s", unsubscribeMsg.Name) + + filter := make(map[string][]string) + filter["origin"] = []string{types.ListOrigin} + filter["scenario"] = []string{unsubscribeMsg.Name} + + _, deletedDecisions, err := p.DBClient.SoftDeleteDecisionsWithFilter(filter) + if err != nil { + return fmt.Errorf("unable to delete decisions for list %s : %w", unsubscribeMsg.Name, err) + } + p.Logger.Infof("deleted %d decisions for list %s", len(deletedDecisions), unsubscribeMsg.Name) + case "reauth": - log.Infof("Received reauth command from PAPI, resetting token") + p.Logger.Infof("Received reauth command from PAPI, resetting token") p.apiClient.GetClient().Transport.(*apiclient.JWTTransport).ResetToken() case "force_pull": data, err := json.Marshal(message.Data) @@ -182,13 +211,13 @@ func ManagementCmd(message *Message, p *Papi, sync bool) error { } if forcePullMsg.Blocklist == nil { - log.Infof("Received force_pull command from PAPI, pulling community and 3rd-party blocklists") + p.Logger.Infof("Received force_pull command from PAPI, pulling community and 3rd-party blocklists") err = p.apic.PullTop(true) if err != nil { return fmt.Errorf("failed to force pull operation: %s", err) } } else { - log.Infof("Received force_pull command from PAPI, pulling blocklist %s", forcePullMsg.Blocklist.Name) + p.Logger.Infof("Received force_pull command from PAPI, pulling blocklist %s", forcePullMsg.Blocklist.Name) err = p.apic.PullBlocklist(&modelscapi.BlocklistLink{ Name: &forcePullMsg.Blocklist.Name, URL: &forcePullMsg.Blocklist.Url, diff --git a/pkg/database/lock.go b/pkg/database/lock.go index 339226e8592..d25b71870f0 100644 --- a/pkg/database/lock.go +++ b/pkg/database/lock.go @@ -12,10 +12,12 @@ import ( ) const ( - CAPIPullLockTimeout = 120 + CAPIPullLockTimeout = 10 + CapiPullLockName = "pullCAPI" ) func (c *Client) AcquireLock(name string) error { + 
log.Debugf("acquiring lock %s", name) _, err := c.Ent.Lock.Create(). SetName(name). SetCreatedAt(types.UtcNow()). @@ -30,6 +32,7 @@ func (c *Client) AcquireLock(name string) error { } func (c *Client) ReleaseLock(name string) error { + log.Debugf("releasing lock %s", name) _, err := c.Ent.Lock.Delete().Where(lock.NameEQ(name)).Exec(c.CTX) if err != nil { return errors.Wrapf(DeleteFail, "delete lock: %s", err) @@ -38,11 +41,12 @@ func (c *Client) ReleaseLock(name string) error { } func (c *Client) ReleaseLockWithTimeout(name string, timeout int) error { - log.Debugf("(%s) releasing orphin locks", name) + log.Debugf("releasing lock %s with timeout of %d minutes", name, timeout) _, err := c.Ent.Lock.Delete().Where( lock.NameEQ(name), - lock.CreatedAtLT(time.Now().Add(-time.Duration(timeout)*time.Minute)), + lock.CreatedAtLT(time.Now().UTC().Add(-time.Duration(timeout)*time.Minute)), ).Exec(c.CTX) + if err != nil { return errors.Wrapf(DeleteFail, "delete lock: %s", err) } @@ -54,14 +58,22 @@ func (c *Client) IsLocked(err error) bool { } func (c *Client) AcquirePullCAPILock() error { - lockName := "pullCAPI" - err := c.ReleaseLockWithTimeout(lockName, CAPIPullLockTimeout) + + /*delete orphan "old" lock if present*/ + err := c.ReleaseLockWithTimeout(CapiPullLockName, CAPIPullLockTimeout) if err != nil { log.Errorf("unable to release pullCAPI lock: %s", err) } - return c.AcquireLock(lockName) + return c.AcquireLock(CapiPullLockName) } func (c *Client) ReleasePullCAPILock() error { - return c.ReleaseLockWithTimeout("pullCAPI", CAPIPullLockTimeout) + log.Debugf("deleting lock %s", CapiPullLockName) + _, err := c.Ent.Lock.Delete().Where( + lock.NameEQ(CapiPullLockName), + ).Exec(c.CTX) + if err != nil { + return errors.Wrapf(DeleteFail, "delete lock: %s", err) + } + return nil } From dd71f0a8663ac6615fdedf3c56f59c55853df858 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 19 Mar 2024 10:48:49 +0100 Subject: [PATCH 071/318] CI: bump 
lint version and update configuration (#2901) * bump golangci-lint to 1.56 * lint (testifylint) * update lint configuration * windows test: remove stale code --- .github/workflows/go-tests-windows.yml | 2 +- .github/workflows/go-tests.yml | 2 +- .golangci.yml | 42 +++++++++++++++----------- pkg/acquisition/modules/loki/loki.go | 2 +- pkg/apiserver/alerts_test.go | 2 +- pkg/csplugin/utils_windows.go | 4 --- pkg/exprhelpers/crowdsec_cti_test.go | 8 ++--- 7 files changed, 33 insertions(+), 29 deletions(-) diff --git a/.github/workflows/go-tests-windows.yml b/.github/workflows/go-tests-windows.yml index 6629250c592..643cb9b39c1 100644 --- a/.github/workflows/go-tests-windows.yml +++ b/.github/workflows/go-tests-windows.yml @@ -56,7 +56,7 @@ jobs: - name: golangci-lint uses: golangci/golangci-lint-action@v4 with: - version: v1.55 + version: v1.56 args: --issues-exit-code=1 --timeout 10m only-new-issues: false # the cache is already managed above, enabling it here diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index e0603e0dec3..918cefb26ed 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -157,7 +157,7 @@ jobs: - name: golangci-lint uses: golangci/golangci-lint-action@v4 with: - version: v1.55 + version: v1.56 args: --issues-exit-code=1 --timeout 10m only-new-issues: false # the cache is already managed above, enabling it here diff --git a/.golangci.yml b/.golangci.yml index 396da2141f1..758327e40fd 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -4,7 +4,6 @@ run: skip-dirs: - pkg/time/rate skip-files: - - pkg/database/ent/generate.go - pkg/yamlpatch/merge.go - pkg/yamlpatch/merge_test.go @@ -32,17 +31,31 @@ linters-settings: # Checks the number of lines in a function. # If lower than 0, disable the check. # Default: 60 - lines: -1 + # lower this after refactoring + lines: 437 # Checks the number of statements in a function. # If lower than 0, disable the check. 
# Default: 40 - statements: -1 + # lower this after refactoring + statements: 122 govet: check-shadowing: true + enable: + - atomicalign + - deepequalerrors + # TODO: - fieldalignment + - findcall + - nilness + # TODO: - reflectvaluecompare + - shadow + - sortslice + - timeformat + - unusedwrite lll: - line-length: 140 + # lower this after refactoring + line-length: 2607 maintidx: # raise this after refactoring @@ -68,10 +81,16 @@ linters-settings: depguard: rules: - main: + wrap: deny: - pkg: "github.com/pkg/errors" desc: "errors.Wrap() is deprecated in favor of fmt.Errorf()" + files: + - "!**/pkg/database/*.go" + - "!**/pkg/exprhelpers/*.go" + - "!**/pkg/acquisition/modules/appsec/appsec.go" + - "!**/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go" + - "!**/pkg/apiserver/controllers/v1/errors.go" yaml: files: - "!**/cmd/crowdsec-cli/alerts.go" @@ -105,7 +124,6 @@ linters-settings: - "!**/pkg/appsec/loader.go" - "!**/pkg/csplugin/broker.go" - "!**/pkg/csplugin/broker_test.go" - - "!**/pkg/dumps/bucker_dump.go" - "!**/pkg/dumps/bucket_dump.go" - "!**/pkg/dumps/parser_dump.go" - "!**/pkg/hubtest/coverage.go" @@ -177,6 +195,7 @@ linters: # - importas # Enforces consistent import aliases # - ineffassign # Detects when assignments to existing variables are not used # - interfacebloat # A linter that checks the number of methods inside an interface. + # - lll # Reports long lines # - logrlint # Check logr arguments. # - maintidx # maintidx measures the maintainability index of each function. # - makezero # Finds slice declarations with non-zero initial length @@ -214,7 +233,6 @@ linters: - goimports # In addition to fixing imports, goimports also formats your code in the same style as gofmt. 
- gosec # (gas): Inspects source code for security problems - inamedparam # reports interfaces with unnamed method parameters - - lll # Reports long lines - musttag # enforce field tags in (un)marshaled structs - promlinter # Check Prometheus metrics naming via promlint - protogetter # Reports direct reads from proto message fields when getters should be used @@ -363,13 +381,3 @@ issues: - linters: - nonamedreturns text: "named return .* with type .* found" - - # - # Will fix, might be trickier - # - - # https://github.com/pkg/errors/issues/245 - - linters: - - depguard - text: "import 'github.com/pkg/errors' is not allowed .*" - diff --git a/pkg/acquisition/modules/loki/loki.go b/pkg/acquisition/modules/loki/loki.go index 3625c689516..52b87eb95d3 100644 --- a/pkg/acquisition/modules/loki/loki.go +++ b/pkg/acquisition/modules/loki/loki.go @@ -6,13 +6,13 @@ https://grafana.com/docs/loki/latest/api/#get-lokiapiv1tail import ( "context" + "errors" "fmt" "net/url" "strconv" "strings" "time" - "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" log "github.com/sirupsen/logrus" tomb "gopkg.in/tomb.v2" diff --git a/pkg/apiserver/alerts_test.go b/pkg/apiserver/alerts_test.go index 5365058176d..812e33ae13b 100644 --- a/pkg/apiserver/alerts_test.go +++ b/pkg/apiserver/alerts_test.go @@ -151,7 +151,7 @@ func TestCreateAlertChannels(t *testing.T) { wg.Done() }() - go lapi.InsertAlertFromFile(t, "./tests/alert_ssh-bf.json") + lapi.InsertAlertFromFile(t, "./tests/alert_ssh-bf.json") wg.Wait() assert.Len(t, pd.Alert.Decisions, 1) apiServer.Close() diff --git a/pkg/csplugin/utils_windows.go b/pkg/csplugin/utils_windows.go index dfb11aff548..2870a2addb5 100644 --- a/pkg/csplugin/utils_windows.go +++ b/pkg/csplugin/utils_windows.go @@ -100,10 +100,6 @@ func CheckPerms(path string) error { return fmt.Errorf("no DACL found on plugin, meaning fully permissive access on plugin %s", path) } - if err != nil { - return fmt.Errorf("while looking up current user sid: 
%w", err) - } - rs := reflect.ValueOf(dacl).Elem() /* diff --git a/pkg/exprhelpers/crowdsec_cti_test.go b/pkg/exprhelpers/crowdsec_cti_test.go index fc3a236c561..84a4b245720 100644 --- a/pkg/exprhelpers/crowdsec_cti_test.go +++ b/pkg/exprhelpers/crowdsec_cti_test.go @@ -118,7 +118,7 @@ func TestNillClient(t *testing.T) { item, err := CrowdsecCTI("1.2.3.4") assert.Equal(t, err, cticlient.ErrDisabled) - assert.Equal(t, item, &cticlient.SmokeItem{}) + assert.Equal(t, &cticlient.SmokeItem{}, item) } func TestInvalidAuth(t *testing.T) { @@ -133,7 +133,7 @@ func TestInvalidAuth(t *testing.T) { })) item, err := CrowdsecCTI("1.2.3.4") - assert.Equal(t, item, &cticlient.SmokeItem{}) + assert.Equal(t, &cticlient.SmokeItem{}, item) assert.False(t, CTIApiEnabled) assert.Equal(t, err, cticlient.ErrUnauthorized) @@ -143,7 +143,7 @@ func TestInvalidAuth(t *testing.T) { })) item, err = CrowdsecCTI("1.2.3.4") - assert.Equal(t, item, &cticlient.SmokeItem{}) + assert.Equal(t, &cticlient.SmokeItem{}, item) assert.False(t, CTIApiEnabled) assert.Equal(t, err, cticlient.ErrDisabled) } @@ -159,7 +159,7 @@ func TestNoKey(t *testing.T) { })) item, err := CrowdsecCTI("1.2.3.4") - assert.Equal(t, item, &cticlient.SmokeItem{}) + assert.Equal(t, &cticlient.SmokeItem{}, item) assert.False(t, CTIApiEnabled) assert.Equal(t, err, cticlient.ErrDisabled) } From c76325b91b64137ee3df3f02b473270819f8d241 Mon Sep 17 00:00:00 2001 From: blotus Date: Tue, 19 Mar 2024 17:42:08 +0100 Subject: [PATCH 072/318] Update windows pipeline (#2909) --- azure-pipelines.yml | 154 ++++++++++++++++++++++++++++++-------------- make_chocolatey.ps1 | 4 +- make_installer.ps1 | 2 +- 3 files changed, 111 insertions(+), 49 deletions(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 6151fe22274..f90af6f1718 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -15,15 +15,9 @@ pool: stages: - stage: Build jobs: - - job: + - job: Build displayName: "Build" steps: - - task: DotNetCoreCLI@2 - displayName: 
"Install SignClient" - inputs: - command: 'custom' - custom: 'tool' - arguments: 'install --global SignClient --version 1.3.155' - task: GoTool@0 displayName: "Install Go" inputs: @@ -39,24 +33,14 @@ stages: #we are not calling make windows_installer because we want to sign the binaries before they are added to the MSI script: | make build BUILD_RE2_WASM=1 - - task: AzureKeyVault@2 - inputs: - azureSubscription: 'Azure subscription 1(8a93ab40-7e99-445e-ad47-0f6a3e2ef546)' - KeyVaultName: 'CodeSigningSecrets' - SecretsFilter: 'CodeSigningUser,CodeSigningPassword' - RunAsPreJob: false - - - task: DownloadSecureFile@1 - inputs: - secureFile: appsettings.json - - - pwsh: | - SignClient.exe Sign --name "crowdsec-binaries" ` - --input "**/*.exe" --config (Join-Path -Path $(Agent.TempDirectory) -ChildPath "appsettings.json") ` - --user $(CodeSigningUser) --secret '$(CodeSigningPassword)' - displayName: "Sign Crowdsec binaries + plugins" + - pwsh: | $build_version=$env:BUILD_SOURCEBRANCHNAME + #Override the version if it's set in the pipeline + if ( ${env:USERBUILDVERSION} -ne "") + { + $build_version = ${env:USERBUILDVERSION} + } if ($build_version.StartsWith("v")) { $build_version = $build_version.Substring(1) @@ -69,35 +53,112 @@ stages: displayName: GetCrowdsecVersion name: GetCrowdsecVersion - pwsh: | - .\make_installer.ps1 -version '$(GetCrowdsecVersion.BuildVersion)' + Get-ChildItem -Path .\cmd -Directory | ForEach-Object { + $dirName = $_.Name + Get-ChildItem -Path .\cmd\$dirName -File -Filter '*.exe' | ForEach-Object { + $fileName = $_.Name + $destDir = Join-Path $(Build.ArtifactStagingDirectory) cmd\$dirName + New-Item -ItemType Directory -Path $destDir -Force + Copy-Item -Path .\cmd\$dirName\$fileName -Destination $destDir + } + } + displayName: "Copy binaries to staging directory" + - task: PublishPipelineArtifact@1 + inputs: + targetPath: '$(Build.ArtifactStagingDirectory)' + artifact: 'unsigned_binaries' + displayName: "Upload binaries artifact" + + - stage: 
Sign + dependsOn: Build + variables: + - group: 'FOSS Build Variables' + - name: BuildVersion + value: $[ stageDependencies.Build.Build.outputs['GetCrowdsecVersion.BuildVersion'] ] + condition: succeeded() + jobs: + - job: Sign + displayName: "Sign" + steps: + - download: current + artifact: unsigned_binaries + displayName: "Download binaries artifact" + - task: CopyFiles@2 + inputs: + SourceFolder: '$(Pipeline.Workspace)/unsigned_binaries' + TargetFolder: '$(Build.SourcesDirectory)' + displayName: "Copy binaries to workspace" + - task: DotNetCoreCLI@2 + displayName: "Install SignTool tool" + inputs: + command: 'custom' + custom: 'tool' + arguments: install --global sign --version 0.9.0-beta.23127.3 + - task: AzureKeyVault@2 + displayName: "Get signing parameters" + inputs: + azureSubscription: "Azure subscription" + KeyVaultName: "$(KeyVaultName)" + SecretsFilter: "TenantId,ClientId,ClientSecret,Certificate,KeyVaultUrl" + - pwsh: | + sign code azure-key-vault ` + "**/*.exe" ` + --base-directory "$(Build.SourcesDirectory)/cmd/" ` + --publisher-name "CrowdSec" ` + --description "CrowdSec" ` + --description-url "https://github.com/crowdsecurity/crowdsec" ` + --azure-key-vault-tenant-id "$(TenantId)" ` + --azure-key-vault-client-id "$(ClientId)" ` + --azure-key-vault-client-secret "$(ClientSecret)" ` + --azure-key-vault-certificate "$(Certificate)" ` + --azure-key-vault-url "$(KeyVaultUrl)" + displayName: "Sign crowdsec binaries" + - pwsh: | + .\make_installer.ps1 -version '$(BuildVersion)' displayName: "Build Crowdsec MSI" name: BuildMSI - - pwsh: | - .\make_chocolatey.ps1 -version '$(GetCrowdsecVersion.BuildVersion)' + .\make_chocolatey.ps1 -version '$(BuildVersion)' displayName: "Build Chocolatey nupkg" - - pwsh: | - SignClient.exe Sign --name "crowdsec-msi" ` - --input "*.msi" --config (Join-Path -Path $(Agent.TempDirectory) -ChildPath "appsettings.json") ` - --user $(CodeSigningUser) --secret '$(CodeSigningPassword)' - displayName: "Sign Crowdsec MSI" - - - task: 
PublishBuildArtifacts@1 + sign code azure-key-vault ` + "*.msi" ` + --base-directory "$(Build.SourcesDirectory)" ` + --publisher-name "CrowdSec" ` + --description "CrowdSec" ` + --description-url "https://github.com/crowdsecurity/crowdsec" ` + --azure-key-vault-tenant-id "$(TenantId)" ` + --azure-key-vault-client-id "$(ClientId)" ` + --azure-key-vault-client-secret "$(ClientSecret)" ` + --azure-key-vault-certificate "$(Certificate)" ` + --azure-key-vault-url "$(KeyVaultUrl)" + displayName: "Sign MSI package" + - pwsh: | + sign code azure-key-vault ` + "*.nupkg" ` + --base-directory "$(Build.SourcesDirectory)" ` + --publisher-name "CrowdSec" ` + --description "CrowdSec" ` + --description-url "https://github.com/crowdsecurity/crowdsec" ` + --azure-key-vault-tenant-id "$(TenantId)" ` + --azure-key-vault-client-id "$(ClientId)" ` + --azure-key-vault-client-secret "$(ClientSecret)" ` + --azure-key-vault-certificate "$(Certificate)" ` + --azure-key-vault-url "$(KeyVaultUrl)" + displayName: "Sign nuget package" + - task: PublishPipelineArtifact@1 inputs: - PathtoPublish: '$(Build.Repository.LocalPath)\\crowdsec_$(GetCrowdsecVersion.BuildVersion).msi' - ArtifactName: 'crowdsec.msi' - publishLocation: 'Container' - displayName: "Upload MSI artifact" - - - task: PublishBuildArtifacts@1 + targetPath: '$(Build.SourcesDirectory)/crowdsec_$(BuildVersion).msi' + artifact: 'signed_msi_package' + displayName: "Upload signed MSI artifact" + - task: PublishPipelineArtifact@1 inputs: - PathtoPublish: '$(Build.Repository.LocalPath)\\windows\\Chocolatey\\crowdsec\\crowdsec.$(GetCrowdsecVersion.BuildVersion).nupkg' - ArtifactName: 'crowdsec.nupkg' - publishLocation: 'Container' - displayName: "Upload nupkg artifact" + targetPath: '$(Build.SourcesDirectory)/crowdsec.$(BuildVersion).nupkg' + artifact: 'signed_nuget_package' + displayName: "Upload signed nuget artifact" + - stage: Publish - dependsOn: Build + dependsOn: Sign jobs: - deployment: "Publish" displayName: "Publish to GitHub" @@ 
-119,8 +180,7 @@ stages: assetUploadMode: 'replace' addChangeLog: false isPreRelease: true #we force prerelease because the pipeline is invoked on tag creation, which happens when we do a prerelease - #the .. is an ugly hack, but I can't find the var that gives D:\a\1 ... assets: | - $(Build.ArtifactStagingDirectory)\..\crowdsec.msi/*.msi - $(Build.ArtifactStagingDirectory)\..\crowdsec.nupkg/*.nupkg + $(Pipeline.Workspace)/signed_msi_package/*.msi + $(Pipeline.Workspace)/signed_nuget_package/*.nupkg condition: ne(variables['GetLatestPrelease.LatestPreRelease'], '') diff --git a/make_chocolatey.ps1 b/make_chocolatey.ps1 index 67f85c33d89..cceed28402f 100644 --- a/make_chocolatey.ps1 +++ b/make_chocolatey.ps1 @@ -15,4 +15,6 @@ if ($version.Contains("-")) Set-Location .\windows\Chocolatey\crowdsec Copy-Item ..\..\..\crowdsec_$version.msi tools\crowdsec.msi -choco pack --version $version \ No newline at end of file +choco pack --version $version + +Copy-Item crowdsec.$version.nupkg ..\..\..\ \ No newline at end of file diff --git a/make_installer.ps1 b/make_installer.ps1 index a20ffaf55b5..c927452ff72 100644 --- a/make_installer.ps1 +++ b/make_installer.ps1 @@ -1,7 +1,7 @@ param ( $version ) -$env:Path += ";C:\Program Files (x86)\WiX Toolset v3.11\bin" +$env:Path += ";C:\Program Files (x86)\WiX Toolset v3.14\bin" if ($version.StartsWith("v")) { $version = $version.Substring(1) From d9f2a22ee5b5a982bbf7a2f970b5929944ad7f2b Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 20 Mar 2024 13:27:28 +0100 Subject: [PATCH 073/318] cscli metrics -> sort table order (#2908) --- cmd/crowdsec-cli/metrics.go | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/cmd/crowdsec-cli/metrics.go b/cmd/crowdsec-cli/metrics.go index 0f92343868d..ad2b9ee79d8 100644 --- a/cmd/crowdsec-cli/metrics.go +++ b/cmd/crowdsec-cli/metrics.go @@ -272,9 +272,7 @@ func (ms metricStore) Format(out io.Writer, sections []string, formatType string 
// if no sections are specified, we want all of them if len(sections) == 0 { - for section := range ms { - sections = append(sections, section) - } + sections = maptools.SortedKeys(ms) } for _, section := range sections { @@ -283,7 +281,7 @@ func (ms metricStore) Format(out io.Writer, sections []string, formatType string switch formatType { case "human": - for section := range want { + for _, section := range maptools.SortedKeys(want) { want[section].Table(out, noUnit, showEmpty) } case "json": @@ -376,7 +374,7 @@ cscli metrics list`, } // expandAlias returns a list of sections. The input can be a list of sections or alias. -func (cli *cliMetrics) expandSectionGroups(args []string) []string { +func (cli *cliMetrics) expandAlias(args []string) []string { ret := []string{} for _, section := range args { @@ -422,7 +420,7 @@ cscli metrics show acquisition parsers scenarios stash -o json`, // Positional args are optional DisableAutoGenTag: true, RunE: func(_ *cobra.Command, args []string) error { - args = cli.expandSectionGroups(args) + args = cli.expandAlias(args) return cli.show(args, url, noUnit) }, } From 75a50c0c9d5b31cc6200e55f13c7b94ae1d5a41c Mon Sep 17 00:00:00 2001 From: "Thibault \"bui\" Koechlin" Date: Wed, 20 Mar 2024 14:02:29 +0100 Subject: [PATCH 074/318] improve a bit cscli examples when it comes to list mgmt (#2911) --- cmd/crowdsec-cli/alerts.go | 12 +++++++----- cmd/crowdsec-cli/decisions.go | 3 ++- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/cmd/crowdsec-cli/alerts.go b/cmd/crowdsec-cli/alerts.go index ce304bcc777..908466f9eb2 100644 --- a/cmd/crowdsec-cli/alerts.go +++ b/cmd/crowdsec-cli/alerts.go @@ -177,9 +177,9 @@ func (cli *cliAlerts) displayOneAlert(alert *models.Alert, withDetail bool) erro return nil } -type cliAlerts struct{ +type cliAlerts struct { client *apiclient.ApiClient - cfg configGetter + cfg configGetter } func NewCLIAlerts(getconfig configGetter) *cliAlerts { @@ -253,8 +253,10 @@ func (cli *cliAlerts) NewListCmd() 
*cobra.Command { Example: `cscli alerts list cscli alerts list --ip 1.2.3.4 cscli alerts list --range 1.2.3.0/24 +cscli alerts list --origin lists cscli alerts list -s crowdsecurity/ssh-bf cscli alerts list --type ban`, + Long: `List alerts with optional filters`, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { if err := manageCliDecisionAlerts(alertListFilter.IPEquals, alertListFilter.RangeEquals, @@ -358,7 +360,7 @@ func (cli *cliAlerts) NewDeleteCmd() *cobra.Command { var ( ActiveDecision *bool AlertDeleteAll bool - delAlertByID string + delAlertByID string ) var alertDeleteFilter = apiclient.AlertsDeleteOpts{ @@ -449,7 +451,7 @@ cscli alerts delete -s crowdsecurity/ssh-bf"`, return nil }, } - + flags := cmd.Flags() flags.SortFlags = false flags.StringVar(alertDeleteFilter.ScopeEquals, "scope", "", "the scope (ie. ip,range)") @@ -520,7 +522,7 @@ func (cli *cliAlerts) NewInspectCmd() *cobra.Command { func (cli *cliAlerts) NewFlushCmd() *cobra.Command { var ( maxItems int - maxAge string + maxAge string ) cmd := &cobra.Command{ diff --git a/cmd/crowdsec-cli/decisions.go b/cmd/crowdsec-cli/decisions.go index d7165367898..a97536ddc14 100644 --- a/cmd/crowdsec-cli/decisions.go +++ b/cmd/crowdsec-cli/decisions.go @@ -195,7 +195,7 @@ func (cli *cliDecisions) newListCmd() *cobra.Command { Example: `cscli decisions list -i 1.2.3.4 cscli decisions list -r 1.2.3.0/24 cscli decisions list -s crowdsecurity/ssh-bf -cscli decisions list -t ban +cscli decisions list --origin lists --scenario list_name `, Args: cobra.ExactArgs(0), DisableAutoGenTag: true, @@ -436,6 +436,7 @@ func (cli *cliDecisions) newDeleteCmd() *cobra.Command { cscli decisions delete -i 1.2.3.4 cscli decisions delete --id 42 cscli decisions delete --type captcha +cscli decisions delete --origin lists --scenario list_name `, /*TBD : refaire le Long/Example*/ PreRunE: func(cmd *cobra.Command, _ []string) error { From 7779c7ff0c010537d9a1dc3a73abdb0e2c750c33 Mon Sep 17 00:00:00 2001 
From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 20 Mar 2024 15:46:14 +0100 Subject: [PATCH 075/318] hub update: reload crowdsec if only data files have changed (#2912) --- pkg/cwhub/dataset.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/pkg/cwhub/dataset.go b/pkg/cwhub/dataset.go index 4612f357626..97fd9c5a0ff 100644 --- a/pkg/cwhub/dataset.go +++ b/pkg/cwhub/dataset.go @@ -32,6 +32,9 @@ func downloadFile(url string, destPath string) error { return fmt.Errorf("bad http code %d for %s", resp.StatusCode, url) } + // Download to a temporary location to avoid corrupting files + // that are currently in use or memory mapped. + tmpFile, err := os.CreateTemp(filepath.Dir(destPath), filepath.Base(destPath)+".*.tmp") if err != nil { return err @@ -57,6 +60,11 @@ func downloadFile(url string, destPath string) error { return err } + // a check on stdout is used while scripting to know if the hub has been upgraded + // and a configuration reload is required + // TODO: use a better way to communicate this + fmt.Printf("updated %s\n", filepath.Base(destPath)) + if err = os.Rename(tmpFileName, destPath); err != nil { return err } From 52f86c2d10f51a238b9e23961e65075a44c85fc9 Mon Sep 17 00:00:00 2001 From: blotus Date: Thu, 21 Mar 2024 11:39:37 +0100 Subject: [PATCH 076/318] add libinjection expr helpers (#2914) --- pkg/exprhelpers/expr_lib.go | 14 +++++++ pkg/exprhelpers/libinjection.go | 17 ++++++++ pkg/exprhelpers/libinjection_test.go | 60 ++++++++++++++++++++++++++++ 3 files changed, 91 insertions(+) create mode 100644 pkg/exprhelpers/libinjection.go create mode 100644 pkg/exprhelpers/libinjection_test.go diff --git a/pkg/exprhelpers/expr_lib.go b/pkg/exprhelpers/expr_lib.go index db191b84a8d..520799287eb 100644 --- a/pkg/exprhelpers/expr_lib.go +++ b/pkg/exprhelpers/expr_lib.go @@ -441,6 +441,20 @@ var exprFuncs = []exprCustomFunc{ new(func(float64, float64) bool), }, }, + { + name: "LibInjectionIsSQLI", + function: LibInjectionIsSQLI, + 
signature: []interface{}{ + new(func(string) bool), + }, + }, + { + name: "LibInjectionIsXSS", + function: LibInjectionIsXSS, + signature: []interface{}{ + new(func(string) bool), + }, + }, } //go 1.20 "CutPrefix": strings.CutPrefix, diff --git a/pkg/exprhelpers/libinjection.go b/pkg/exprhelpers/libinjection.go new file mode 100644 index 00000000000..e9f33e4f459 --- /dev/null +++ b/pkg/exprhelpers/libinjection.go @@ -0,0 +1,17 @@ +package exprhelpers + +import "github.com/corazawaf/libinjection-go" + +func LibInjectionIsSQLI(params ...any) (any, error) { + str := params[0].(string) + + ret, _ := libinjection.IsSQLi(str) + return ret, nil +} + +func LibInjectionIsXSS(params ...any) (any, error) { + str := params[0].(string) + + ret := libinjection.IsXSS(str) + return ret, nil +} diff --git a/pkg/exprhelpers/libinjection_test.go b/pkg/exprhelpers/libinjection_test.go new file mode 100644 index 00000000000..7b4ab825db9 --- /dev/null +++ b/pkg/exprhelpers/libinjection_test.go @@ -0,0 +1,60 @@ +package exprhelpers + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestLibinjectionHelpers(t *testing.T) { + tests := []struct { + name string + function func(params ...any) (any, error) + params []any + expectResult any + }{ + { + name: "LibInjectionIsSQLI", + function: LibInjectionIsSQLI, + params: []any{"?__f__73=73&&__f__75=75&delivery=1&max=24.9&min=15.9&n=12&o=2&p=(select(0)from(select(sleep(15)))v)/*'%2B(select(0)from(select(sleep(15)))v)%2B'\x22%2B(select(0)from(select(sleep(15)))v)%2B\x22*/&rating=4"}, + expectResult: true, + }, + { + name: "LibInjectionIsSQLI - no match", + function: LibInjectionIsSQLI, + params: []any{"?bla=42&foo=bar"}, + expectResult: false, + }, + { + name: "LibInjectionIsSQLI - no match 2", + function: LibInjectionIsSQLI, + params: []any{"https://foo.com/asdkfj?bla=42&foo=bar"}, + expectResult: false, + }, + { + name: "LibInjectionIsXSS", + function: LibInjectionIsXSS, + params: []any{""}, + expectResult: true, + }, + { 
+ name: "LibInjectionIsXSS - no match", + function: LibInjectionIsXSS, + params: []any{"?bla=42&foo=bar"}, + expectResult: false, + }, + { + name: "LibInjectionIsXSS - no match 2", + function: LibInjectionIsXSS, + params: []any{"https://foo.com/asdkfj?bla=42&foo[]=bar&foo"}, + expectResult: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + result, _ := test.function(test.params...) + assert.Equal(t, test.expectResult, result) + }) + } +} From 2e1ddec107ed01937809d4f53b4ab8fa2e00f7e4 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 25 Mar 2024 10:40:41 +0100 Subject: [PATCH 077/318] cscli: Add user-agent to all hub requests (#2915) * cscli: Add user-agent to all hub requests * fix unit test and avoid httpmock * fix windows test --- pkg/cwhub/cwhub.go | 14 +++++++++ pkg/cwhub/dataset_test.go | 62 +++++++++++++++++++++------------------ 2 files changed, 48 insertions(+), 28 deletions(-) diff --git a/pkg/cwhub/cwhub.go b/pkg/cwhub/cwhub.go index 9ce091fad39..a7864d4c076 100644 --- a/pkg/cwhub/cwhub.go +++ b/pkg/cwhub/cwhub.go @@ -7,10 +7,24 @@ import ( "sort" "strings" "time" + + "github.com/crowdsecurity/go-cs-lib/version" ) +// hubTransport wraps a Transport to set a custom User-Agent. +type hubTransport struct { + http.RoundTripper +} + +func (t *hubTransport) RoundTrip(req *http.Request) (*http.Response, error) { + req.Header.Set("User-Agent", "crowdsec/"+version.String()) + return t.RoundTripper.RoundTrip(req) +} + +// hubClient is the HTTP client used to communicate with the CrowdSec Hub. var hubClient = &http.Client{ Timeout: 120 * time.Second, + Transport: &hubTransport{http.DefaultTransport}, } // safePath returns a joined path and ensures that it does not escape the base directory. 
diff --git a/pkg/cwhub/dataset_test.go b/pkg/cwhub/dataset_test.go index 93d3e3bf01e..e48202e4821 100644 --- a/pkg/cwhub/dataset_test.go +++ b/pkg/cwhub/dataset_test.go @@ -1,50 +1,56 @@ package cwhub import ( + "io" + "net/http" + "net/http/httptest" "os" + "path/filepath" "testing" - "github.com/jarcoal/httpmock" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/crowdsecurity/go-cs-lib/cstest" ) func TestDownloadFile(t *testing.T) { - examplePath := "./example.txt" - defer os.Remove(examplePath) - - httpmock.Activate() - defer httpmock.DeactivateAndReset() - - // OK - httpmock.RegisterResponder( - "GET", - "https://example.com/xx", - httpmock.NewStringResponder(200, "example content oneoneone"), - ) - - httpmock.RegisterResponder( - "GET", - "https://example.com/x", - httpmock.NewStringResponder(404, "not found"), - ) - - err := downloadFile("https://example.com/xx", examplePath) + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/xx": + w.WriteHeader(http.StatusOK) + _, _ = io.WriteString(w, "example content oneoneone") + default: + w.WriteHeader(http.StatusNotFound) + _, _ = io.WriteString(w, "not found") + } + })) + defer ts.Close() + + dest := filepath.Join(t.TempDir(), "example.txt") + defer os.Remove(dest) + + err := downloadFile(ts.URL+"/xx", dest) require.NoError(t, err) - content, err := os.ReadFile(examplePath) + content, err := os.ReadFile(dest) assert.Equal(t, "example content oneoneone", string(content)) require.NoError(t, err) // bad uri - err = downloadFile("https://zz.com", examplePath) - require.Error(t, err) + err = downloadFile("https://zz.com", dest) + cstest.RequireErrorContains(t, err, "lookup zz.com") + cstest.RequireErrorContains(t, err, "no such host") // 404 - err = downloadFile("https://example.com/x", examplePath) - require.Error(t, err) + err = downloadFile(ts.URL+"/x", dest) + cstest.RequireErrorContains(t, err, "bad http 
code 404") // bad target - err = downloadFile("https://example.com/xx", "") - require.Error(t, err) + err = downloadFile(ts.URL+"/xx", "") + cstest.RequireErrorContains(t, err, cstest.PathNotFoundMessage) + + // destination directory does not exist + err = downloadFile(ts.URL+"/xx", filepath.Join(t.TempDir(), "missing/example.txt")) + cstest.RequireErrorContains(t, err, cstest.PathNotFoundMessage) } From f6bb8412c55426c7f459a85fa4f1b08fec4f487f Mon Sep 17 00:00:00 2001 From: Christian Kampka Date: Mon, 25 Mar 2024 16:20:16 +0100 Subject: [PATCH 078/318] Add patterns_dir configuration option (#2868) * Add patterns_dir configuration option * Update config.yaml --------- Co-authored-by: mmetc <92726601+mmetc@users.noreply.github.com> --- pkg/csconfig/config_paths.go | 6 ++++++ pkg/parser/unix_parser.go | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/pkg/csconfig/config_paths.go b/pkg/csconfig/config_paths.go index 71e3bacdaac..3de05ee0292 100644 --- a/pkg/csconfig/config_paths.go +++ b/pkg/csconfig/config_paths.go @@ -13,6 +13,7 @@ type ConfigurationPaths struct { HubDir string `yaml:"hub_dir,omitempty"` PluginDir string `yaml:"plugin_dir,omitempty"` NotificationDir string `yaml:"notification_dir,omitempty"` + PatternDir string `yaml:"pattern_dir,omitempty"` } func (c *Config) loadConfigurationPaths() error { @@ -33,6 +34,10 @@ func (c *Config) loadConfigurationPaths() error { c.ConfigPaths.HubIndexFile = filepath.Clean(c.ConfigPaths.HubDir + "/.index.json") } + if c.ConfigPaths.PatternDir == "" { + c.ConfigPaths.PatternDir = filepath.Join(c.ConfigPaths.ConfigDir, "patterns/") + } + var configPathsCleanup = []*string{ &c.ConfigPaths.HubDir, &c.ConfigPaths.HubIndexFile, @@ -41,6 +46,7 @@ func (c *Config) loadConfigurationPaths() error { &c.ConfigPaths.SimulationFilePath, &c.ConfigPaths.PluginDir, &c.ConfigPaths.NotificationDir, + &c.ConfigPaths.PatternDir, } for _, k := range configPathsCleanup { if *k == "" { diff --git a/pkg/parser/unix_parser.go 
b/pkg/parser/unix_parser.go index 617e46189f3..720bac3d1fe 100644 --- a/pkg/parser/unix_parser.go +++ b/pkg/parser/unix_parser.go @@ -98,7 +98,7 @@ func NewParsers(hub *cwhub.Hub) *Parsers { func LoadParsers(cConfig *csconfig.Config, parsers *Parsers) (*Parsers, error) { var err error - patternsDir := filepath.Join(cConfig.ConfigPaths.ConfigDir, "patterns/") + patternsDir := cConfig.ConfigPaths.PatternDir log.Infof("Loading grok library %s", patternsDir) /* load base regexps for two grok parsers */ parsers.Ctx, err = Init(map[string]interface{}{"patterns": patternsDir, From 368d22ec305c483c45ec459baafff34e8308e8de Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 26 Mar 2024 09:12:57 +0100 Subject: [PATCH 079/318] Bump github.com/jackc/pgx/v4 from 4.14.1 to 4.18.2 (#2887) Bumps [github.com/jackc/pgx/v4](https://github.com/jackc/pgx) from 4.14.1 to 4.18.2. - [Changelog](https://github.com/jackc/pgx/blob/v4.18.2/CHANGELOG.md) - [Commits](https://github.com/jackc/pgx/compare/v4.14.1...v4.18.2) --- updated-dependencies: - dependency-name: github.com/jackc/pgx/v4 dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 18 +++++++++--------- go.sum | 36 ++++++++++++++++++------------------ 2 files changed, 27 insertions(+), 27 deletions(-) diff --git a/go.mod b/go.mod index 22d52be1ca8..604fef762b8 100644 --- a/go.mod +++ b/go.mod @@ -55,7 +55,7 @@ require ( github.com/hashicorp/go-version v1.2.1 github.com/hexops/gotextdiff v1.0.3 github.com/ivanpirog/coloredcobra v1.0.1 - github.com/jackc/pgx/v4 v4.14.1 + github.com/jackc/pgx/v4 v4.18.2 github.com/jarcoal/httpmock v1.1.0 github.com/jszwec/csvutil v1.5.1 github.com/lithammer/dedent v1.1.0 @@ -81,9 +81,9 @@ require ( github.com/umahmood/haversine v0.0.0-20151105152445-808ab04add26 github.com/wasilibs/go-re2 v1.3.0 github.com/xhit/go-simple-mail/v2 v2.16.0 - golang.org/x/crypto v0.17.0 + golang.org/x/crypto v0.20.0 golang.org/x/mod v0.11.0 - golang.org/x/sys v0.15.0 + golang.org/x/sys v0.17.0 golang.org/x/text v0.14.0 google.golang.org/grpc v1.56.3 google.golang.org/protobuf v1.33.0 @@ -137,12 +137,12 @@ require ( github.com/imdario/mergo v0.3.12 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jackc/chunkreader/v2 v2.0.1 // indirect - github.com/jackc/pgconn v1.10.1 // indirect + github.com/jackc/pgconn v1.14.3 // indirect github.com/jackc/pgio v1.0.0 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect - github.com/jackc/pgproto3/v2 v2.2.0 // indirect - github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b // indirect - github.com/jackc/pgtype v1.9.1 // indirect + github.com/jackc/pgproto3/v2 v2.3.3 // indirect + github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect + github.com/jackc/pgtype v1.14.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect @@ -198,9 +198,9 @@ require ( github.com/zclconf/go-cty v1.8.0 // indirect 
go.mongodb.org/mongo-driver v1.9.4 // indirect golang.org/x/arch v0.3.0 // indirect - golang.org/x/net v0.19.0 // indirect + golang.org/x/net v0.21.0 // indirect golang.org/x/sync v0.6.0 // indirect - golang.org/x/term v0.15.0 // indirect + golang.org/x/term v0.17.0 // indirect golang.org/x/time v0.3.0 // indirect golang.org/x/tools v0.8.1-0.20230428195545-5283a0178901 // indirect golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect diff --git a/go.sum b/go.sum index 07bddaf8967..17a83cd6637 100644 --- a/go.sum +++ b/go.sum @@ -368,8 +368,8 @@ github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsU github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o= github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY= github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= -github.com/jackc/pgconn v1.10.1 h1:DzdIHIjG1AxGwoEEqS+mGsURyjt4enSmqzACXvVzOT8= -github.com/jackc/pgconn v1.10.1/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= +github.com/jackc/pgconn v1.14.3 h1:bVoTr12EGANZz66nZPkMInAV/KHD2TxH9npjXXgiB3w= +github.com/jackc/pgconn v1.14.3/go.mod h1:RZbme4uasqzybK2RK5c65VsHxoyaml09lx3tXOcO/VM= github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= @@ -385,26 +385,26 @@ github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvW github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgproto3/v2 v2.2.0 
h1:r7JypeP2D3onoQTCxWdTpCtJ4D+qpKr0TxvoyMhZ5ns= -github.com/jackc/pgproto3/v2 v2.2.0/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b h1:C8S2+VttkHFdOOCXJe+YGfa4vHYwlt4Zx+IVXQ97jYg= +github.com/jackc/pgproto3/v2 v2.3.3 h1:1HLSx5H+tXR9pW3in3zaztoEwQYRC9SQaYUHjTSUOag= +github.com/jackc/pgproto3/v2 v2.3.3/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= +github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk= +github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM= -github.com/jackc/pgtype v1.9.1 h1:MJc2s0MFS8C3ok1wQTdQxWuXQcB6+HwAm5x1CzW7mf0= -github.com/jackc/pgtype v1.9.1/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= +github.com/jackc/pgtype v1.14.0 h1:y+xUdabmyMkJLyApYuPj38mW+aAIqCe5uuBB51rH3Vw= +github.com/jackc/pgtype v1.14.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= github.com/jackc/pgx/v4 
v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs= -github.com/jackc/pgx/v4 v4.14.1 h1:71oo1KAGI6mXhLiTMn6iDFcp3e7+zon/capWjl2OEFU= -github.com/jackc/pgx/v4 v4.14.1/go.mod h1:RgDuE4Z34o7XE92RpLsvFiOEfrAUT0Xt2KxvX73W06M= +github.com/jackc/pgx/v4 v4.18.2 h1:xVpYkNR5pk5bMCZGfClbO962UIqVABcAGt7ha1s/FeU= +github.com/jackc/pgx/v4 v4.18.2/go.mod h1:Ey4Oru5tH5sB6tV7hDmfWFahwF15Eb7DNXlRKx2CkVw= github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jackc/puddle v1.2.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jarcoal/httpmock v1.1.0 h1:F47ChZj1Y2zFsCXxNkBPwNNKnAyOATcdQibk0qEdVCE= github.com/jarcoal/httpmock v1.1.0/go.mod h1:ATjnClrvW/3tijVmpL/va5Z3aAyGvqU3gCT8nX0Txik= github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE= @@ -757,8 +757,8 @@ golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= -golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= -golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.20.0 h1:jmAMJJZXr5KiCw05dfYK9QnqaqKLYXijU23lsEdcQqg= +golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZPQ= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod 
h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= @@ -791,8 +791,8 @@ golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= -golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= -golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= +golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -841,8 +841,8 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= 
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -850,8 +850,8 @@ golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= -golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= -golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= +golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= From df13f4315620ab601a16a077936b0185c3a584d0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 26 Mar 2024 09:13:16 +0100 Subject: [PATCH 080/318] Bump github.com/docker/docker (#2913) Bumps [github.com/docker/docker](https://github.com/docker/docker) from 24.0.7+incompatible to 24.0.9+incompatible. - [Release notes](https://github.com/docker/docker/releases) - [Commits](https://github.com/docker/docker/compare/v24.0.7...v24.0.9) --- updated-dependencies: - dependency-name: github.com/docker/docker dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 604fef762b8..50b88f7d425 100644 --- a/go.mod +++ b/go.mod @@ -31,7 +31,7 @@ require ( github.com/crowdsecurity/machineid v1.0.2 github.com/davecgh/go-spew v1.1.1 github.com/dghubble/sling v1.3.0 - github.com/docker/docker v24.0.7+incompatible + github.com/docker/docker v24.0.9+incompatible github.com/docker/go-connections v0.4.0 github.com/fatih/color v1.15.0 github.com/fsnotify/fsnotify v1.6.0 diff --git a/go.sum b/go.sum index 17a83cd6637..8f91bd31ff6 100644 --- a/go.sum +++ b/go.sum @@ -116,8 +116,8 @@ github.com/dghubble/sling v1.3.0 h1:pZHjCJq4zJvc6qVQ5wN1jo5oNZlNE0+8T/h0XeXBUKU= github.com/dghubble/sling v1.3.0/go.mod h1:XXShWaBWKzNLhu2OxikSNFrlsvowtz4kyRuXUG7oQKY= github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v24.0.7+incompatible h1:Wo6l37AuwP3JaMnZa226lzVXGA3F9Ig1seQen0cKYlM= -github.com/docker/docker v24.0.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v24.0.9+incompatible h1:HPGzNmwfLZWdxHqK9/II92pyi1EpYKsAqcl4G0Of9v0= +github.com/docker/docker v24.0.9+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= From be97466809af04775f82d481371f459498536fd8 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 26 Mar 2024 09:30:32 +0100 Subject: [PATCH 081/318] CI: use golangci-lint 1.57 
(#2916) --- .github/workflows/go-tests-windows.yml | 2 +- .github/workflows/go-tests.yml | 2 +- .golangci.yml | 24 +++++++++++++----------- 3 files changed, 15 insertions(+), 13 deletions(-) diff --git a/.github/workflows/go-tests-windows.yml b/.github/workflows/go-tests-windows.yml index 643cb9b39c1..6b2f1132a82 100644 --- a/.github/workflows/go-tests-windows.yml +++ b/.github/workflows/go-tests-windows.yml @@ -56,7 +56,7 @@ jobs: - name: golangci-lint uses: golangci/golangci-lint-action@v4 with: - version: v1.56 + version: v1.57 args: --issues-exit-code=1 --timeout 10m only-new-issues: false # the cache is already managed above, enabling it here diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index 918cefb26ed..d76315462f3 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -157,7 +157,7 @@ jobs: - name: golangci-lint uses: golangci/golangci-lint-action@v4 with: - version: v1.56 + version: v1.57 args: --issues-exit-code=1 --timeout 10m only-new-issues: false # the cache is already managed above, enabling it here diff --git a/.golangci.yml b/.golangci.yml index 758327e40fd..df0cb67d1a8 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,12 +1,5 @@ # https://github.com/golangci/golangci-lint/blob/master/.golangci.reference.yml -run: - skip-dirs: - - pkg/time/rate - skip-files: - - pkg/yamlpatch/merge.go - - pkg/yamlpatch/merge_test.go - linters-settings: cyclop: # lower this after refactoring @@ -19,6 +12,10 @@ linters-settings: - prefix(github.com/crowdsecurity) - prefix(github.com/crowdsecurity/crowdsec) + gomoddirectives: + replace-allow-list: + - golang.org/x/time/rate + gocognit: # lower this after refactoring min-complexity: 145 @@ -40,7 +37,6 @@ linters-settings: statements: 122 govet: - check-shadowing: true enable: - atomicalign - deepequalerrors @@ -295,15 +291,21 @@ issues: # “Look, that’s why there’s rules, understand? 
So that you think before you # break ‘em.” ― Terry Pratchett + exclude-dirs: + - pkg/time/rate + + exclude-files: + - pkg/yamlpatch/merge.go + - pkg/yamlpatch/merge_test.go + + exclude-generated-strict: true + max-issues-per-linter: 0 max-same-issues: 0 exclude-rules: # Won't fix: - - path: go.mod - text: "replacement are not allowed: golang.org/x/time/rate" - # `err` is often shadowed, we may continue to do it - linters: - govet From 63bd31b471e765dd85a2d7760483782111e0fb68 Mon Sep 17 00:00:00 2001 From: "Thibault \"bui\" Koechlin" Date: Fri, 29 Mar 2024 17:57:54 +0100 Subject: [PATCH 082/318] Fix REQUEST_URI behavior + fix #2891 (#2917) * fix our behavior to comply more with modsec, REQUEST_URI should be: path+query string * fix #2891 as well * add new transforms * add transform tests --- .../modules/appsec/appsec_others_test.go | 74 +++++++ pkg/acquisition/modules/appsec/appsec_test.go | 200 ++++++++++++++++++ .../modules/appsec/appsec_win_test.go | 46 ++++ pkg/appsec/appsec_rule/modsecurity.go | 13 +- pkg/appsec/request.go | 6 +- 5 files changed, 333 insertions(+), 6 deletions(-) create mode 100644 pkg/acquisition/modules/appsec/appsec_others_test.go create mode 100644 pkg/acquisition/modules/appsec/appsec_win_test.go diff --git a/pkg/acquisition/modules/appsec/appsec_others_test.go b/pkg/acquisition/modules/appsec/appsec_others_test.go new file mode 100644 index 00000000000..93edc9d9ec3 --- /dev/null +++ b/pkg/acquisition/modules/appsec/appsec_others_test.go @@ -0,0 +1,74 @@ +//go:build !windows +// +build !windows + +package appsecacquisition + +import ( + "testing" + + "github.com/crowdsecurity/crowdsec/pkg/appsec" + "github.com/crowdsecurity/crowdsec/pkg/appsec/appsec_rule" + "github.com/crowdsecurity/crowdsec/pkg/types" + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/require" +) + +func TestAppsecRuleTransformsOthers(t *testing.T) { + + log.SetLevel(log.TraceLevel) + tests := []appsecRuleTest{ + { + name: "normalizepath", + 
expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "equals", Value: "b/c"}, + Transform: []string{"normalizepath"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/?foo=a/../b/c", + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, events[1].Type) + require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + }, + }, + { + name: "normalizepath #2", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "equals", Value: "b/c/"}, + Transform: []string{"normalizepath"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/?foo=a/../b/c/////././././", + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, events[1].Type) + require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + loadAppSecEngine(test, t) + }) + } +} diff --git a/pkg/acquisition/modules/appsec/appsec_test.go b/pkg/acquisition/modules/appsec/appsec_test.go index 25aea0c78ea..d98215bf2c7 100644 --- a/pkg/acquisition/modules/appsec/appsec_test.go +++ b/pkg/acquisition/modules/appsec/appsec_test.go @@ -1284,6 +1284,206 @@ func TestAppsecRuleMatches(t *testing.T) { } } +func TestAppsecRuleTransforms(t *testing.T) { + + log.SetLevel(log.TraceLevel) + tests := []appsecRuleTest{ + 
{ + name: "Basic matching rule", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"URI"}, + Match: appsec_rule.Match{Type: "equals", Value: "/toto"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/toto", + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, events[1].Type) + require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + }, + }, + { + name: "lowercase", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"URI"}, + Match: appsec_rule.Match{Type: "equals", Value: "/toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/TOTO", + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, events[1].Type) + require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + }, + }, + { + name: "uppercase", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"URI"}, + Match: appsec_rule.Match{Type: "equals", Value: "/TOTO"}, + Transform: []string{"uppercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/toto", + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, events[1].Type) + require.Equal(t, "rule1", 
events[1].Appsec.MatchedRules[0]["msg"]) + }, + }, + { + name: "b64decode", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "equals", Value: "toto"}, + Transform: []string{"b64decode"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/?foo=dG90bw", + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, events[1].Type) + require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + }, + }, + { + name: "b64decode with extra padding", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "equals", Value: "toto"}, + Transform: []string{"b64decode"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/?foo=dG90bw===", + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, events[1].Type) + require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + }, + }, + { + name: "length", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "gte", Value: "3"}, + Transform: []string{"length"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/?foo=toto", + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse 
appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, events[1].Type) + require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + }, + }, + { + name: "urldecode", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "equals", Value: "BB/A"}, + Transform: []string{"urldecode"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/?foo=%42%42%2F%41", + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, events[1].Type) + require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + }, + }, + { + name: "trim", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "equals", Value: "BB/A"}, + Transform: []string{"urldecode", "trim"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/?foo=%20%20%42%42%2F%41%20%20", + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, events[1].Type) + require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + loadAppSecEngine(test, t) + }) + } +} + func loadAppSecEngine(test appsecRuleTest, t *testing.T) { if testing.Verbose() { log.SetLevel(log.TraceLevel) diff --git 
a/pkg/acquisition/modules/appsec/appsec_win_test.go b/pkg/acquisition/modules/appsec/appsec_win_test.go new file mode 100644 index 00000000000..e85d75df251 --- /dev/null +++ b/pkg/acquisition/modules/appsec/appsec_win_test.go @@ -0,0 +1,46 @@ +//go:build windows +// +build windows + +package appsecacquisition + +import ( + "testing" + + log "github.com/sirupsen/logrus" +) + +func TestAppsecRuleTransformsWindows(t *testing.T) { + + log.SetLevel(log.TraceLevel) + tests := []appsecRuleTest{ + // { + // name: "normalizepath", + // expected_load_ok: true, + // inband_rules: []appsec_rule.CustomRule{ + // { + // Name: "rule1", + // Zones: []string{"ARGS"}, + // Variables: []string{"foo"}, + // Match: appsec_rule.Match{Type: "equals", Value: "b/c"}, + // Transform: []string{"normalizepath"}, + // }, + // }, + // input_request: appsec.ParsedRequest{ + // RemoteAddr: "1.2.3.4", + // Method: "GET", + // URI: "/?foo=a/../b/c", + // }, + // output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + // require.Len(t, events, 2) + // require.Equal(t, types.APPSEC, events[0].Type) + // require.Equal(t, types.LOG, events[1].Type) + // require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + // }, + // }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + loadAppSecEngine(test, t) + }) + } +} diff --git a/pkg/appsec/appsec_rule/modsecurity.go b/pkg/appsec/appsec_rule/modsecurity.go index 0b117cd773d..a269384ccb9 100644 --- a/pkg/appsec/appsec_rule/modsecurity.go +++ b/pkg/appsec/appsec_rule/modsecurity.go @@ -19,7 +19,8 @@ var zonesMap map[string]string = map[string]string{ "HEADERS": "REQUEST_HEADERS", "METHOD": "REQUEST_METHOD", "PROTOCOL": "REQUEST_PROTOCOL", - "URI": "REQUEST_URI", + "URI": "REQUEST_FILENAME", + "URI_FULL": "REQUEST_URI", "RAW_BODY": "REQUEST_BODY", "FILENAMES": "FILES", } @@ -28,8 +29,14 @@ var transformMap map[string]string = map[string]string{ 
"lowercase": "t:lowercase", "uppercase": "t:uppercase", "b64decode": "t:base64Decode", - "hexdecode": "t:hexDecode", - "length": "t:length", + //"hexdecode": "t:hexDecode", -> not supported by coraza + "length": "t:length", + "urldecode": "t:urlDecode", + "trim": "t:trim", + "normalize_path": "t:normalizePath", + "normalizepath": "t:normalizePath", + "htmlentitydecode": "t:htmlEntityDecode", + "html_entity_decode": "t:htmlEntityDecode", } var matchMap map[string]string = map[string]string{ diff --git a/pkg/appsec/request.go b/pkg/appsec/request.go index a9eb0d372a4..66b5d797fd7 100644 --- a/pkg/appsec/request.go +++ b/pkg/appsec/request.go @@ -365,11 +365,11 @@ func NewParsedRequestFromRequest(r *http.Request, logger *logrus.Entry) (ParsedR UUID: uuid.New().String(), ClientHost: clientHost, ClientIP: clientIP, - URI: parsedURL.Path, + URI: clientURI, Method: clientMethod, - Host: r.Host, + Host: clientHost, Headers: r.Header, - URL: r.URL, + URL: parsedURL, Proto: r.Proto, Body: body, Args: ParseQuery(parsedURL.RawQuery), From 26bcd0912aa432dbf525359ab0fd8426fd24745a Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 3 Apr 2024 13:34:35 +0200 Subject: [PATCH 083/318] docker: distribute geoip db in slim image (#2920) --- Dockerfile | 11 ++--------- docker/test/default.env | 2 +- docker/test/tests/test_flavors.py | 2 +- 3 files changed, 4 insertions(+), 11 deletions(-) diff --git a/Dockerfile b/Dockerfile index ed1ac5e28c2..53a6cd04512 100644 --- a/Dockerfile +++ b/Dockerfile @@ -43,11 +43,12 @@ COPY --from=build /go/bin/yq /usr/local/bin/crowdsec /usr/local/bin/cscli /usr/l COPY --from=build /etc/crowdsec /staging/etc/crowdsec COPY --from=build /go/src/crowdsec/docker/docker_start.sh / COPY --from=build /go/src/crowdsec/docker/config.yaml /staging/etc/crowdsec/config.yaml +COPY --from=build /var/lib/crowdsec /staging/var/lib/crowdsec RUN yq -n '.url="http://0.0.0.0:8080"' | install -m 0600 /dev/stdin 
/staging/etc/crowdsec/local_api_credentials.yaml ENTRYPOINT /bin/bash /docker_start.sh -FROM slim as plugins +FROM slim as full # Due to the wizard using cp -n, we have to copy the config files directly from the source as -n does not exist in busybox cp # The files are here for reference, as users will need to mount a new version to be actually able to use notifications @@ -60,11 +61,3 @@ COPY --from=build \ /staging/etc/crowdsec/notifications/ COPY --from=build /usr/local/lib/crowdsec/plugins /usr/local/lib/crowdsec/plugins - -FROM slim as geoip - -COPY --from=build /var/lib/crowdsec /staging/var/lib/crowdsec - -FROM plugins as full - -COPY --from=build /var/lib/crowdsec /staging/var/lib/crowdsec diff --git a/docker/test/default.env b/docker/test/default.env index c46fdab7f1d..9607c8aaa5b 100644 --- a/docker/test/default.env +++ b/docker/test/default.env @@ -6,7 +6,7 @@ CROWDSEC_TEST_VERSION="dev" # All of the following flavors will be tested when using the "flavor" fixture CROWDSEC_TEST_FLAVORS="full" # CROWDSEC_TEST_FLAVORS="full,slim,debian" -# CROWDSEC_TEST_FLAVORS="full,slim,debian,geoip,plugins-debian-slim,debian-geoip,debian-plugins" +# CROWDSEC_TEST_FLAVORS="full,slim,debian,debian-slim" # network to use CROWDSEC_TEST_NETWORK="net-test" diff --git a/docker/test/tests/test_flavors.py b/docker/test/tests/test_flavors.py index 223cf995cba..7e78b8d681b 100644 --- a/docker/test/tests/test_flavors.py +++ b/docker/test/tests/test_flavors.py @@ -42,7 +42,7 @@ def test_flavor_content(crowdsec, flavor): x = cs.cont.exec_run( 'ls -1 /usr/local/lib/crowdsec/plugins/') stdout = x.output.decode() - if 'slim' in flavor or 'geoip' in flavor: + if 'slim' in flavor: # the exact return code and full message depend # on the 'ls' implementation (busybox vs coreutils) assert x.exit_code != 0 From 912c4bca707cbe9434f5ee62f1d1b92d34395ccd Mon Sep 17 00:00:00 2001 From: "Thibault \"bui\" Koechlin" Date: Wed, 3 Apr 2024 17:49:05 +0200 Subject: [PATCH 084/318] split & reorganize 
tests a bit. Add tests on existing zones (#2925) --- .../modules/appsec/appsec_hooks_test.go | 714 ++++++++ ...psec_others_test.go => appsec_lnx_test.go} | 0 .../modules/appsec/appsec_remediation_test.go | 320 ++++ .../modules/appsec/appsec_rules_test.go | 733 +++++++++ pkg/acquisition/modules/appsec/appsec_test.go | 1454 ----------------- 5 files changed, 1767 insertions(+), 1454 deletions(-) create mode 100644 pkg/acquisition/modules/appsec/appsec_hooks_test.go rename pkg/acquisition/modules/appsec/{appsec_others_test.go => appsec_lnx_test.go} (100%) create mode 100644 pkg/acquisition/modules/appsec/appsec_remediation_test.go create mode 100644 pkg/acquisition/modules/appsec/appsec_rules_test.go diff --git a/pkg/acquisition/modules/appsec/appsec_hooks_test.go b/pkg/acquisition/modules/appsec/appsec_hooks_test.go new file mode 100644 index 00000000000..3cb2fcfde29 --- /dev/null +++ b/pkg/acquisition/modules/appsec/appsec_hooks_test.go @@ -0,0 +1,714 @@ +package appsecacquisition + +import ( + "net/http" + "net/url" + "testing" + + "github.com/crowdsecurity/crowdsec/pkg/appsec" + "github.com/crowdsecurity/crowdsec/pkg/appsec/appsec_rule" + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/davecgh/go-spew/spew" + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/require" +) + +func TestAppsecOnMatchHooks(t *testing.T) { + tests := []appsecRuleTest{ + { + name: "no rule : check return code", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 
2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, events[1].Type) + require.Len(t, responses, 1) + require.Equal(t, 403, responses[0].BouncerHTTPResponseCode) + require.Equal(t, 403, responses[0].UserHTTPResponseCode) + require.Equal(t, appsec.BanRemediation, responses[0].Action) + + }, + }, + { + name: "on_match: change return code", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + on_match: []appsec.Hook{ + {Filter: "IsInBand == true", Apply: []string{"SetReturnCode(413)"}}, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, events[1].Type) + require.Len(t, responses, 1) + require.Equal(t, 403, responses[0].BouncerHTTPResponseCode) + require.Equal(t, 413, responses[0].UserHTTPResponseCode) + require.Equal(t, appsec.BanRemediation, responses[0].Action) + }, + }, + { + name: "on_match: change action to a non standard one (log)", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + on_match: []appsec.Hook{ + {Filter: "IsInBand == true", Apply: []string{"SetRemediation('log')"}}, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + output_asserts: func(events []types.Event, responses 
[]appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, events[1].Type) + require.Len(t, responses, 1) + require.Equal(t, "log", responses[0].Action) + require.Equal(t, 403, responses[0].BouncerHTTPResponseCode) + require.Equal(t, 403, responses[0].UserHTTPResponseCode) + }, + }, + { + name: "on_match: change action to another standard one (allow)", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + on_match: []appsec.Hook{ + {Filter: "IsInBand == true", Apply: []string{"SetRemediation('allow')"}}, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, events[1].Type) + require.Len(t, responses, 1) + require.Equal(t, appsec.AllowRemediation, responses[0].Action) + }, + }, + { + name: "on_match: change action to another standard one (ban)", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + on_match: []appsec.Hook{ + {Filter: "IsInBand == true", Apply: []string{"SetRemediation('ban')"}}, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + output_asserts: func(events []types.Event, responses 
[]appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, responses, 1) + //note: SetAction normalizes deny, ban and block to ban + require.Equal(t, appsec.BanRemediation, responses[0].Action) + }, + }, + { + name: "on_match: change action to another standard one (captcha)", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + on_match: []appsec.Hook{ + {Filter: "IsInBand == true", Apply: []string{"SetRemediation('captcha')"}}, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, responses, 1) + //note: SetAction normalizes deny, ban and block to ban + require.Equal(t, appsec.CaptchaRemediation, responses[0].Action) + }, + }, + { + name: "on_match: change action to a non standard one", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + on_match: []appsec.Hook{ + {Filter: "IsInBand == true", Apply: []string{"SetRemediation('foobar')"}}, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, events[1].Type) + require.Len(t, responses, 1) + 
require.Equal(t, "foobar", responses[0].Action) + }, + }, + { + name: "on_match: cancel alert", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + on_match: []appsec.Hook{ + {Filter: "IsInBand == true && LogInfo('XX -> %s', evt.Appsec.MatchedRules.GetName())", Apply: []string{"CancelAlert()"}}, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 1) + require.Equal(t, types.LOG, events[0].Type) + require.Len(t, responses, 1) + require.Equal(t, appsec.BanRemediation, responses[0].Action) + }, + }, + { + name: "on_match: cancel event", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + on_match: []appsec.Hook{ + {Filter: "IsInBand == true", Apply: []string{"CancelEvent()"}}, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 1) + require.Equal(t, types.APPSEC, events[0].Type) + require.Len(t, responses, 1) + require.Equal(t, appsec.BanRemediation, responses[0].Action) + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + loadAppSecEngine(test, t) + }) + } +} + +func TestAppsecPreEvalHooks(t *testing.T) { + + tests := 
[]appsecRuleTest{ + { + name: "Basic on_load hook to disable inband rule", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + pre_eval: []appsec.Hook{ + {Filter: "1 == 1", Apply: []string{"RemoveInBandRuleByName('rule1')"}}, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Empty(t, events) + require.Len(t, responses, 1) + require.False(t, responses[0].InBandInterrupt) + require.False(t, responses[0].OutOfBandInterrupt) + }, + }, + { + name: "Basic on_load fails to disable rule", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + pre_eval: []appsec.Hook{ + {Filter: "1 ==2", Apply: []string{"RemoveInBandRuleByName('rule1')"}}, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + + require.Equal(t, types.LOG, events[1].Type) + require.True(t, events[1].Appsec.HasInBandMatches) + require.Len(t, events[1].Appsec.MatchedRules, 1) + require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + + require.Len(t, responses, 1) + require.True(t, responses[0].InBandInterrupt) + + }, + }, + { + name: "on_load : 
disable inband by tag", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rulez", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + pre_eval: []appsec.Hook{ + {Apply: []string{"RemoveInBandRuleByTag('crowdsec-rulez')"}}, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Empty(t, events) + require.Len(t, responses, 1) + require.False(t, responses[0].InBandInterrupt) + require.False(t, responses[0].OutOfBandInterrupt) + }, + }, + { + name: "on_load : disable inband by ID", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rulez", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + pre_eval: []appsec.Hook{ + {Apply: []string{"RemoveInBandRuleByID(1516470898)"}}, //rule ID is generated at runtime. 
If you change rule, it will break the test (: + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Empty(t, events) + require.Len(t, responses, 1) + require.False(t, responses[0].InBandInterrupt) + require.False(t, responses[0].OutOfBandInterrupt) + }, + }, + { + name: "on_load : disable inband by name", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rulez", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + pre_eval: []appsec.Hook{ + {Apply: []string{"RemoveInBandRuleByName('rulez')"}}, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Empty(t, events) + require.Len(t, responses, 1) + require.False(t, responses[0].InBandInterrupt) + require.False(t, responses[0].OutOfBandInterrupt) + }, + }, + { + name: "on_load : outofband default behavior", + expected_load_ok: true, + outofband_rules: []appsec_rule.CustomRule{ + { + Name: "rulez", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 1) + require.Equal(t, 
types.LOG, events[0].Type) + require.True(t, events[0].Appsec.HasOutBandMatches) + require.False(t, events[0].Appsec.HasInBandMatches) + require.Len(t, events[0].Appsec.MatchedRules, 1) + require.Equal(t, "rulez", events[0].Appsec.MatchedRules[0]["msg"]) + //maybe surprising, but response won't mention OOB event, as it's sent as soon as the inband phase is over. + require.Len(t, responses, 1) + require.False(t, responses[0].InBandInterrupt) + require.False(t, responses[0].OutOfBandInterrupt) + }, + }, + { + name: "on_load : set remediation by tag", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rulez", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + pre_eval: []appsec.Hook{ + {Apply: []string{"SetRemediationByTag('crowdsec-rulez', 'foobar')"}}, //rule ID is generated at runtime. If you change rule, it will break the test (: + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Len(t, responses, 1) + require.Equal(t, "foobar", responses[0].Action) + }, + }, + { + name: "on_load : set remediation by name", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rulez", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + pre_eval: []appsec.Hook{ + {Apply: []string{"SetRemediationByName('rulez', 'foobar')"}}, //rule ID is generated at runtime. 
If you change rule, it will break the test (: + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Len(t, responses, 1) + require.Equal(t, "foobar", responses[0].Action) + }, + }, + { + name: "on_load : set remediation by ID", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rulez", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + pre_eval: []appsec.Hook{ + {Apply: []string{"SetRemediationByID(1516470898, 'foobar')"}}, //rule ID is generated at runtime. If you change rule, it will break the test (: + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Len(t, responses, 1) + require.Equal(t, "foobar", responses[0].Action) + require.Equal(t, "foobar", appsecResponse.Action) + require.Equal(t, http.StatusForbidden, appsecResponse.HTTPStatus) + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + loadAppSecEngine(test, t) + }) + } +} + +func TestAppsecRemediationConfigHooks(t *testing.T) { + + tests := []appsecRuleTest{ + { + name: "Basic matching rule", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + 
RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.BanRemediation, responses[0].Action) + require.Equal(t, http.StatusForbidden, statusCode) + require.Equal(t, appsec.BanRemediation, appsecResponse.Action) + require.Equal(t, http.StatusForbidden, appsecResponse.HTTPStatus) + }, + }, + { + name: "SetRemediation", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + on_match: []appsec.Hook{{Apply: []string{"SetRemediation('captcha')"}}}, //rule ID is generated at runtime. 
If you change rule, it will break the test (: + + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.CaptchaRemediation, responses[0].Action) + require.Equal(t, http.StatusForbidden, statusCode) + require.Equal(t, appsec.CaptchaRemediation, appsecResponse.Action) + require.Equal(t, http.StatusForbidden, appsecResponse.HTTPStatus) + }, + }, + { + name: "SetRemediation", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + on_match: []appsec.Hook{{Apply: []string{"SetReturnCode(418)"}}}, //rule ID is generated at runtime. If you change rule, it will break the test (: + + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.BanRemediation, responses[0].Action) + require.Equal(t, http.StatusForbidden, statusCode) + require.Equal(t, appsec.BanRemediation, appsecResponse.Action) + require.Equal(t, http.StatusTeapot, appsecResponse.HTTPStatus) + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + loadAppSecEngine(test, t) + }) + } +} +func TestOnMatchRemediationHooks(t *testing.T) { + tests := []appsecRuleTest{ + { + name: "set remediation to allow with on_match hook", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", 
+ Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + on_match: []appsec.Hook{ + {Filter: "IsInBand == true", Apply: []string{"SetRemediation('allow')"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.AllowRemediation, appsecResponse.Action) + require.Equal(t, http.StatusOK, appsecResponse.HTTPStatus) + }, + }, + { + name: "set remediation to captcha + custom user code with on_match hook", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + DefaultRemediation: appsec.AllowRemediation, + on_match: []appsec.Hook{ + {Filter: "IsInBand == true", Apply: []string{"SetRemediation('captcha')", "SetReturnCode(418)"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + spew.Dump(responses) + spew.Dump(appsecResponse) + + log.Errorf("http status : %d", statusCode) + require.Equal(t, appsec.CaptchaRemediation, appsecResponse.Action) + require.Equal(t, http.StatusTeapot, appsecResponse.HTTPStatus) + require.Equal(t, http.StatusForbidden, statusCode) + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + loadAppSecEngine(test, t) + }) + } +} diff --git a/pkg/acquisition/modules/appsec/appsec_others_test.go b/pkg/acquisition/modules/appsec/appsec_lnx_test.go similarity index 100% rename from pkg/acquisition/modules/appsec/appsec_others_test.go rename to pkg/acquisition/modules/appsec/appsec_lnx_test.go diff --git 
a/pkg/acquisition/modules/appsec/appsec_remediation_test.go b/pkg/acquisition/modules/appsec/appsec_remediation_test.go new file mode 100644 index 00000000000..a7f117389b3 --- /dev/null +++ b/pkg/acquisition/modules/appsec/appsec_remediation_test.go @@ -0,0 +1,320 @@ +package appsecacquisition + +import ( + "net/http" + "net/url" + "testing" + + "github.com/crowdsecurity/crowdsec/pkg/appsec" + "github.com/crowdsecurity/crowdsec/pkg/appsec/appsec_rule" + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/stretchr/testify/require" +) + +func TestAppsecDefaultPassRemediation(t *testing.T) { + + tests := []appsecRuleTest{ + { + name: "Basic non-matching rule", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/", + Args: url.Values{"foo": []string{"tutu"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.AllowRemediation, responses[0].Action) + require.Equal(t, http.StatusOK, statusCode) + require.Equal(t, appsec.AllowRemediation, appsecResponse.Action) + require.Equal(t, http.StatusOK, appsecResponse.HTTPStatus) + }, + }, + { + name: "DefaultPassAction: pass", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/", + Args: url.Values{"foo": []string{"tutu"}}, + }, + DefaultPassAction: "allow", + output_asserts: func(events []types.Event, responses 
[]appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.AllowRemediation, responses[0].Action) + require.Equal(t, http.StatusOK, statusCode) + require.Equal(t, appsec.AllowRemediation, appsecResponse.Action) + require.Equal(t, http.StatusOK, appsecResponse.HTTPStatus) + }, + }, + { + name: "DefaultPassAction: captcha", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/", + Args: url.Values{"foo": []string{"tutu"}}, + }, + DefaultPassAction: "captcha", + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.CaptchaRemediation, responses[0].Action) + require.Equal(t, http.StatusOK, statusCode) //@tko: body is captcha, but as it's 200, captcha won't be showed to user + require.Equal(t, appsec.CaptchaRemediation, appsecResponse.Action) + require.Equal(t, http.StatusOK, appsecResponse.HTTPStatus) + }, + }, + { + name: "DefaultPassHTTPCode: 200", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/", + Args: url.Values{"foo": []string{"tutu"}}, + }, + UserPassedHTTPCode: 200, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.AllowRemediation, responses[0].Action) + require.Equal(t, http.StatusOK, statusCode) + require.Equal(t, 
appsec.AllowRemediation, appsecResponse.Action) + require.Equal(t, http.StatusOK, appsecResponse.HTTPStatus) + }, + }, + { + name: "DefaultPassHTTPCode: 200", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/", + Args: url.Values{"foo": []string{"tutu"}}, + }, + UserPassedHTTPCode: 418, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.AllowRemediation, responses[0].Action) + require.Equal(t, http.StatusOK, statusCode) + require.Equal(t, appsec.AllowRemediation, appsecResponse.Action) + require.Equal(t, http.StatusTeapot, appsecResponse.HTTPStatus) + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + loadAppSecEngine(test, t) + }) + } +} + +func TestAppsecDefaultRemediation(t *testing.T) { + + tests := []appsecRuleTest{ + { + name: "Basic matching rule", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.BanRemediation, responses[0].Action) + require.Equal(t, http.StatusForbidden, statusCode) + require.Equal(t, appsec.BanRemediation, appsecResponse.Action) + require.Equal(t, http.StatusForbidden, appsecResponse.HTTPStatus) + }, + 
}, + { + name: "default remediation to ban (default)", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + DefaultRemediation: "ban", + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.BanRemediation, responses[0].Action) + require.Equal(t, http.StatusForbidden, statusCode) + require.Equal(t, appsec.BanRemediation, appsecResponse.Action) + require.Equal(t, http.StatusForbidden, appsecResponse.HTTPStatus) + }, + }, + { + name: "default remediation to allow", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + DefaultRemediation: "allow", + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.AllowRemediation, responses[0].Action) + require.Equal(t, http.StatusOK, statusCode) + require.Equal(t, appsec.AllowRemediation, appsecResponse.Action) + require.Equal(t, http.StatusOK, appsecResponse.HTTPStatus) + }, + }, + { + name: "default remediation to captcha", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + 
Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + DefaultRemediation: "captcha", + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.CaptchaRemediation, responses[0].Action) + require.Equal(t, http.StatusForbidden, statusCode) + require.Equal(t, appsec.CaptchaRemediation, appsecResponse.Action) + require.Equal(t, http.StatusForbidden, appsecResponse.HTTPStatus) + }, + }, + { + name: "custom user HTTP code", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + UserBlockedHTTPCode: 418, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.BanRemediation, responses[0].Action) + require.Equal(t, http.StatusForbidden, statusCode) + require.Equal(t, appsec.BanRemediation, appsecResponse.Action) + require.Equal(t, http.StatusTeapot, appsecResponse.HTTPStatus) + }, + }, + { + name: "custom remediation + HTTP code", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + UserBlockedHTTPCode: 418, + DefaultRemediation: "foobar", + 
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, "foobar", responses[0].Action) + require.Equal(t, http.StatusForbidden, statusCode) + require.Equal(t, "foobar", appsecResponse.Action) + require.Equal(t, http.StatusTeapot, appsecResponse.HTTPStatus) + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + loadAppSecEngine(test, t) + }) + } +} diff --git a/pkg/acquisition/modules/appsec/appsec_rules_test.go b/pkg/acquisition/modules/appsec/appsec_rules_test.go new file mode 100644 index 00000000000..3c48c50fabb --- /dev/null +++ b/pkg/acquisition/modules/appsec/appsec_rules_test.go @@ -0,0 +1,733 @@ +package appsecacquisition + +import ( + "net/http" + "net/url" + "testing" + + "github.com/crowdsecurity/crowdsec/pkg/appsec" + "github.com/crowdsecurity/crowdsec/pkg/appsec/appsec_rule" + "github.com/crowdsecurity/crowdsec/pkg/types" + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/require" +) + +func TestAppsecRuleMatches(t *testing.T) { + + tests := []appsecRuleTest{ + { + name: "Basic matching rule", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + + require.Equal(t, types.LOG, events[1].Type) + require.True(t, events[1].Appsec.HasInBandMatches) + require.Len(t, events[1].Appsec.MatchedRules, 1) + require.Equal(t, "rule1", 
events[1].Appsec.MatchedRules[0]["msg"]) + + require.Len(t, responses, 1) + require.True(t, responses[0].InBandInterrupt) + }, + }, + { + name: "Basic non-matching rule", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"tutu"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Empty(t, events) + require.Len(t, responses, 1) + require.False(t, responses[0].InBandInterrupt) + require.False(t, responses[0].OutOfBandInterrupt) + }, + }, + { + name: "default remediation to allow", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + DefaultRemediation: "allow", + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.AllowRemediation, responses[0].Action) + require.Equal(t, http.StatusOK, statusCode) + require.Equal(t, appsec.AllowRemediation, appsecResponse.Action) + require.Equal(t, http.StatusOK, appsecResponse.HTTPStatus) + }, + }, + { + name: "default remediation to captcha", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + 
Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + DefaultRemediation: "captcha", + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.CaptchaRemediation, responses[0].Action) + require.Equal(t, http.StatusForbidden, statusCode) + require.Equal(t, appsec.CaptchaRemediation, appsecResponse.Action) + require.Equal(t, http.StatusForbidden, appsecResponse.HTTPStatus) + }, + }, + { + name: "no default remediation / custom user HTTP code", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + UserBlockedHTTPCode: 418, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.BanRemediation, responses[0].Action) + require.Equal(t, http.StatusForbidden, statusCode) + require.Equal(t, appsec.BanRemediation, appsecResponse.Action) + require.Equal(t, http.StatusTeapot, appsecResponse.HTTPStatus) + }, + }, + { + name: "no match but try to set remediation to captcha with on_match hook", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + on_match: []appsec.Hook{ + {Filter: "IsInBand == true", Apply: []string{"SetRemediation('captcha')"}}, + }, + input_request: appsec.ParsedRequest{ + 
RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"bla"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Empty(t, events) + require.Equal(t, http.StatusOK, statusCode) + require.Equal(t, appsec.AllowRemediation, appsecResponse.Action) + }, + }, + { + name: "no match but try to set user HTTP code with on_match hook", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + on_match: []appsec.Hook{ + {Filter: "IsInBand == true", Apply: []string{"SetReturnCode(418)"}}, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"bla"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Empty(t, events) + require.Equal(t, http.StatusOK, statusCode) + require.Equal(t, appsec.AllowRemediation, appsecResponse.Action) + }, + }, + { + name: "no match but try to set remediation with pre_eval hook", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + pre_eval: []appsec.Hook{ + {Filter: "IsInBand == true", Apply: []string{"SetRemediationByName('rule42', 'captcha')"}}, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"bla"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + 
require.Empty(t, events) + require.Equal(t, http.StatusOK, statusCode) + require.Equal(t, appsec.AllowRemediation, appsecResponse.Action) + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + loadAppSecEngine(test, t) + }) + } +} + +func TestAppsecRuleTransforms(t *testing.T) { + + log.SetLevel(log.TraceLevel) + tests := []appsecRuleTest{ + { + name: "Basic matching rule", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"URI"}, + Match: appsec_rule.Match{Type: "equals", Value: "/toto"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/toto", + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, events[1].Type) + require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + }, + }, + { + name: "lowercase", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"URI"}, + Match: appsec_rule.Match{Type: "equals", Value: "/toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/TOTO", + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, events[1].Type) + require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + }, + }, + { + name: "uppercase", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"URI"}, + Match: appsec_rule.Match{Type: "equals", Value: "/TOTO"}, + Transform: []string{"uppercase"}, + }, + }, + input_request: 
appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/toto", + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, events[1].Type) + require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + }, + }, + { + name: "b64decode", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "equals", Value: "toto"}, + Transform: []string{"b64decode"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/?foo=dG90bw", + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, events[1].Type) + require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + }, + }, + { + name: "b64decode with extra padding", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "equals", Value: "toto"}, + Transform: []string{"b64decode"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/?foo=dG90bw===", + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, events[1].Type) + require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + }, + }, + { + name: "length", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: 
"rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "gte", Value: "3"}, + Transform: []string{"length"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/?foo=toto", + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, events[1].Type) + require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + }, + }, + { + name: "urldecode", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "equals", Value: "BB/A"}, + Transform: []string{"urldecode"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/?foo=%42%42%2F%41", + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, events[1].Type) + require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + }, + }, + { + name: "trim", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "equals", Value: "BB/A"}, + Transform: []string{"urldecode", "trim"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/?foo=%20%20%42%42%2F%41%20%20", + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, 
events[1].Type) + require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + loadAppSecEngine(test, t) + }) + } +} + +func TestAppsecRuleZones(t *testing.T) { + + log.SetLevel(log.TraceLevel) + tests := []appsecRuleTest{ + { + name: "rule: ARGS", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Match: appsec_rule.Match{Type: "equals", Value: "toto"}, + }, + { + Name: "rule2", + Zones: []string{"ARGS"}, + Match: appsec_rule.Match{Type: "equals", Value: "foobar"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/foobar?something=toto&foobar=smth", + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, events[1].Type) + require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + }, + }, + { + name: "rule: ARGS_NAMES", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS_NAMES"}, + Match: appsec_rule.Match{Type: "equals", Value: "toto"}, + }, + { + Name: "rule2", + Zones: []string{"ARGS_NAMES"}, + Match: appsec_rule.Match{Type: "equals", Value: "foobar"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/foobar?something=toto&foobar=smth", + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, events[1].Type) + require.Equal(t, "rule2", events[1].Appsec.MatchedRules[0]["msg"]) + }, + }, + { + name: "rule: BODY_ARGS", + expected_load_ok: true, + inband_rules: 
[]appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"BODY_ARGS"}, + Match: appsec_rule.Match{Type: "equals", Value: "toto"}, + }, + { + Name: "rule2", + Zones: []string{"BODY_ARGS"}, + Match: appsec_rule.Match{Type: "equals", Value: "foobar"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/", + Body: []byte("smth=toto&foobar=other"), + Headers: http.Header{"Content-Type": []string{"application/x-www-form-urlencoded"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, events[1].Type) + require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + }, + }, + { + name: "rule: BODY_ARGS_NAMES", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"BODY_ARGS_NAMES"}, + Match: appsec_rule.Match{Type: "equals", Value: "toto"}, + }, + { + Name: "rule2", + Zones: []string{"BODY_ARGS_NAMES"}, + Match: appsec_rule.Match{Type: "equals", Value: "foobar"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/", + Body: []byte("smth=toto&foobar=other"), + Headers: http.Header{"Content-Type": []string{"application/x-www-form-urlencoded"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, events[1].Type) + require.Equal(t, "rule2", events[1].Appsec.MatchedRules[0]["msg"]) + }, + }, + { + name: "rule: HEADERS", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"HEADERS"}, + Match: appsec_rule.Match{Type: "equals", Value: "toto"}, + }, + { + Name: "rule2", + 
Zones: []string{"HEADERS"}, + Match: appsec_rule.Match{Type: "equals", Value: "foobar"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/", + Headers: http.Header{"foobar": []string{"toto"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, events[1].Type) + require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + }, + }, + { + name: "rule: HEADERS_NAMES", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"HEADERS_NAMES"}, + Match: appsec_rule.Match{Type: "equals", Value: "toto"}, + }, + { + Name: "rule2", + Zones: []string{"HEADERS_NAMES"}, + Match: appsec_rule.Match{Type: "equals", Value: "foobar"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/", + Headers: http.Header{"foobar": []string{"toto"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, events[1].Type) + require.Equal(t, "rule2", events[1].Appsec.MatchedRules[0]["msg"]) + }, + }, + { + name: "rule: METHOD", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"METHOD"}, + Match: appsec_rule.Match{Type: "equals", Value: "GET"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/", + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, 
events[1].Type) + require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + }, + }, + { + name: "rule: PROTOCOL", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"PROTOCOL"}, + Match: appsec_rule.Match{Type: "contains", Value: "3.1"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/", + Proto: "HTTP/3.1", + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, events[1].Type) + require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + }, + }, + { + name: "rule: URI", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"URI"}, + Match: appsec_rule.Match{Type: "equals", Value: "/foobar"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/foobar", + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, events[1].Type) + require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + }, + }, + { + name: "rule: URI_FULL", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"URI_FULL"}, + Match: appsec_rule.Match{Type: "equals", Value: "/foobar?a=b"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/foobar?a=b", + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, 
types.LOG, events[1].Type) + require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + }, + }, + { + name: "rule: RAW_BODY", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"RAW_BODY"}, + Match: appsec_rule.Match{Type: "equals", Value: "foobar=42421"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/", + Body: []byte("foobar=42421"), + Headers: http.Header{"Content-Type": []string{"application/x-www-form-urlencoded"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, events[1].Type) + require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + loadAppSecEngine(test, t) + }) + } +} diff --git a/pkg/acquisition/modules/appsec/appsec_test.go b/pkg/acquisition/modules/appsec/appsec_test.go index d98215bf2c7..5fe4cfe236c 100644 --- a/pkg/acquisition/modules/appsec/appsec_test.go +++ b/pkg/acquisition/modules/appsec/appsec_test.go @@ -1,8 +1,6 @@ package appsecacquisition import ( - "net/http" - "net/url" "testing" "time" @@ -12,15 +10,8 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/google/uuid" log "github.com/sirupsen/logrus" - "github.com/stretchr/testify/require" ) -/* -Missing tests (wip): - - GenerateResponse - - evt.Appsec and it's subobjects and methods -*/ - type appsecRuleTest struct { name string expected_load_ok bool @@ -39,1451 +30,6 @@ type appsecRuleTest struct { output_asserts func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) } -func TestAppsecOnMatchHooks(t *testing.T) { - tests := []appsecRuleTest{ - { - name: "no rule : check return code", - expected_load_ok: true, 
- inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule1", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Len(t, events, 2) - require.Equal(t, types.APPSEC, events[0].Type) - require.Equal(t, types.LOG, events[1].Type) - require.Len(t, responses, 1) - require.Equal(t, 403, responses[0].BouncerHTTPResponseCode) - require.Equal(t, 403, responses[0].UserHTTPResponseCode) - require.Equal(t, appsec.BanRemediation, responses[0].Action) - - }, - }, - { - name: "on_match: change return code", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule1", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - on_match: []appsec.Hook{ - {Filter: "IsInBand == true", Apply: []string{"SetReturnCode(413)"}}, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Len(t, events, 2) - require.Equal(t, types.APPSEC, events[0].Type) - require.Equal(t, types.LOG, events[1].Type) - require.Len(t, responses, 1) - require.Equal(t, 403, responses[0].BouncerHTTPResponseCode) - require.Equal(t, 413, responses[0].UserHTTPResponseCode) - require.Equal(t, appsec.BanRemediation, responses[0].Action) - }, - }, - { - name: "on_match: change action to a non standard one (log)", - expected_load_ok: true, - 
inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule1", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - on_match: []appsec.Hook{ - {Filter: "IsInBand == true", Apply: []string{"SetRemediation('log')"}}, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Len(t, events, 2) - require.Equal(t, types.APPSEC, events[0].Type) - require.Equal(t, types.LOG, events[1].Type) - require.Len(t, responses, 1) - require.Equal(t, "log", responses[0].Action) - }, - }, - { - name: "on_match: change action to another standard one (allow)", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule1", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - on_match: []appsec.Hook{ - {Filter: "IsInBand == true", Apply: []string{"SetRemediation('allow')"}}, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Len(t, events, 2) - require.Equal(t, types.APPSEC, events[0].Type) - require.Equal(t, types.LOG, events[1].Type) - require.Len(t, responses, 1) - require.Equal(t, appsec.AllowRemediation, responses[0].Action) - }, - }, - { - name: "on_match: change action to another standard one (ban)", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule1", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: 
appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - on_match: []appsec.Hook{ - {Filter: "IsInBand == true", Apply: []string{"SetRemediation('ban')"}}, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Len(t, responses, 1) - //note: SetAction normalizes deny, ban and block to ban - require.Equal(t, appsec.BanRemediation, responses[0].Action) - }, - }, - { - name: "on_match: change action to another standard one (captcha)", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule1", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - on_match: []appsec.Hook{ - {Filter: "IsInBand == true", Apply: []string{"SetRemediation('captcha')"}}, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Len(t, responses, 1) - //note: SetAction normalizes deny, ban and block to ban - require.Equal(t, appsec.CaptchaRemediation, responses[0].Action) - }, - }, - { - name: "on_match: change action to a non standard one", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule1", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - on_match: []appsec.Hook{ - {Filter: "IsInBand == true", Apply: []string{"SetRemediation('foobar')"}}, - }, - input_request: appsec.ParsedRequest{ - 
RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Len(t, events, 2) - require.Equal(t, types.APPSEC, events[0].Type) - require.Equal(t, types.LOG, events[1].Type) - require.Len(t, responses, 1) - require.Equal(t, "foobar", responses[0].Action) - }, - }, - { - name: "on_match: cancel alert", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule42", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - on_match: []appsec.Hook{ - {Filter: "IsInBand == true && LogInfo('XX -> %s', evt.Appsec.MatchedRules.GetName())", Apply: []string{"CancelAlert()"}}, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Len(t, events, 1) - require.Equal(t, types.LOG, events[0].Type) - require.Len(t, responses, 1) - require.Equal(t, appsec.BanRemediation, responses[0].Action) - }, - }, - { - name: "on_match: cancel event", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule42", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - on_match: []appsec.Hook{ - {Filter: "IsInBand == true", Apply: []string{"CancelEvent()"}}, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, 
appsecResponse appsec.BodyResponse, statusCode int) { - require.Len(t, events, 1) - require.Equal(t, types.APPSEC, events[0].Type) - require.Len(t, responses, 1) - require.Equal(t, appsec.BanRemediation, responses[0].Action) - }, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - loadAppSecEngine(test, t) - }) - } -} - -func TestAppsecPreEvalHooks(t *testing.T) { - /* - [x] basic working hook - [x] basic failing hook - [ ] test the "OnSuccess" feature - [ ] test multiple competing hooks - [ ] test the variety of helpers - */ - tests := []appsecRuleTest{ - { - name: "Basic on_load hook to disable inband rule", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule1", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - pre_eval: []appsec.Hook{ - {Filter: "1 == 1", Apply: []string{"RemoveInBandRuleByName('rule1')"}}, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Empty(t, events) - require.Len(t, responses, 1) - require.False(t, responses[0].InBandInterrupt) - require.False(t, responses[0].OutOfBandInterrupt) - }, - }, - { - name: "Basic on_load fails to disable rule", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule1", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - pre_eval: []appsec.Hook{ - {Filter: "1 ==2", Apply: []string{"RemoveInBandRuleByName('rule1')"}}, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": 
[]string{"toto"}}, - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Len(t, events, 2) - require.Equal(t, types.APPSEC, events[0].Type) - - require.Equal(t, types.LOG, events[1].Type) - require.True(t, events[1].Appsec.HasInBandMatches) - require.Len(t, events[1].Appsec.MatchedRules, 1) - require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) - - require.Len(t, responses, 1) - require.True(t, responses[0].InBandInterrupt) - - }, - }, - { - name: "on_load : disable inband by tag", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rulez", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - pre_eval: []appsec.Hook{ - {Apply: []string{"RemoveInBandRuleByTag('crowdsec-rulez')"}}, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Empty(t, events) - require.Len(t, responses, 1) - require.False(t, responses[0].InBandInterrupt) - require.False(t, responses[0].OutOfBandInterrupt) - }, - }, - { - name: "on_load : disable inband by ID", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rulez", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - pre_eval: []appsec.Hook{ - {Apply: []string{"RemoveInBandRuleByID(1516470898)"}}, //rule ID is generated at runtime. 
If you change rule, it will break the test (: - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Empty(t, events) - require.Len(t, responses, 1) - require.False(t, responses[0].InBandInterrupt) - require.False(t, responses[0].OutOfBandInterrupt) - }, - }, - { - name: "on_load : disable inband by name", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rulez", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - pre_eval: []appsec.Hook{ - {Apply: []string{"RemoveInBandRuleByName('rulez')"}}, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Empty(t, events) - require.Len(t, responses, 1) - require.False(t, responses[0].InBandInterrupt) - require.False(t, responses[0].OutOfBandInterrupt) - }, - }, - { - name: "on_load : outofband default behavior", - expected_load_ok: true, - outofband_rules: []appsec_rule.CustomRule{ - { - Name: "rulez", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Len(t, events, 1) - require.Equal(t, 
types.LOG, events[0].Type) - require.True(t, events[0].Appsec.HasOutBandMatches) - require.False(t, events[0].Appsec.HasInBandMatches) - require.Len(t, events[0].Appsec.MatchedRules, 1) - require.Equal(t, "rulez", events[0].Appsec.MatchedRules[0]["msg"]) - //maybe surprising, but response won't mention OOB event, as it's sent as soon as the inband phase is over. - require.Len(t, responses, 1) - require.False(t, responses[0].InBandInterrupt) - require.False(t, responses[0].OutOfBandInterrupt) - }, - }, - { - name: "on_load : set remediation by tag", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rulez", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - pre_eval: []appsec.Hook{ - {Apply: []string{"SetRemediationByTag('crowdsec-rulez', 'foobar')"}}, //rule ID is generated at runtime. If you change rule, it will break the test (: - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Len(t, events, 2) - require.Len(t, responses, 1) - require.Equal(t, "foobar", responses[0].Action) - }, - }, - { - name: "on_load : set remediation by name", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rulez", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - pre_eval: []appsec.Hook{ - {Apply: []string{"SetRemediationByName('rulez', 'foobar')"}}, //rule ID is generated at runtime. 
If you change rule, it will break the test (: - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Len(t, events, 2) - require.Len(t, responses, 1) - require.Equal(t, "foobar", responses[0].Action) - }, - }, - { - name: "on_load : set remediation by ID", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rulez", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - pre_eval: []appsec.Hook{ - {Apply: []string{"SetRemediationByID(1516470898, 'foobar')"}}, //rule ID is generated at runtime. If you change rule, it will break the test (: - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Len(t, events, 2) - require.Len(t, responses, 1) - require.Equal(t, "foobar", responses[0].Action) - require.Equal(t, "foobar", appsecResponse.Action) - require.Equal(t, http.StatusForbidden, appsecResponse.HTTPStatus) - }, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - loadAppSecEngine(test, t) - }) - } -} - -func TestAppsecRemediationConfigHooks(t *testing.T) { - - tests := []appsecRuleTest{ - { - name: "Basic matching rule", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule1", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - input_request: appsec.ParsedRequest{ - 
RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Equal(t, appsec.BanRemediation, responses[0].Action) - require.Equal(t, http.StatusForbidden, statusCode) - require.Equal(t, appsec.BanRemediation, appsecResponse.Action) - require.Equal(t, http.StatusForbidden, appsecResponse.HTTPStatus) - }, - }, - { - name: "SetRemediation", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule1", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - on_match: []appsec.Hook{{Apply: []string{"SetRemediation('captcha')"}}}, //rule ID is generated at runtime. 
If you change rule, it will break the test (: - - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Equal(t, appsec.CaptchaRemediation, responses[0].Action) - require.Equal(t, http.StatusForbidden, statusCode) - require.Equal(t, appsec.CaptchaRemediation, appsecResponse.Action) - require.Equal(t, http.StatusForbidden, appsecResponse.HTTPStatus) - }, - }, - { - name: "SetRemediation", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule1", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - on_match: []appsec.Hook{{Apply: []string{"SetReturnCode(418)"}}}, //rule ID is generated at runtime. If you change rule, it will break the test (: - - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Equal(t, appsec.BanRemediation, responses[0].Action) - require.Equal(t, http.StatusForbidden, statusCode) - require.Equal(t, appsec.BanRemediation, appsecResponse.Action) - require.Equal(t, http.StatusTeapot, appsecResponse.HTTPStatus) - }, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - loadAppSecEngine(test, t) - }) - } -} -func TestOnMatchRemediationHooks(t *testing.T) { - tests := []appsecRuleTest{ - { - name: "set remediation to allow with on_match hook", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule42", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", 
- Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - on_match: []appsec.Hook{ - {Filter: "IsInBand == true", Apply: []string{"SetRemediation('allow')"}}, - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Equal(t, appsec.AllowRemediation, appsecResponse.Action) - require.Equal(t, http.StatusOK, appsecResponse.HTTPStatus) - }, - }, - { - name: "set remediation to captcha + custom user code with on_match hook", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule42", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - DefaultRemediation: appsec.AllowRemediation, - on_match: []appsec.Hook{ - {Filter: "IsInBand == true", Apply: []string{"SetRemediation('captcha')", "SetReturnCode(418)"}}, - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - spew.Dump(responses) - spew.Dump(appsecResponse) - - log.Errorf("http status : %d", statusCode) - require.Equal(t, appsec.CaptchaRemediation, appsecResponse.Action) - require.Equal(t, http.StatusTeapot, appsecResponse.HTTPStatus) - require.Equal(t, http.StatusForbidden, statusCode) - }, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - loadAppSecEngine(test, t) - }) - } -} - -func TestAppsecDefaultPassRemediation(t *testing.T) { - - tests := []appsecRuleTest{ - { - name: "Basic non-matching rule", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule1", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: 
"^toto"}, - Transform: []string{"lowercase"}, - }, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/", - Args: url.Values{"foo": []string{"tutu"}}, - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Equal(t, appsec.AllowRemediation, responses[0].Action) - require.Equal(t, http.StatusOK, statusCode) - require.Equal(t, appsec.AllowRemediation, appsecResponse.Action) - require.Equal(t, http.StatusOK, appsecResponse.HTTPStatus) - }, - }, - { - name: "DefaultPassAction: pass", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule1", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/", - Args: url.Values{"foo": []string{"tutu"}}, - }, - DefaultPassAction: "allow", - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Equal(t, appsec.AllowRemediation, responses[0].Action) - require.Equal(t, http.StatusOK, statusCode) - require.Equal(t, appsec.AllowRemediation, appsecResponse.Action) - require.Equal(t, http.StatusOK, appsecResponse.HTTPStatus) - }, - }, - { - name: "DefaultPassAction: captcha", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule1", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/", - Args: url.Values{"foo": []string{"tutu"}}, - }, - DefaultPassAction: "captcha", - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, 
appsecResponse appsec.BodyResponse, statusCode int) { - require.Equal(t, appsec.CaptchaRemediation, responses[0].Action) - require.Equal(t, http.StatusOK, statusCode) //@tko: body is captcha, but as it's 200, captcha won't be showed to user - require.Equal(t, appsec.CaptchaRemediation, appsecResponse.Action) - require.Equal(t, http.StatusOK, appsecResponse.HTTPStatus) - }, - }, - { - name: "DefaultPassHTTPCode: 200", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule1", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/", - Args: url.Values{"foo": []string{"tutu"}}, - }, - UserPassedHTTPCode: 200, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Equal(t, appsec.AllowRemediation, responses[0].Action) - require.Equal(t, http.StatusOK, statusCode) - require.Equal(t, appsec.AllowRemediation, appsecResponse.Action) - require.Equal(t, http.StatusOK, appsecResponse.HTTPStatus) - }, - }, - { - name: "DefaultPassHTTPCode: 200", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule1", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/", - Args: url.Values{"foo": []string{"tutu"}}, - }, - UserPassedHTTPCode: 418, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Equal(t, appsec.AllowRemediation, responses[0].Action) - require.Equal(t, http.StatusOK, statusCode) - require.Equal(t, appsec.AllowRemediation, 
appsecResponse.Action) - require.Equal(t, http.StatusTeapot, appsecResponse.HTTPStatus) - }, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - loadAppSecEngine(test, t) - }) - } -} - -func TestAppsecDefaultRemediation(t *testing.T) { - - tests := []appsecRuleTest{ - { - name: "Basic matching rule", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule1", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Equal(t, appsec.BanRemediation, responses[0].Action) - require.Equal(t, http.StatusForbidden, statusCode) - require.Equal(t, appsec.BanRemediation, appsecResponse.Action) - require.Equal(t, http.StatusForbidden, appsecResponse.HTTPStatus) - }, - }, - { - name: "default remediation to ban (default)", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule42", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - DefaultRemediation: "ban", - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Equal(t, appsec.BanRemediation, responses[0].Action) - require.Equal(t, http.StatusForbidden, statusCode) - require.Equal(t, appsec.BanRemediation, appsecResponse.Action) - require.Equal(t, http.StatusForbidden, appsecResponse.HTTPStatus) - 
}, - }, - { - name: "default remediation to allow", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule42", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - DefaultRemediation: "allow", - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Equal(t, appsec.AllowRemediation, responses[0].Action) - require.Equal(t, http.StatusOK, statusCode) - require.Equal(t, appsec.AllowRemediation, appsecResponse.Action) - require.Equal(t, http.StatusOK, appsecResponse.HTTPStatus) - }, - }, - { - name: "default remediation to captcha", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule42", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - DefaultRemediation: "captcha", - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Equal(t, appsec.CaptchaRemediation, responses[0].Action) - require.Equal(t, http.StatusForbidden, statusCode) - require.Equal(t, appsec.CaptchaRemediation, appsecResponse.Action) - require.Equal(t, http.StatusForbidden, appsecResponse.HTTPStatus) - }, - }, - { - name: "custom user HTTP code", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule42", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, 
- Transform: []string{"lowercase"}, - }, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - UserBlockedHTTPCode: 418, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Equal(t, appsec.BanRemediation, responses[0].Action) - require.Equal(t, http.StatusForbidden, statusCode) - require.Equal(t, appsec.BanRemediation, appsecResponse.Action) - require.Equal(t, http.StatusTeapot, appsecResponse.HTTPStatus) - }, - }, - { - name: "custom remediation + HTTP code", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule42", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - UserBlockedHTTPCode: 418, - DefaultRemediation: "foobar", - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Equal(t, "foobar", responses[0].Action) - require.Equal(t, http.StatusForbidden, statusCode) - require.Equal(t, "foobar", appsecResponse.Action) - require.Equal(t, http.StatusTeapot, appsecResponse.HTTPStatus) - }, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - loadAppSecEngine(test, t) - }) - } -} - -func TestAppsecRuleMatches(t *testing.T) { - - /* - [x] basic matching rule - [x] basic non-matching rule - [ ] test the transformation - [ ] ? 
- */ - tests := []appsecRuleTest{ - { - name: "Basic matching rule", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule1", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Len(t, events, 2) - require.Equal(t, types.APPSEC, events[0].Type) - - require.Equal(t, types.LOG, events[1].Type) - require.True(t, events[1].Appsec.HasInBandMatches) - require.Len(t, events[1].Appsec.MatchedRules, 1) - require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) - - require.Len(t, responses, 1) - require.True(t, responses[0].InBandInterrupt) - }, - }, - { - name: "Basic non-matching rule", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule1", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"tutu"}}, - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Empty(t, events) - require.Len(t, responses, 1) - require.False(t, responses[0].InBandInterrupt) - require.False(t, responses[0].OutOfBandInterrupt) - }, - }, - { - name: "default remediation to allow", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule42", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - 
Transform: []string{"lowercase"}, - }, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - DefaultRemediation: "allow", - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Equal(t, appsec.AllowRemediation, responses[0].Action) - require.Equal(t, http.StatusOK, statusCode) - require.Equal(t, appsec.AllowRemediation, appsecResponse.Action) - require.Equal(t, http.StatusOK, appsecResponse.HTTPStatus) - }, - }, - { - name: "default remediation to captcha", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule42", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - DefaultRemediation: "captcha", - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Equal(t, appsec.CaptchaRemediation, responses[0].Action) - require.Equal(t, http.StatusForbidden, statusCode) - require.Equal(t, appsec.CaptchaRemediation, appsecResponse.Action) - require.Equal(t, http.StatusForbidden, appsecResponse.HTTPStatus) - }, - }, - { - name: "no default remediation / custom user HTTP code", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule42", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - UserBlockedHTTPCode: 418, - 
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Equal(t, appsec.BanRemediation, responses[0].Action) - require.Equal(t, http.StatusForbidden, statusCode) - require.Equal(t, appsec.BanRemediation, appsecResponse.Action) - require.Equal(t, http.StatusTeapot, appsecResponse.HTTPStatus) - }, - }, - { - name: "no match but try to set remediation to captcha with on_match hook", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule42", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - on_match: []appsec.Hook{ - {Filter: "IsInBand == true", Apply: []string{"SetRemediation('captcha')"}}, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"bla"}}, - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Empty(t, events) - require.Equal(t, http.StatusOK, statusCode) - require.Equal(t, appsec.AllowRemediation, appsecResponse.Action) - }, - }, - { - name: "no match but try to set user HTTP code with on_match hook", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule42", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - on_match: []appsec.Hook{ - {Filter: "IsInBand == true", Apply: []string{"SetReturnCode(418)"}}, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"bla"}}, - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - 
require.Empty(t, events) - require.Equal(t, http.StatusOK, statusCode) - require.Equal(t, appsec.AllowRemediation, appsecResponse.Action) - }, - }, - { - name: "no match but try to set remediation with pre_eval hook", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule42", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - pre_eval: []appsec.Hook{ - {Filter: "IsInBand == true", Apply: []string{"SetRemediationByName('rule42', 'captcha')"}}, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"bla"}}, - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Empty(t, events) - require.Equal(t, http.StatusOK, statusCode) - require.Equal(t, appsec.AllowRemediation, appsecResponse.Action) - }, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - loadAppSecEngine(test, t) - }) - } -} - -func TestAppsecRuleTransforms(t *testing.T) { - - log.SetLevel(log.TraceLevel) - tests := []appsecRuleTest{ - { - name: "Basic matching rule", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule1", - Zones: []string{"URI"}, - Match: appsec_rule.Match{Type: "equals", Value: "/toto"}, - }, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/toto", - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Len(t, events, 2) - require.Equal(t, types.APPSEC, events[0].Type) - require.Equal(t, types.LOG, events[1].Type) - require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) - }, - }, - { - name: "lowercase", - expected_load_ok: true, - inband_rules: 
[]appsec_rule.CustomRule{ - { - Name: "rule1", - Zones: []string{"URI"}, - Match: appsec_rule.Match{Type: "equals", Value: "/toto"}, - Transform: []string{"lowercase"}, - }, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/TOTO", - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Len(t, events, 2) - require.Equal(t, types.APPSEC, events[0].Type) - require.Equal(t, types.LOG, events[1].Type) - require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) - }, - }, - { - name: "uppercase", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule1", - Zones: []string{"URI"}, - Match: appsec_rule.Match{Type: "equals", Value: "/TOTO"}, - Transform: []string{"uppercase"}, - }, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/toto", - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Len(t, events, 2) - require.Equal(t, types.APPSEC, events[0].Type) - require.Equal(t, types.LOG, events[1].Type) - require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) - }, - }, - { - name: "b64decode", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule1", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "equals", Value: "toto"}, - Transform: []string{"b64decode"}, - }, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/?foo=dG90bw", - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Len(t, events, 2) - require.Equal(t, types.APPSEC, events[0].Type) - require.Equal(t, types.LOG, events[1].Type) - require.Equal(t, "rule1", 
events[1].Appsec.MatchedRules[0]["msg"]) - }, - }, - { - name: "b64decode with extra padding", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule1", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "equals", Value: "toto"}, - Transform: []string{"b64decode"}, - }, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/?foo=dG90bw===", - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Len(t, events, 2) - require.Equal(t, types.APPSEC, events[0].Type) - require.Equal(t, types.LOG, events[1].Type) - require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) - }, - }, - { - name: "length", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule1", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "gte", Value: "3"}, - Transform: []string{"length"}, - }, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/?foo=toto", - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Len(t, events, 2) - require.Equal(t, types.APPSEC, events[0].Type) - require.Equal(t, types.LOG, events[1].Type) - require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) - }, - }, - { - name: "urldecode", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule1", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "equals", Value: "BB/A"}, - Transform: []string{"urldecode"}, - }, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/?foo=%42%42%2F%41", - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse 
appsec.BodyResponse, statusCode int) { - require.Len(t, events, 2) - require.Equal(t, types.APPSEC, events[0].Type) - require.Equal(t, types.LOG, events[1].Type) - require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) - }, - }, - { - name: "trim", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule1", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "equals", Value: "BB/A"}, - Transform: []string{"urldecode", "trim"}, - }, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/?foo=%20%20%42%42%2F%41%20%20", - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Len(t, events, 2) - require.Equal(t, types.APPSEC, events[0].Type) - require.Equal(t, types.LOG, events[1].Type) - require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) - }, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - loadAppSecEngine(test, t) - }) - } -} - func loadAppSecEngine(test appsecRuleTest, t *testing.T) { if testing.Verbose() { log.SetLevel(log.TraceLevel) From 2682f801dfe90a85a7bf4d3ec2a51136280a2dac Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 5 Apr 2024 14:57:33 +0200 Subject: [PATCH 085/318] windows: fix data file update (remove before rename) (#2930) --- pkg/cwhub/dataset.go | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/pkg/cwhub/dataset.go b/pkg/cwhub/dataset.go index 97fd9c5a0ff..921361e3fcf 100644 --- a/pkg/cwhub/dataset.go +++ b/pkg/cwhub/dataset.go @@ -4,9 +4,11 @@ import ( "errors" "fmt" "io" + "io/fs" "net/http" "os" "path/filepath" + "runtime" "time" "github.com/sirupsen/logrus" @@ -65,6 +67,18 @@ func downloadFile(url string, destPath string) error { // TODO: use a better way to communicate this fmt.Printf("updated %s\n", filepath.Base(destPath)) + 
if runtime.GOOS == "windows" { + // On Windows, rename will fail if the destination file already exists + // so we remove it first. + err = os.Remove(destPath) + switch { + case errors.Is(err, fs.ErrNotExist): + break + case err != nil: + return err + } + } + if err = os.Rename(tmpFileName, destPath); err != nil { return err } From 990dd5e08e6efaa608910fdc258958c9db279518 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 5 Apr 2024 15:11:11 +0200 Subject: [PATCH 086/318] use go 1.21.9; update dependencies (#2931) --- .github/workflows/bats-hub.yml | 2 +- .github/workflows/bats-mysql.yml | 2 +- .github/workflows/bats-postgres.yml | 2 +- .github/workflows/bats-sqlite-coverage.yml | 2 +- .github/workflows/ci-windows-build-msi.yml | 2 +- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/go-tests-windows.yml | 2 +- .github/workflows/go-tests.yml | 2 +- .github/workflows/publish-tarball-release.yml | 2 +- Dockerfile | 2 +- Dockerfile.debian | 2 +- azure-pipelines.yml | 2 +- go.mod | 10 +++++----- go.sum | 16 ++++++++-------- 14 files changed, 25 insertions(+), 25 deletions(-) diff --git a/.github/workflows/bats-hub.yml b/.github/workflows/bats-hub.yml index bd84389b011..4e977201c5a 100644 --- a/.github/workflows/bats-hub.yml +++ b/.github/workflows/bats-hub.yml @@ -33,7 +33,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.8" + go-version: "1.21.9" - name: "Install bats dependencies" env: diff --git a/.github/workflows/bats-mysql.yml b/.github/workflows/bats-mysql.yml index cc90961bfd6..9e320b1b3de 100644 --- a/.github/workflows/bats-mysql.yml +++ b/.github/workflows/bats-mysql.yml @@ -36,7 +36,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.8" + go-version: "1.21.9" - name: "Install bats dependencies" env: diff --git a/.github/workflows/bats-postgres.yml b/.github/workflows/bats-postgres.yml index 18cbb50a12e..2cd09b494a0 100644 --- 
a/.github/workflows/bats-postgres.yml +++ b/.github/workflows/bats-postgres.yml @@ -45,7 +45,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.8" + go-version: "1.21.9" - name: "Install bats dependencies" env: diff --git a/.github/workflows/bats-sqlite-coverage.yml b/.github/workflows/bats-sqlite-coverage.yml index 6780727e968..d56d69f28b2 100644 --- a/.github/workflows/bats-sqlite-coverage.yml +++ b/.github/workflows/bats-sqlite-coverage.yml @@ -28,7 +28,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.8" + go-version: "1.21.9" - name: "Install bats dependencies" env: diff --git a/.github/workflows/ci-windows-build-msi.yml b/.github/workflows/ci-windows-build-msi.yml index ae3a475774d..278426d778c 100644 --- a/.github/workflows/ci-windows-build-msi.yml +++ b/.github/workflows/ci-windows-build-msi.yml @@ -35,7 +35,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.8" + go-version: "1.21.9" - name: Build run: make windows_installer BUILD_RE2_WASM=1 diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 68c95ed6446..1f27a8c7c2a 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -52,7 +52,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.8" + go-version: "1.21.9" cache-dependency-path: "**/go.sum" # Initializes the CodeQL tools for scanning. 
diff --git a/.github/workflows/go-tests-windows.yml b/.github/workflows/go-tests-windows.yml index 6b2f1132a82..781f2a4a930 100644 --- a/.github/workflows/go-tests-windows.yml +++ b/.github/workflows/go-tests-windows.yml @@ -34,7 +34,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.8" + go-version: "1.21.9" - name: Build run: | diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index d76315462f3..67f73d81a45 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -126,7 +126,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.8" + go-version: "1.21.9" - name: Create localstack streams run: | diff --git a/.github/workflows/publish-tarball-release.yml b/.github/workflows/publish-tarball-release.yml index 6fd582da853..6cdf111a4ba 100644 --- a/.github/workflows/publish-tarball-release.yml +++ b/.github/workflows/publish-tarball-release.yml @@ -25,7 +25,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.8" + go-version: "1.21.9" - name: Build the binaries run: | diff --git a/Dockerfile b/Dockerfile index 53a6cd04512..1e311bfa89c 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # vim: set ft=dockerfile: -FROM golang:1.21.8-alpine3.18 AS build +FROM golang:1.21.9-alpine3.18 AS build ARG BUILD_VERSION diff --git a/Dockerfile.debian b/Dockerfile.debian index fd40bd475bb..ee0fa32bbd9 100644 --- a/Dockerfile.debian +++ b/Dockerfile.debian @@ -1,5 +1,5 @@ # vim: set ft=dockerfile: -FROM golang:1.21.8-bookworm AS build +FROM golang:1.21.9-bookworm AS build ARG BUILD_VERSION diff --git a/azure-pipelines.yml b/azure-pipelines.yml index f90af6f1718..b662a809ad7 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -21,7 +21,7 @@ stages: - task: GoTool@0 displayName: "Install Go" inputs: - version: '1.21.8' + version: '1.21.9' - pwsh: | choco install -y make diff --git a/go.mod b/go.mod index 50b88f7d425..04f34e6485e 
100644 --- a/go.mod +++ b/go.mod @@ -24,6 +24,7 @@ require ( github.com/buger/jsonparser v1.1.1 github.com/c-robinson/iplib v1.0.3 github.com/cespare/xxhash/v2 v2.2.0 + github.com/corazawaf/libinjection-go v0.1.2 github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607 github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 github.com/crowdsecurity/go-cs-lib v0.0.6 @@ -81,9 +82,9 @@ require ( github.com/umahmood/haversine v0.0.0-20151105152445-808ab04add26 github.com/wasilibs/go-re2 v1.3.0 github.com/xhit/go-simple-mail/v2 v2.16.0 - golang.org/x/crypto v0.20.0 + golang.org/x/crypto v0.22.0 golang.org/x/mod v0.11.0 - golang.org/x/sys v0.17.0 + golang.org/x/sys v0.19.0 golang.org/x/text v0.14.0 google.golang.org/grpc v1.56.3 google.golang.org/protobuf v1.33.0 @@ -104,7 +105,6 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/bytedance/sonic v1.9.1 // indirect github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect - github.com/corazawaf/libinjection-go v0.1.2 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect github.com/creack/pty v1.1.18 // indirect @@ -198,9 +198,9 @@ require ( github.com/zclconf/go-cty v1.8.0 // indirect go.mongodb.org/mongo-driver v1.9.4 // indirect golang.org/x/arch v0.3.0 // indirect - golang.org/x/net v0.21.0 // indirect + golang.org/x/net v0.24.0 // indirect golang.org/x/sync v0.6.0 // indirect - golang.org/x/term v0.17.0 // indirect + golang.org/x/term v0.19.0 // indirect golang.org/x/time v0.3.0 // indirect golang.org/x/tools v0.8.1-0.20230428195545-5283a0178901 // indirect golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect diff --git a/go.sum b/go.sum index 8f91bd31ff6..29e23f02ab9 100644 --- a/go.sum +++ b/go.sum @@ -757,8 +757,8 @@ golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod 
h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= -golang.org/x/crypto v0.20.0 h1:jmAMJJZXr5KiCw05dfYK9QnqaqKLYXijU23lsEdcQqg= -golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZPQ= +golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= +golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= @@ -791,8 +791,8 @@ golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= -golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= +golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -841,8 +841,8 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -850,8 +850,8 @@ golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= -golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= -golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q= +golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= From 0e8a1c681b0c72dc45509aa14f6c5a6c9df83ab0 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 8 Apr 2024 14:53:12 +0200 Subject: [PATCH 087/318] docker: pre-download all hub items and data, opt-in hub update/upgrade 
(#2933) * docker: pre-download all hub items and data, opt-in hub update/upgrade * docker/bars: don't purge anything before pre-downloading hub * Docker: README update --- Dockerfile | 1 + docker/README.md | 11 +++++++++-- docker/docker_start.sh | 5 ++--- docker/preload-hub-items | 22 ++++++++++++++++++++++ test/bin/preload-hub-items | 19 ++----------------- 5 files changed, 36 insertions(+), 22 deletions(-) create mode 100755 docker/preload-hub-items diff --git a/Dockerfile b/Dockerfile index 1e311bfa89c..d2b01ed7753 100644 --- a/Dockerfile +++ b/Dockerfile @@ -25,6 +25,7 @@ RUN make clean release DOCKER_BUILD=1 BUILD_STATIC=1 && \ ./wizard.sh --docker-mode && \ cd - >/dev/null && \ cscli hub update && \ + ./docker/preload-hub-items && \ cscli collections install crowdsecurity/linux && \ cscli parsers install crowdsecurity/whitelists diff --git a/docker/README.md b/docker/README.md index 5e39838a175..2fea57a6161 100644 --- a/docker/README.md +++ b/docker/README.md @@ -134,7 +134,6 @@ labels: type: apache2 ``` - ## Recommended configuration ### Volumes @@ -146,6 +145,14 @@ to avoid losing credentials and decision data in case of container destruction a * Acquisition: `/etc/crowdsec/acquis.d` and/or `/etc/crowdsec.acquis.yaml` (yes, they can be nested in `/etc/crowdsec`) * Database when using SQLite (default): `/var/lib/crowdsec/data` +### Hub updates + +To ensure you have the latest version of the collections, scenarios, parsers, etc., you can set the variable `DO_HUB_UPGRADE` to true. +This will perform an update/upgrade of the hub every time the container is started. + +Be aware that if your container is misbehaving and caught in a restart loop, the CrowdSec hub may ban your IP for some time and your containers +will run with the version of the hub that is cached in the container's image. If you enable `DO_HUB_UPGRADE`, do it when your infrastructure is running +correctly and make sure you have some monitoring in place. 
## Start a Crowdsec instance @@ -316,7 +323,7 @@ config.yaml) each time the container is run. | `BOUNCERS_ALLOWED_OU` | bouncer-ou | OU values allowed for bouncers, separated by comma | | | | | | __Hub management__ | | | -| `NO_HUB_UPGRADE` | false | Skip hub update / upgrade when the container starts | +| `DO_HUB_UPGRADE` | false | Force hub update / upgrade when the container starts. If for some reason the container restarts too often, it may lead to a temporary ban from hub updates. | | `COLLECTIONS` | | Collections to install, separated by space: `-e COLLECTIONS="crowdsecurity/linux crowdsecurity/apache2"` | | `PARSERS` | | Parsers to install, separated by space | | `SCENARIOS` | | Scenarios to install, separated by space | diff --git a/docker/docker_start.sh b/docker/docker_start.sh index dd96184ccbc..26c5b0eeedc 100755 --- a/docker/docker_start.sh +++ b/docker/docker_start.sh @@ -304,9 +304,8 @@ conf_set_if "$PLUGIN_DIR" '.config_paths.plugin_dir = strenv(PLUGIN_DIR)' ## Install hub items -cscli hub update || true - -if isfalse "$NO_HUB_UPGRADE"; then +if istrue "$DO_HUB_UPGRADE"; then + cscli hub update || true cscli hub upgrade || true fi diff --git a/docker/preload-hub-items b/docker/preload-hub-items new file mode 100755 index 00000000000..d02b094851b --- /dev/null +++ b/docker/preload-hub-items @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +set -eu + +# pre-download everything but don't install anything + +echo "Pre-downloading Hub content..." + +types=$(cscli hub types -o raw) + +for itemtype in $types; do + ALL_ITEMS=$(cscli "$itemtype" list -a -o json | itemtype="$itemtype" yq '.[env(itemtype)][] | .name') + if [[ -n "${ALL_ITEMS}" ]]; then + #shellcheck disable=SC2086 + cscli "$itemtype" install \ + $ALL_ITEMS \ + --download-only \ + --error + fi +done + +echo " done." 
diff --git a/test/bin/preload-hub-items b/test/bin/preload-hub-items index 14e9cff998c..ddf7fecbabc 100755 --- a/test/bin/preload-hub-items +++ b/test/bin/preload-hub-items @@ -9,20 +9,12 @@ THIS_DIR=$(CDPATH= cd -- "$(dirname -- "$0")" && pwd) # pre-download everything but don't install anything -echo -n "Purging existing hub..." +echo "Pre-downloading Hub content..." types=$("$CSCLI" hub types -o raw) for itemtype in $types; do - "$CSCLI" "${itemtype}" delete --all --error --purge --force -done - -echo " done." - -echo -n "Pre-downloading Hub content..." - -for itemtype in $types; do - ALL_ITEMS=$("$CSCLI" "$itemtype" list -a -o json | jq --arg itemtype "$itemtype" -r '.[$itemtype][].name') + ALL_ITEMS=$("$CSCLI" "$itemtype" list -a -o json | itemtype="$itemtype" yq '.[env(itemtype)][] | .name') if [[ -n "${ALL_ITEMS}" ]]; then #shellcheck disable=SC2086 "$CSCLI" "$itemtype" install \ @@ -32,11 +24,4 @@ for itemtype in $types; do fi done -# XXX: download-only works only for collections, not for parsers, scenarios, postoverflows. -# so we have to delete the links manually, and leave the downloaded files in place - -for itemtype in $types; do - "$CSCLI" "$itemtype" delete --all --error -done - echo " done." 
From 2291a232cb7188862bd1976615a19835e0f94ad2 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 8 Apr 2024 15:00:45 +0200 Subject: [PATCH 088/318] docker: pre-download hub items (debian image) (#2934) --- Dockerfile.debian | 1 + 1 file changed, 1 insertion(+) diff --git a/Dockerfile.debian b/Dockerfile.debian index ee0fa32bbd9..0d094f8dfa5 100644 --- a/Dockerfile.debian +++ b/Dockerfile.debian @@ -30,6 +30,7 @@ RUN make clean release DOCKER_BUILD=1 BUILD_STATIC=1 && \ ./wizard.sh --docker-mode && \ cd - >/dev/null && \ cscli hub update && \ + ./docker/preload-hub-items && \ cscli collections install crowdsecurity/linux && \ cscli parsers install crowdsecurity/whitelists From 0746e0c091b8426791a4a495b25a186efd899c9d Mon Sep 17 00:00:00 2001 From: AlteredCoder <64792091+AlteredCoder@users.noreply.github.com> Date: Thu, 11 Apr 2024 11:23:19 +0200 Subject: [PATCH 089/318] Rename bouncers to Remediation component in openAPI (#2936) * Rename bouncers to Remediation component in openAPI --- pkg/models/localapi_swagger.yaml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/pkg/models/localapi_swagger.yaml b/pkg/models/localapi_swagger.yaml index 66132e5e36e..d167da9b6af 100644 --- a/pkg/models/localapi_swagger.yaml +++ b/pkg/models/localapi_swagger.yaml @@ -26,10 +26,10 @@ produces: paths: /decisions/stream: get: - description: Returns a list of new/expired decisions. Intended for bouncers that need to "stream" decisions + description: Returns a list of new/expired decisions. 
Intended for remediation component that need to "stream" decisions summary: getDecisionsStream tags: - - bouncers + - Remediation component operationId: getDecisionsStream deprecated: false produces: @@ -39,7 +39,7 @@ paths: in: query required: false type: boolean - description: 'If true, means that the bouncers is starting and a full list must be provided' + description: 'If true, means that the remediation component is starting and a full list must be provided' - name: scopes in: query required: false @@ -73,10 +73,10 @@ paths: security: - APIKeyAuthorizer: [] head: - description: Returns a list of new/expired decisions. Intended for bouncers that need to "stream" decisions + description: Returns a list of new/expired decisions. Intended for remediation component that need to "stream" decisions summary: GetDecisionsStream tags: - - bouncers + - Remediation component operationId: headDecisionsStream deprecated: false produces: @@ -100,7 +100,7 @@ paths: description: Returns information about existing decisions summary: getDecisions tags: - - bouncers + - Remediation component operationId: getDecisions deprecated: false produces: @@ -164,7 +164,7 @@ paths: description: Returns information about existing decisions summary: GetDecisions tags: - - bouncers + - Remediation component operationId: headDecisions deprecated: false produces: @@ -1008,7 +1008,7 @@ definitions: title: "error response" description: "error response return by the API" tags: - - name: bouncers + - name: Remediation component description: 'Operations about decisions : bans, captcha, rate-limit etc.' 
- name: watchers description: 'Operations about watchers : cscli & crowdsec' From c6e40191ddf7a87b8c0d946c36083c084677e5f1 Mon Sep 17 00:00:00 2001 From: blotus Date: Thu, 18 Apr 2024 15:33:51 +0200 Subject: [PATCH 090/318] =?UTF-8?q?Revert=20"docker:=20pre-download=20all?= =?UTF-8?q?=20hub=20items=20and=20data,=20opt-in=20hub=20updat=E2=80=A6=20?= =?UTF-8?q?(#2947)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .github/workflows/docker-tests.yml | 14 +++++++------- Dockerfile | 1 - Dockerfile.debian | 1 - docker/README.md | 11 ++--------- docker/docker_start.sh | 5 +++-- docker/preload-hub-items | 22 ---------------------- test/bin/preload-hub-items | 19 +++++++++++++++++-- 7 files changed, 29 insertions(+), 44 deletions(-) delete mode 100755 docker/preload-hub-items diff --git a/.github/workflows/docker-tests.yml b/.github/workflows/docker-tests.yml index d3ae4f90d79..3e87d3ba4f1 100644 --- a/.github/workflows/docker-tests.yml +++ b/.github/workflows/docker-tests.yml @@ -59,15 +59,15 @@ jobs: cd docker/test python -m pip install --upgrade pipenv wheel - - name: "Cache virtualenvs" - id: cache-pipenv - uses: actions/cache@v4 - with: - path: ~/.local/share/virtualenvs - key: ${{ runner.os }}-pipenv-${{ hashFiles('**/Pipfile.lock') }} + #- name: "Cache virtualenvs" + # id: cache-pipenv + # uses: actions/cache@v4 + # with: + # path: ~/.local/share/virtualenvs + # key: ${{ runner.os }}-pipenv-${{ hashFiles('**/Pipfile.lock') }} - name: "Install dependencies" - if: steps.cache-pipenv.outputs.cache-hit != 'true' + #if: steps.cache-pipenv.outputs.cache-hit != 'true' run: | cd docker/test pipenv install --deploy diff --git a/Dockerfile b/Dockerfile index d2b01ed7753..1e311bfa89c 100644 --- a/Dockerfile +++ b/Dockerfile @@ -25,7 +25,6 @@ RUN make clean release DOCKER_BUILD=1 BUILD_STATIC=1 && \ ./wizard.sh --docker-mode && \ cd - >/dev/null && \ cscli hub update && \ - ./docker/preload-hub-items && \ cscli collections 
install crowdsecurity/linux && \ cscli parsers install crowdsecurity/whitelists diff --git a/Dockerfile.debian b/Dockerfile.debian index 0d094f8dfa5..ee0fa32bbd9 100644 --- a/Dockerfile.debian +++ b/Dockerfile.debian @@ -30,7 +30,6 @@ RUN make clean release DOCKER_BUILD=1 BUILD_STATIC=1 && \ ./wizard.sh --docker-mode && \ cd - >/dev/null && \ cscli hub update && \ - ./docker/preload-hub-items && \ cscli collections install crowdsecurity/linux && \ cscli parsers install crowdsecurity/whitelists diff --git a/docker/README.md b/docker/README.md index 2fea57a6161..5e39838a175 100644 --- a/docker/README.md +++ b/docker/README.md @@ -134,6 +134,7 @@ labels: type: apache2 ``` + ## Recommended configuration ### Volumes @@ -145,14 +146,6 @@ to avoid losing credentials and decision data in case of container destruction a * Acquisition: `/etc/crowdsec/acquis.d` and/or `/etc/crowdsec.acquis.yaml` (yes, they can be nested in `/etc/crowdsec`) * Database when using SQLite (default): `/var/lib/crowdsec/data` -### Hub updates - -To ensure you have the latest version of the collections, scenarios, parsers, etc., you can set the variable `DO_HUB_UPGRADE` to true. -This will perform an update/upgrade of the hub every time the container is started. - -Be aware that if your container is misbehaving and caught in a restart loop, the CrowdSec hub may ban your IP for some time and your containers -will run with the version of the hub that is cached in the container's image. If you enable `DO_HUB_UPGRADE`, do it when your infrastructure is running -correctly and make sure you have some monitoring in place. ## Start a Crowdsec instance @@ -323,7 +316,7 @@ config.yaml) each time the container is run. | `BOUNCERS_ALLOWED_OU` | bouncer-ou | OU values allowed for bouncers, separated by comma | | | | | | __Hub management__ | | | -| `DO_HUB_UPGRADE` | false | Force hub update / upgrade when the container starts. 
If for some reason the container restarts too often, it may lead to a temporary ban from hub updates. | +| `NO_HUB_UPGRADE` | false | Skip hub update / upgrade when the container starts | | `COLLECTIONS` | | Collections to install, separated by space: `-e COLLECTIONS="crowdsecurity/linux crowdsecurity/apache2"` | | `PARSERS` | | Parsers to install, separated by space | | `SCENARIOS` | | Scenarios to install, separated by space | diff --git a/docker/docker_start.sh b/docker/docker_start.sh index 26c5b0eeedc..dd96184ccbc 100755 --- a/docker/docker_start.sh +++ b/docker/docker_start.sh @@ -304,8 +304,9 @@ conf_set_if "$PLUGIN_DIR" '.config_paths.plugin_dir = strenv(PLUGIN_DIR)' ## Install hub items -if istrue "$DO_HUB_UPGRADE"; then - cscli hub update || true +cscli hub update || true + +if isfalse "$NO_HUB_UPGRADE"; then cscli hub upgrade || true fi diff --git a/docker/preload-hub-items b/docker/preload-hub-items deleted file mode 100755 index d02b094851b..00000000000 --- a/docker/preload-hub-items +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env bash - -set -eu - -# pre-download everything but don't install anything - -echo "Pre-downloading Hub content..." - -types=$(cscli hub types -o raw) - -for itemtype in $types; do - ALL_ITEMS=$(cscli "$itemtype" list -a -o json | itemtype="$itemtype" yq '.[env(itemtype)][] | .name') - if [[ -n "${ALL_ITEMS}" ]]; then - #shellcheck disable=SC2086 - cscli "$itemtype" install \ - $ALL_ITEMS \ - --download-only \ - --error - fi -done - -echo " done." diff --git a/test/bin/preload-hub-items b/test/bin/preload-hub-items index ddf7fecbabc..14e9cff998c 100755 --- a/test/bin/preload-hub-items +++ b/test/bin/preload-hub-items @@ -9,12 +9,20 @@ THIS_DIR=$(CDPATH= cd -- "$(dirname -- "$0")" && pwd) # pre-download everything but don't install anything -echo "Pre-downloading Hub content..." +echo -n "Purging existing hub..." 
types=$("$CSCLI" hub types -o raw) for itemtype in $types; do - ALL_ITEMS=$("$CSCLI" "$itemtype" list -a -o json | itemtype="$itemtype" yq '.[env(itemtype)][] | .name') + "$CSCLI" "${itemtype}" delete --all --error --purge --force +done + +echo " done." + +echo -n "Pre-downloading Hub content..." + +for itemtype in $types; do + ALL_ITEMS=$("$CSCLI" "$itemtype" list -a -o json | jq --arg itemtype "$itemtype" -r '.[$itemtype][].name') if [[ -n "${ALL_ITEMS}" ]]; then #shellcheck disable=SC2086 "$CSCLI" "$itemtype" install \ @@ -24,4 +32,11 @@ for itemtype in $types; do fi done +# XXX: download-only works only for collections, not for parsers, scenarios, postoverflows. +# so we have to delete the links manually, and leave the downloaded files in place + +for itemtype in $types; do + "$CSCLI" "$itemtype" delete --all --error +done + echo " done." From 95f27677e4c27d66cbf387c58c77a58491c5b378 Mon Sep 17 00:00:00 2001 From: Laurence Jones Date: Mon, 22 Apr 2024 14:18:34 +0100 Subject: [PATCH 091/318] enhance: add refactoring to governance (#2955) --- .github/governance.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/governance.yml b/.github/governance.yml index c57fd362600..0457a24c7b0 100644 --- a/.github/governance.yml +++ b/.github/governance.yml @@ -42,7 +42,7 @@ issue: 3. Check [Releases](https://github.com/crowdsecurity/crowdsec/releases/latest) to make sure your agent is on the latest version. - prefix: kind - list: ['feature', 'bug', 'packaging', 'enhancement'] + list: ['feature', 'bug', 'packaging', 'enhancement', 'refactoring'] multiple: false author_association: author: true @@ -54,6 +54,7 @@ issue: @$AUTHOR: There are no 'kind' label on this issue. You need a 'kind' label to start the triage process. 
* `/kind feature` * `/kind enhancement` + * `/kind refactoring` * `/kind bug` * `/kind packaging` @@ -65,12 +66,13 @@ pull_request: labels: - prefix: kind multiple: false - list: [ 'feature', 'enhancement', 'fix', 'chore', 'dependencies'] + list: [ 'feature', 'enhancement', 'fix', 'chore', 'dependencies', 'refactoring'] needs: comment: | @$AUTHOR: There are no 'kind' label on this PR. You need a 'kind' label to generate the release automatically. * `/kind feature` * `/kind enhancement` + * `/kind refactoring` * `/kind fix` * `/kind chore` * `/kind dependencies` From 630cbf0c70658dff679bb1b95e7b54ba43dd4c0a Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 22 Apr 2024 17:18:11 +0200 Subject: [PATCH 092/318] update linter list and descriptions (#2951) --- .golangci.yml | 77 +++++++++++++++++++++++++++++---------------------- 1 file changed, 44 insertions(+), 33 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index df0cb67d1a8..f38fa337a8d 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -147,37 +147,40 @@ linters: # # DEPRECATED by golangi-lint # - - deadcode # The owner seems to have abandoned the linter. Replaced by unused. - - exhaustivestruct # The owner seems to have abandoned the linter. Replaced by exhaustruct. - - golint # Golint differs from gofmt. Gofmt reformats Go source code, whereas golint prints out style mistakes - - ifshort # Checks that your code uses short syntax for if-statements whenever possible - - interfacer # Linter that suggests narrower interface types - - maligned # Tool to detect Go structs that would take less memory if their fields were sorted - - nosnakecase # nosnakecase is a linter that detects snake case of variable naming and function name. - - scopelint # Scopelint checks for unpinned variables in go programs - - structcheck # The owner seems to have abandoned the linter. Replaced by unused. - - varcheck # The owner seems to have abandoned the linter. Replaced by unused. 
+ - deadcode + - exhaustivestruct + - golint + - ifshort + - interfacer + - maligned + - nosnakecase + - scopelint + - structcheck + - varcheck # # Enabled # # - asasalint # check for pass []any as any in variadic func(...any) - # - asciicheck # Simple linter to check that your code does not contain non-ASCII identifiers + # - asciicheck # checks that all code identifiers does not have non-ASCII symbols in the name # - bidichk # Checks for dangerous unicode character sequences # - bodyclose # checks whether HTTP response body is closed successfully + # - copyloopvar # copyloopvar is a linter detects places where loop variables are copied # - cyclop # checks function and package cyclomatic complexity # - decorder # check declaration order and count of types, constants, variables and functions # - depguard # Go linter that checks if package imports are in a list of acceptable packages # - dupword # checks for duplicate words in the source code # - durationcheck # check for two durations multiplied together - # - errcheck # Errcheck is a program for checking for unchecked errors in go programs. These unchecked errors can be critical bugs in some cases + # - errcheck # errcheck is a program for checking for unchecked errors in Go code. These unchecked errors can be critical bugs in some cases # - errorlint # errorlint is a linter for that can be used to find code that will cause problems with the error wrapping scheme introduced in Go 1.13. # - execinquery # execinquery is a linter about query string checker in Query function which reads your Go src files and warning it finds # - exportloopref # checks for pointers to enclosing loop variables # - funlen # Tool for detection of long functions # - ginkgolinter # enforces standards of using ginkgo and gomega + # - gocheckcompilerdirectives # Checks that go compiler directive comments (//go:) are valid. 
# - gochecknoinits # Checks that no init functions are present in Go code + # - gochecksumtype # Run exhaustiveness checks on Go "sum types" # - gocognit # Computes and checks the cognitive complexity of functions # - gocritic # Provides diagnostics that check for bugs, performance and style issues. # - gocyclo # Computes and checks the cyclomatic complexity of functions @@ -185,48 +188,56 @@ linters: # - gomoddirectives # Manage the use of 'replace', 'retract', and 'excludes' directives in go.mod. # - gomodguard # Allow and block list linter for direct Go module dependencies. This is different from depguard where there are different block types for example version constraints and module recommendations. # - goprintffuncname # Checks that printf-like functions are named with `f` at the end - # - gosimple # (megacheck): Linter for Go source code that specializes in simplifying a code - # - govet # (vet, vetshadow): Vet examines Go source code and reports suspicious constructs, such as Printf calls whose arguments do not align with the format string - # - grouper # An analyzer to analyze expression groups. + # - gosimple # (megacheck): Linter for Go source code that specializes in simplifying code + # - gosmopolitan # Report certain i18n/l10n anti-patterns in your Go codebase + # - govet # (vet, vetshadow): Vet examines Go source code and reports suspicious constructs. It is roughly the same as 'go vet' and uses its passes. + # - grouper # Analyze expression groups. # - importas # Enforces consistent import aliases # - ineffassign # Detects when assignments to existing variables are not used # - interfacebloat # A linter that checks the number of methods inside an interface. + # - intrange # intrange is a linter to find places where for loops could make use of an integer range. # - lll # Reports long lines + # - loggercheck # (logrlint): Checks key value pairs for common logger libraries (kitlog,klog,logr,zap). # - logrlint # Check logr arguments. 
# - maintidx # maintidx measures the maintainability index of each function. # - makezero # Finds slice declarations with non-zero initial length - # - misspell # Finds commonly misspelled English words in comments - # - nakedret # Finds naked returns in functions greater than a specified function length + # - mirror # reports wrong mirror patterns of bytes/strings usage + # - misspell # Finds commonly misspelled English words + # - nakedret # Checks that functions with naked returns are not longer than a maximum size (can be zero). # - nestif # Reports deeply nested if statements # - nilerr # Finds the code that returns nil even if it checks that the error is not nil. # - nolintlint # Reports ill-formed or insufficient nolint directives # - nonamedreturns # Reports all named returns # - nosprintfhostport # Checks for misuse of Sprintf to construct a host with port in a URL. + # - perfsprint # Checks that fmt.Sprintf can be replaced with a faster alternative. # - predeclared # find code that shadows one of Go's predeclared identifiers # - reassign # Checks that package variables are not reassigned - # - rowserrcheck # checks whether Err of rows is checked successfully - # - sqlclosecheck # Checks that sql.Rows and sql.Stmt are closed. - # - staticcheck # (megacheck): Staticcheck is a go vet on steroids, applying a ton of static analysis checks - # - testableexamples # linter checks if examples are testable (have an expected output) + # - rowserrcheck # checks whether Rows.Err of rows is checked successfully + # - sloglint # ensure consistent code style when using log/slog + # - spancheck # Checks for mistakes with OpenTelemetry/Census spans. + # - sqlclosecheck # Checks that sql.Rows, sql.Stmt, sqlx.NamedStmt, pgx.Query are closed. + # - staticcheck # (megacheck): It's a set of rules from staticcheck. It's not the same thing as the staticcheck binary. The author of staticcheck doesn't support or approve the use of staticcheck as a library inside golangci-lint. 
# - tenv # tenv is analyzer that detects using os.Setenv instead of t.Setenv since Go1.17 + # - testableexamples # linter checks if examples are testable (have an expected output) + # - testifylint # Checks usage of github.com/stretchr/testify. # - tparallel # tparallel detects inappropriate usage of t.Parallel() method in your Go test codes - # - typecheck # Like the front-end of a Go compiler, parses and type-checks Go code # - unconvert # Remove unnecessary type conversions # - unused # (megacheck): Checks Go code for unused constants, variables, functions and types # - usestdlibvars # A linter that detect the possibility to use variables/constants from the Go standard library. - # - wastedassign # wastedassign finds wasted assignment statements. + # - wastedassign # Finds wasted assignment statements + # - zerologlint # Detects the wrong usage of `zerolog` that a user forgets to dispatch with `Send` or `Msg` # # Recommended? (easy) # - dogsled # Checks assignments with too many blank identifiers (e.g. x, _, _, _, := f()) - - errchkjson # Checks types passed to the json encoding functions. Reports unsupported types and optionally reports occations, where the check for the returned error can be omitted. + - errchkjson # Checks types passed to the json encoding functions. Reports unsupported types and reports occations, where the check for the returned error can be omitted. - exhaustive # check exhaustiveness of enum switch statements - gci # Gci control golang package import order and make it always deterministic. - godot # Check if comments end in a period - gofmt # Gofmt checks whether code was gofmt-ed. By default this tool runs with -s option to check for code simplification - - goimports # In addition to fixing imports, goimports also formats your code in the same style as gofmt. + - goimports # Check import statements are formatted according to the 'goimport' command. Reformat imports in autofix mode. 
- gosec # (gas): Inspects source code for security problems - inamedparam # reports interfaces with unnamed method parameters - musttag # enforce field tags in (un)marshaled structs @@ -234,7 +245,7 @@ linters: - protogetter # Reports direct reads from proto message fields when getters should be used - revive # Fast, configurable, extensible, flexible, and beautiful linter for Go. Drop-in replacement of golint. - tagalign # check that struct tags are well aligned - - thelper # thelper detects golang test helpers without t.Helper() call and checks the consistency of test helpers + - thelper # thelper detects tests helpers which is not start with t.Helper() method. - wrapcheck # Checks that errors returned from external packages are wrapped # @@ -242,12 +253,12 @@ linters: # - containedctx # containedctx is a linter that detects struct contained context.Context field - - contextcheck # check the function whether use a non-inherited context + - contextcheck # check whether the function uses a non-inherited context - errname # Checks that sentinel errors are prefixed with the `Err` and error types are suffixed with the `Error`. - gomnd # An analyzer to detect magic numbers. - ireturn # Accept Interfaces, Return Concrete Types - nilnil # Checks that there is no simultaneous return of `nil` error and an invalid value. - - noctx # noctx finds sending http request without context.Context + - noctx # Finds sending http request without context.Context - unparam # Reports unused function parameters # @@ -256,8 +267,8 @@ linters: - gofumpt # Gofumpt checks whether code was gofumpt-ed. - nlreturn # nlreturn checks for a new line before return and branch statements to increase code clarity - - whitespace # Tool for detection of leading and trailing whitespace - - wsl # Whitespace Linter - Forces you to use empty lines! + - whitespace # Whitespace is a linter that checks for unnecessary newlines at the start and end of functions, if, for, etc. 
+ - wsl # add or remove empty lines # # Well intended, but not ready for this @@ -265,8 +276,8 @@ linters: - dupl # Tool for code clone detection - forcetypeassert # finds forced type assertions - godox # Tool for detection of FIXME, TODO and other comment keywords - - goerr113 # Golang linter to check the errors handling expressions - - paralleltest # paralleltest detects missing usage of t.Parallel() method in your Go test + - goerr113 # Go linter to check the errors handling expressions + - paralleltest # Detects missing usage of t.Parallel() method in your Go test - testpackage # linter that makes you use a separate _test package # @@ -274,7 +285,7 @@ linters: # - exhaustruct # Checks if all structure fields are initialized - forbidigo # Forbids identifiers - - gochecknoglobals # check that no global variables exist + - gochecknoglobals # Check that no global variables exist. - goconst # Finds repeated strings that could be replaced by a constant - stylecheck # Stylecheck is a replacement for golint - tagliatelle # Checks the struct tags. From fb393f1c57cd72298d6684884bbb7419365a798e Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 22 Apr 2024 17:19:00 +0200 Subject: [PATCH 093/318] tests: bump yq, cfssl (#2952) --- Dockerfile | 2 +- Dockerfile.debian | 2 +- test/bats.mk | 10 +++++----- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/Dockerfile b/Dockerfile index 1e311bfa89c..69de0f9df8f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -16,7 +16,7 @@ RUN apk add --no-cache git g++ gcc libc-dev make bash gettext binutils-gold core cd re2-${RE2_VERSION} && \ make install && \ echo "githubciXXXXXXXXXXXXXXXXXXXXXXXX" > /etc/machine-id && \ - go install github.com/mikefarah/yq/v4@v4.40.4 + go install github.com/mikefarah/yq/v4@v4.43.1 COPY . . 
diff --git a/Dockerfile.debian b/Dockerfile.debian index ee0fa32bbd9..9bcb517bb2d 100644 --- a/Dockerfile.debian +++ b/Dockerfile.debian @@ -21,7 +21,7 @@ RUN apt-get update && \ make && \ make install && \ echo "githubciXXXXXXXXXXXXXXXXXXXXXXXX" > /etc/machine-id && \ - go install github.com/mikefarah/yq/v4@v4.40.4 + go install github.com/mikefarah/yq/v4@v4.43.1 COPY . . diff --git a/test/bats.mk b/test/bats.mk index 0cc5deb9b7a..8f507cb659b 100644 --- a/test/bats.mk +++ b/test/bats.mk @@ -66,11 +66,11 @@ bats-check-requirements: ## Check dependencies for functional tests @$(TEST_DIR)/bin/check-requirements bats-update-tools: ## Install/update tools required for functional tests - # yq v4.40.4 - GOBIN=$(TEST_DIR)/tools go install github.com/mikefarah/yq/v4@1c3d55106075bd37df197b4bc03cb4a413fdb903 - # cfssl v1.6.4 - GOBIN=$(TEST_DIR)/tools go install github.com/cloudflare/cfssl/cmd/cfssl@b4d0d877cac528f63db39dfb62d5c96cd3a32a0b - GOBIN=$(TEST_DIR)/tools go install github.com/cloudflare/cfssl/cmd/cfssljson@b4d0d877cac528f63db39dfb62d5c96cd3a32a0b + # yq v4.43.1 + GOBIN=$(TEST_DIR)/tools go install github.com/mikefarah/yq/v4@c35ec752e38ea0c096d3c44e13cfc0797ac394d8 + # cfssl v1.6.5 + GOBIN=$(TEST_DIR)/tools go install github.com/cloudflare/cfssl/cmd/cfssl@96259aa29c9cc9b2f4e04bad7d4bc152e5405dda + GOBIN=$(TEST_DIR)/tools go install github.com/cloudflare/cfssl/cmd/cfssljson@96259aa29c9cc9b2f4e04bad7d4bc152e5405dda # Build and installs crowdsec in a local directory. Rebuilds if already exists. 
bats-build: bats-environment ## Build binaries for functional tests From b48b7283178b3f44bf0a60aa37cf28377cc46189 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 22 Apr 2024 23:54:51 +0200 Subject: [PATCH 094/318] cscli support: include stack traces (#2935) --- .golangci.yml | 15 +++------- cmd/crowdsec-cli/main.go | 17 +++++++++-- cmd/crowdsec-cli/support.go | 60 +++++++++++++++++++++++++++---------- cmd/crowdsec/main.go | 9 +++++- cmd/crowdsec/serve.go | 2 +- go.mod | 2 +- go.sum | 4 +-- pkg/apiserver/apiserver.go | 11 +++++-- 8 files changed, 82 insertions(+), 38 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index f38fa337a8d..cf13d9b6d8d 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -37,17 +37,10 @@ linters-settings: statements: 122 govet: - enable: - - atomicalign - - deepequalerrors - # TODO: - fieldalignment - - findcall - - nilness - # TODO: - reflectvaluecompare - - shadow - - sortslice - - timeformat - - unusedwrite + enable-all: true + disable: + - reflectvaluecompare + - fieldalignment lll: # lower this after refactoring diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index 446901e4aa9..9e721f1fac6 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -1,7 +1,9 @@ package main import ( + "fmt" "os" + "path/filepath" "slices" "time" @@ -10,14 +12,18 @@ import ( log "github.com/sirupsen/logrus" "github.com/spf13/cobra" + "github.com/crowdsecurity/go-cs-lib/trace" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/database" "github.com/crowdsecurity/crowdsec/pkg/fflag" ) -var ConfigFilePath string -var csConfig *csconfig.Config -var dbClient *database.Client +var ( + ConfigFilePath string + csConfig *csconfig.Config + dbClient *database.Client +) type configGetter func() *csconfig.Config @@ -82,6 +88,11 @@ func loadConfigFor(command string) (*csconfig.Config, string, error) { return nil, "", err } + // set up directory for trace 
files + if err := trace.Init(filepath.Join(config.ConfigPaths.DataDir, "trace")); err != nil { + return nil, "", fmt.Errorf("while setting up trace directory: %w", err) + } + return config, merged, nil } diff --git a/cmd/crowdsec-cli/support.go b/cmd/crowdsec-cli/support.go index 661950fa8f6..8b2481b4cf2 100644 --- a/cmd/crowdsec-cli/support.go +++ b/cmd/crowdsec-cli/support.go @@ -4,6 +4,7 @@ import ( "archive/zip" "bytes" "context" + "errors" "fmt" "io" "net/http" @@ -12,12 +13,14 @@ import ( "path/filepath" "regexp" "strings" + "time" "github.com/blackfireio/osinfo" "github.com/go-openapi/strfmt" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" + "github.com/crowdsecurity/go-cs-lib/trace" "github.com/crowdsecurity/go-cs-lib/version" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" @@ -47,6 +50,7 @@ const ( SUPPORT_CAPI_STATUS_PATH = "capi_status.txt" SUPPORT_ACQUISITION_CONFIG_BASE_PATH = "config/acquis/" SUPPORT_CROWDSEC_PROFILE_PATH = "config/profiles.yaml" + SUPPORT_CRASH_PATH = "crash/" ) // from https://github.com/acarl005/stripansi @@ -62,7 +66,7 @@ func collectMetrics() ([]byte, []byte, error) { if csConfig.Cscli.PrometheusUrl == "" { log.Warn("No Prometheus URL configured, metrics will not be collected") - return nil, nil, fmt.Errorf("prometheus_uri is not set") + return nil, nil, errors.New("prometheus_uri is not set") } humanMetrics := bytes.NewBuffer(nil) @@ -70,7 +74,7 @@ func collectMetrics() ([]byte, []byte, error) { ms := NewMetricStore() if err := ms.Fetch(csConfig.Cscli.PrometheusUrl); err != nil { - return nil, nil, fmt.Errorf("could not fetch prometheus metrics: %s", err) + return nil, nil, fmt.Errorf("could not fetch prometheus metrics: %w", err) } if err := ms.Format(humanMetrics, nil, "human", false); err != nil { @@ -79,21 +83,21 @@ func collectMetrics() ([]byte, []byte, error) { req, err := http.NewRequest(http.MethodGet, csConfig.Cscli.PrometheusUrl, nil) if err != nil { - return nil, nil, fmt.Errorf("could not 
create requests to prometheus endpoint: %s", err) + return nil, nil, fmt.Errorf("could not create requests to prometheus endpoint: %w", err) } client := &http.Client{} resp, err := client.Do(req) if err != nil { - return nil, nil, fmt.Errorf("could not get metrics from prometheus endpoint: %s", err) + return nil, nil, fmt.Errorf("could not get metrics from prometheus endpoint: %w", err) } defer resp.Body.Close() body, err := io.ReadAll(resp.Body) if err != nil { - return nil, nil, fmt.Errorf("could not read metrics from prometheus endpoint: %s", err) + return nil, nil, fmt.Errorf("could not read metrics from prometheus endpoint: %w", err) } return humanMetrics.Bytes(), body, nil @@ -121,19 +125,18 @@ func collectOSInfo() ([]byte, error) { log.Info("Collecting OS info") info, err := osinfo.GetOSInfo() - if err != nil { return nil, err } w := bytes.NewBuffer(nil) - w.WriteString(fmt.Sprintf("Architecture: %s\n", info.Architecture)) - w.WriteString(fmt.Sprintf("Family: %s\n", info.Family)) - w.WriteString(fmt.Sprintf("ID: %s\n", info.ID)) - w.WriteString(fmt.Sprintf("Name: %s\n", info.Name)) - w.WriteString(fmt.Sprintf("Codename: %s\n", info.Codename)) - w.WriteString(fmt.Sprintf("Version: %s\n", info.Version)) - w.WriteString(fmt.Sprintf("Build: %s\n", info.Build)) + fmt.Fprintf(w, "Architecture: %s\n", info.Architecture) + fmt.Fprintf(w, "Family: %s\n", info.Family) + fmt.Fprintf(w, "ID: %s\n", info.ID) + fmt.Fprintf(w, "Name: %s\n", info.Name) + fmt.Fprintf(w, "Codename: %s\n", info.Codename) + fmt.Fprintf(w, "Version: %s\n", info.Version) + fmt.Fprintf(w, "Build: %s\n", info.Build) return w.Bytes(), nil } @@ -163,7 +166,7 @@ func collectBouncers(dbClient *database.Client) ([]byte, error) { bouncers, err := dbClient.ListBouncers() if err != nil { - return nil, fmt.Errorf("unable to list bouncers: %s", err) + return nil, fmt.Errorf("unable to list bouncers: %w", err) } getBouncersTable(out, bouncers) @@ -176,7 +179,7 @@ func collectAgents(dbClient *database.Client) 
([]byte, error) { machines, err := dbClient.ListMachines() if err != nil { - return nil, fmt.Errorf("unable to list machines: %s", err) + return nil, fmt.Errorf("unable to list machines: %w", err) } getAgentsTable(out, machines) @@ -264,6 +267,11 @@ func collectAcquisitionConfig() map[string][]byte { return ret } +func collectCrash() ([]string, error) { + log.Info("Collecting crash dumps") + return trace.List() +} + type cliSupport struct{} func NewCLISupport() *cliSupport { @@ -431,11 +439,31 @@ cscli support dump -f /tmp/crowdsec-support.zip } } + crash, err := collectCrash() + if err != nil { + log.Errorf("could not collect crash dumps: %s", err) + } + + for _, filename := range crash { + content, err := os.ReadFile(filename) + if err != nil { + log.Errorf("could not read crash dump %s: %s", filename, err) + } + + infos[SUPPORT_CRASH_PATH+filepath.Base(filename)] = content + } + w := bytes.NewBuffer(nil) zipWriter := zip.NewWriter(w) for filename, data := range infos { - fw, err := zipWriter.Create(filename) + header := &zip.FileHeader{ + Name: filename, + Method: zip.Deflate, + // TODO: retain mtime where possible (esp. 
trace) + Modified: time.Now(), + } + fw, err := zipWriter.CreateHeader(header) if err != nil { log.Errorf("Could not add zip entry for %s: %s", filename, err) continue diff --git a/cmd/crowdsec/main.go b/cmd/crowdsec/main.go index 5f04e9b99a4..0d96692ba5f 100644 --- a/cmd/crowdsec/main.go +++ b/cmd/crowdsec/main.go @@ -6,6 +6,7 @@ import ( "fmt" _ "net/http/pprof" "os" + "path/filepath" "runtime" "runtime/pprof" "strings" @@ -14,6 +15,8 @@ import ( log "github.com/sirupsen/logrus" "gopkg.in/tomb.v2" + "github.com/crowdsecurity/go-cs-lib/trace" + "github.com/crowdsecurity/crowdsec/pkg/acquisition" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/csplugin" @@ -96,8 +99,8 @@ func LoadBuckets(cConfig *csconfig.Config, hub *cwhub.Hub) error { buckets = leakybucket.NewBuckets() log.Infof("Loading %d scenario files", len(files)) - holders, outputEventChan, err = leakybucket.LoadBuckets(cConfig.Crowdsec, hub, files, &bucketsTomb, buckets, flags.OrderEvent) + holders, outputEventChan, err = leakybucket.LoadBuckets(cConfig.Crowdsec, hub, files, &bucketsTomb, buckets, flags.OrderEvent) if err != nil { return fmt.Errorf("scenario loading failed: %w", err) } @@ -230,6 +233,10 @@ func LoadConfig(configFile string, disableAgent bool, disableAPI bool, quiet boo return nil, fmt.Errorf("while loading configuration file: %w", err) } + if err := trace.Init(filepath.Join(cConfig.ConfigPaths.DataDir, "trace")); err != nil { + return nil, fmt.Errorf("while setting up trace directory: %w", err) + } + cConfig.Common.LogLevel = newLogLevel(cConfig.Common.LogLevel, flags) if dumpFolder != "" { diff --git a/cmd/crowdsec/serve.go b/cmd/crowdsec/serve.go index c8ccd4d5d70..9da3d80106a 100644 --- a/cmd/crowdsec/serve.go +++ b/cmd/crowdsec/serve.go @@ -391,7 +391,7 @@ func Serve(cConfig *csconfig.Config, agentReady chan bool) error { } if cConfig.Common != nil && cConfig.Common.Daemonize { - csdaemon.NotifySystemd(log.StandardLogger()) + 
csdaemon.Notify(csdaemon.Ready, log.StandardLogger()) // wait for signals return HandleSignals(cConfig) } diff --git a/go.mod b/go.mod index 04f34e6485e..70d819a4059 100644 --- a/go.mod +++ b/go.mod @@ -27,7 +27,7 @@ require ( github.com/corazawaf/libinjection-go v0.1.2 github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607 github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 - github.com/crowdsecurity/go-cs-lib v0.0.6 + github.com/crowdsecurity/go-cs-lib v0.0.10 github.com/crowdsecurity/grokky v0.2.1 github.com/crowdsecurity/machineid v1.0.2 github.com/davecgh/go-spew v1.1.1 diff --git a/go.sum b/go.sum index 29e23f02ab9..750439e4f0e 100644 --- a/go.sum +++ b/go.sum @@ -102,8 +102,8 @@ github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607 h1:hyrYw3h github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607/go.mod h1:br36fEqurGYZQGit+iDYsIzW0FF6VufMbDzyyLxEuPA= github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 h1:r97WNVC30Uen+7WnLs4xDScS/Ex988+id2k6mDf8psU= github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26/go.mod h1:zpv7r+7KXwgVUZnUNjyP22zc/D7LKjyoY02weH2RBbk= -github.com/crowdsecurity/go-cs-lib v0.0.6 h1:Ef6MylXe0GaJE9vrfvxEdbHb31+JUP1os+murPz7Pos= -github.com/crowdsecurity/go-cs-lib v0.0.6/go.mod h1:8FMKNGsh3hMZi2SEv6P15PURhEJnZV431XjzzBSuf0k= +github.com/crowdsecurity/go-cs-lib v0.0.10 h1:Twt/y/rYCUspGY1zxDnGurL2svRSREAz+2+puLepd9c= +github.com/crowdsecurity/go-cs-lib v0.0.10/go.mod h1:8FMKNGsh3hMZi2SEv6P15PURhEJnZV431XjzzBSuf0k= github.com/crowdsecurity/grokky v0.2.1 h1:t4VYnDlAd0RjDM2SlILalbwfCrQxtJSMGdQOR0zwkE4= github.com/crowdsecurity/grokky v0.2.1/go.mod h1:33usDIYzGDsgX1kHAThCbseso6JuWNJXOzRQDGXHtWM= github.com/crowdsecurity/machineid v1.0.2 h1:wpkpsUghJF8Khtmn/tg6GxgdhLA1Xflerh5lirI+bdc= diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go index 7989cfc1d97..6592c8bbf76 100644 --- a/pkg/apiserver/apiserver.go +++ b/pkg/apiserver/apiserver.go @@ -84,11 
+84,16 @@ func recoverFromPanic(c *gin.Context) { } if brokenPipe { - log.Warningf("client %s disconnected : %s", c.ClientIP(), err) + log.Warningf("client %s disconnected: %s", c.ClientIP(), err) c.Abort() } else { - filename := trace.WriteStackTrace(err) - log.Warningf("client %s error : %s", c.ClientIP(), err) + log.Warningf("client %s error: %s", c.ClientIP(), err) + + filename, err := trace.WriteStackTrace(err) + if err != nil { + log.Errorf("also while writing stacktrace: %s", err) + } + log.Warningf("stacktrace written to %s, please join to your issue", filename) c.AbortWithStatus(http.StatusInternalServerError) } From 718d1c54b2950653c3ab3bf20873c89f416be3ac Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 23 Apr 2024 11:15:27 +0200 Subject: [PATCH 095/318] pkg/database/decisiosn: remove filter parameter, which is always passed empty (#2954) --- .golangci.yml | 2 +- cmd/crowdsec/metrics.go | 12 ++-- pkg/database/decisions.go | 121 ++++++++++++++++++++++---------------- 3 files changed, 76 insertions(+), 59 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index cf13d9b6d8d..ff46ef1c02a 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -58,7 +58,7 @@ linters-settings: min-complexity: 28 nlreturn: - block-size: 4 + block-size: 5 nolintlint: allow-unused: false # report any unused nolint directives diff --git a/cmd/crowdsec/metrics.go b/cmd/crowdsec/metrics.go index d670051cea0..cc0c118b436 100644 --- a/cmd/crowdsec/metrics.go +++ b/cmd/crowdsec/metrics.go @@ -3,7 +3,6 @@ package main import ( "fmt" "net/http" - "time" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" @@ -22,7 +21,8 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/parser" ) -/*prometheus*/ +// Prometheus + var globalParserHits = prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "cs_parser_hits_total", @@ -30,6 +30,7 @@ var globalParserHits = prometheus.NewCounterVec( }, 
[]string{"source", "type"}, ) + var globalParserHitsOk = prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "cs_parser_hits_ok_total", @@ -37,6 +38,7 @@ var globalParserHitsOk = prometheus.NewCounterVec( }, []string{"source", "type"}, ) + var globalParserHitsKo = prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "cs_parser_hits_ko_total", @@ -116,9 +118,7 @@ func computeDynamicMetrics(next http.Handler, dbClient *database.Client) http.Ha return } - decisionsFilters := make(map[string][]string, 0) - - decisions, err := dbClient.QueryDecisionCountByScenario(decisionsFilters) + decisions, err := dbClient.QueryDecisionCountByScenario() if err != nil { log.Errorf("Error querying decisions for metrics: %v", err) next.ServeHTTP(w, r) @@ -139,7 +139,6 @@ func computeDynamicMetrics(next http.Handler, dbClient *database.Client) http.Ha } alerts, err := dbClient.AlertsCountPerScenario(alertsFilter) - if err != nil { log.Errorf("Error querying alerts for metrics: %v", err) next.ServeHTTP(w, r) @@ -194,7 +193,6 @@ func servePrometheus(config *csconfig.PrometheusCfg, dbClient *database.Client, defer trace.CatchPanic("crowdsec/servePrometheus") http.Handle("/metrics", computeDynamicMetrics(promhttp.Handler(), dbClient)) - log.Debugf("serving metrics after %s ms", time.Since(crowdsecT0)) if err := http.ListenAndServe(fmt.Sprintf("%s:%d", config.ListenAddr, config.ListenPort), nil); err != nil { // in time machine, we most likely have the LAPI using the port diff --git a/pkg/database/decisions.go b/pkg/database/decisions.go index 3175a916ff5..20a49c79143 100644 --- a/pkg/database/decisions.go +++ b/pkg/database/decisions.go @@ -37,6 +37,7 @@ func BuildDecisionRequestWithFilter(query *ent.DecisionQuery, filter map[string] if v[0] == "false" { query = query.Where(decision.SimulatedEQ(false)) } + delete(filter, "simulated") } else { query = query.Where(decision.SimulatedEQ(false)) @@ -49,7 +50,7 @@ func BuildDecisionRequestWithFilter(query *ent.DecisionQuery, filter 
map[string] if err != nil { return nil, errors.Wrapf(InvalidFilter, "invalid contains value : %s", err) } - case "scopes", "scope": //Swagger mentions both of them, let's just support both to make sure we don't break anything + case "scopes", "scope": // Swagger mentions both of them, let's just support both to make sure we don't break anything scopes := strings.Split(value[0], ",") for i, scope := range scopes { switch strings.ToLower(scope) { @@ -63,6 +64,7 @@ func BuildDecisionRequestWithFilter(query *ent.DecisionQuery, filter map[string] scopes[i] = types.AS } } + query = query.Where(decision.ScopeIn(scopes...)) case "value": query = query.Where(decision.ValueEQ(value[0])) @@ -164,11 +166,11 @@ func (c *Client) QueryExpiredDecisionsWithFilters(filters map[string][]string) ( return data, nil } -func (c *Client) QueryDecisionCountByScenario(filters map[string][]string) ([]*DecisionsByScenario, error) { +func (c *Client) QueryDecisionCountByScenario() ([]*DecisionsByScenario, error) { query := c.Ent.Decision.Query().Where( decision.UntilGT(time.Now().UTC()), ) - query, err := BuildDecisionRequestWithFilter(query, filters) + query, err := BuildDecisionRequestWithFilter(query, make(map[string][]string)) if err != nil { c.Log.Warningf("QueryDecisionCountByScenario : %s", err) @@ -277,10 +279,12 @@ func (c *Client) QueryNewDecisionsSinceWithFilters(since time.Time, filters map[ decision.CreatedAtGT(since), decision.UntilGT(time.Now().UTC()), ) - //Allow a bouncer to ask for non-deduplicated results + + // Allow a bouncer to ask for non-deduplicated results if v, ok := filters["dedup"]; !ok || v[0] != "false" { query = query.Where(longestDecisionForScopeTypeValue) } + query, err := BuildDecisionRequestWithFilter(query, filters) if err != nil { c.Log.Warningf("QueryNewDecisionsSinceWithFilters : %s", err) @@ -294,17 +298,20 @@ func (c *Client) QueryNewDecisionsSinceWithFilters(since time.Time, filters map[ c.Log.Warningf("QueryNewDecisionsSinceWithFilters : %s", err) 
return []*ent.Decision{}, errors.Wrapf(QueryFail, "new decisions since '%s'", since.String()) } + return data, nil } -func (c *Client) DeleteDecisionById(decisionId int) ([]*ent.Decision, error) { - toDelete, err := c.Ent.Decision.Query().Where(decision.IDEQ(decisionId)).All(c.CTX) +func (c *Client) DeleteDecisionById(decisionID int) ([]*ent.Decision, error) { + toDelete, err := c.Ent.Decision.Query().Where(decision.IDEQ(decisionID)).All(c.CTX) if err != nil { c.Log.Warningf("DeleteDecisionById : %s", err) - return nil, errors.Wrapf(DeleteFail, "decision with id '%d' doesn't exist", decisionId) + return nil, errors.Wrapf(DeleteFail, "decision with id '%d' doesn't exist", decisionID) } + count, err := c.BulkDeleteDecisions(toDelete, false) c.Log.Debugf("deleted %d decisions", count) + return toDelete, err } @@ -317,6 +324,7 @@ func (c *Client) DeleteDecisionsWithFilter(filter map[string][]string) (string, else, return bans that are *contained* by the given value (value is the outer) */ decisions := c.Ent.Decision.Query() + for param, value := range filter { switch param { case "contains": @@ -359,48 +367,48 @@ func (c *Client) DeleteDecisionsWithFilter(filter map[string][]string) (string, } else if ip_sz == 16 { if contains { /*decision contains {start_ip,end_ip}*/ decisions = decisions.Where(decision.And( - //matching addr size + // matching addr size decision.IPSizeEQ(int64(ip_sz)), decision.Or( - //decision.start_ip < query.start_ip + // decision.start_ip < query.start_ip decision.StartIPLT(start_ip), decision.And( - //decision.start_ip == query.start_ip + // decision.start_ip == query.start_ip decision.StartIPEQ(start_ip), - //decision.start_suffix <= query.start_suffix + // decision.start_suffix <= query.start_suffix decision.StartSuffixLTE(start_sfx), )), decision.Or( - //decision.end_ip > query.end_ip + // decision.end_ip > query.end_ip decision.EndIPGT(end_ip), decision.And( - //decision.end_ip == query.end_ip + // decision.end_ip == query.end_ip 
decision.EndIPEQ(end_ip), - //decision.end_suffix >= query.end_suffix + // decision.end_suffix >= query.end_suffix decision.EndSuffixGTE(end_sfx), ), ), )) } else { decisions = decisions.Where(decision.And( - //matching addr size + // matching addr size decision.IPSizeEQ(int64(ip_sz)), decision.Or( - //decision.start_ip > query.start_ip + // decision.start_ip > query.start_ip decision.StartIPGT(start_ip), decision.And( - //decision.start_ip == query.start_ip + // decision.start_ip == query.start_ip decision.StartIPEQ(start_ip), - //decision.start_suffix >= query.start_suffix + // decision.start_suffix >= query.start_suffix decision.StartSuffixGTE(start_sfx), )), decision.Or( - //decision.end_ip < query.end_ip + // decision.end_ip < query.end_ip decision.EndIPLT(end_ip), decision.And( - //decision.end_ip == query.end_ip + // decision.end_ip == query.end_ip decision.EndIPEQ(end_ip), - //decision.end_suffix <= query.end_suffix + // decision.end_suffix <= query.end_suffix decision.EndSuffixLTE(end_sfx), ), ), @@ -415,11 +423,13 @@ func (c *Client) DeleteDecisionsWithFilter(filter map[string][]string) (string, c.Log.Warningf("DeleteDecisionsWithFilter : %s", err) return "0", nil, errors.Wrap(DeleteFail, "decisions with provided filter") } + count, err := c.BulkDeleteDecisions(toDelete, false) if err != nil { c.Log.Warningf("While deleting decisions : %s", err) return "0", nil, errors.Wrap(DeleteFail, "decisions with provided filter") } + return strconv.Itoa(count), toDelete, nil } @@ -432,6 +442,7 @@ func (c *Client) SoftDeleteDecisionsWithFilter(filter map[string][]string) (stri /*if contains is true, return bans that *contains* the given value (value is the inner) else, return bans that are *contained* by the given value (value is the outer)*/ decisions := c.Ent.Decision.Query().Where(decision.UntilGT(time.Now().UTC())) + for param, value := range filter { switch param { case "contains": @@ -480,24 +491,24 @@ func (c *Client) SoftDeleteDecisionsWithFilter(filter 
map[string][]string) (stri /*decision contains {start_ip,end_ip}*/ if contains { decisions = decisions.Where(decision.And( - //matching addr size + // matching addr size decision.IPSizeEQ(int64(ip_sz)), decision.Or( - //decision.start_ip < query.start_ip + // decision.start_ip < query.start_ip decision.StartIPLT(start_ip), decision.And( - //decision.start_ip == query.start_ip + // decision.start_ip == query.start_ip decision.StartIPEQ(start_ip), - //decision.start_suffix <= query.start_suffix + // decision.start_suffix <= query.start_suffix decision.StartSuffixLTE(start_sfx), )), decision.Or( - //decision.end_ip > query.end_ip + // decision.end_ip > query.end_ip decision.EndIPGT(end_ip), decision.And( - //decision.end_ip == query.end_ip + // decision.end_ip == query.end_ip decision.EndIPEQ(end_ip), - //decision.end_suffix >= query.end_suffix + // decision.end_suffix >= query.end_suffix decision.EndSuffixGTE(end_sfx), ), ), @@ -505,24 +516,24 @@ func (c *Client) SoftDeleteDecisionsWithFilter(filter map[string][]string) (stri } else { /*decision is contained within {start_ip,end_ip}*/ decisions = decisions.Where(decision.And( - //matching addr size + // matching addr size decision.IPSizeEQ(int64(ip_sz)), decision.Or( - //decision.start_ip > query.start_ip + // decision.start_ip > query.start_ip decision.StartIPGT(start_ip), decision.And( - //decision.start_ip == query.start_ip + // decision.start_ip == query.start_ip decision.StartIPEQ(start_ip), - //decision.start_suffix >= query.start_suffix + // decision.start_suffix >= query.start_suffix decision.StartSuffixGTE(start_sfx), )), decision.Or( - //decision.end_ip < query.end_ip + // decision.end_ip < query.end_ip decision.EndIPLT(end_ip), decision.And( - //decision.end_ip == query.end_ip + // decision.end_ip == query.end_ip decision.EndIPEQ(end_ip), - //decision.end_suffix <= query.end_suffix + // decision.end_suffix <= query.end_suffix decision.EndSuffixLTE(end_sfx), ), ), @@ -531,6 +542,7 @@ func (c *Client) 
SoftDeleteDecisionsWithFilter(filter map[string][]string) (stri } else if ip_sz != 0 { return "0", nil, errors.Wrapf(InvalidFilter, "Unknown ip size %d", ip_sz) } + DecisionsToDelete, err := decisions.All(c.CTX) if err != nil { c.Log.Warningf("SoftDeleteDecisionsWithFilter : %s", err) @@ -541,13 +553,14 @@ func (c *Client) SoftDeleteDecisionsWithFilter(filter map[string][]string) (stri if err != nil { return "0", nil, errors.Wrapf(DeleteFail, "soft delete decisions with provided filter : %s", err) } + return strconv.Itoa(count), DecisionsToDelete, err } -// BulkDeleteDecisions set the expiration of a bulk of decisions to now() or hard deletes them. +// BulkDeleteDecisions sets the expiration of a bulk of decisions to now() or hard deletes them. // We are doing it this way so we can return impacted decisions for sync with CAPI/PAPI func (c *Client) BulkDeleteDecisions(decisionsToDelete []*ent.Decision, softDelete bool) (int, error) { - const bulkSize = 256 //scientifically proven to be the best value for bulk delete + const bulkSize = 256 // scientifically proven to be the best value for bulk delete var ( nbUpdates int @@ -576,6 +589,7 @@ func (c *Client) BulkDeleteDecisions(decisionsToDelete []*ent.Decision, softDele return totalUpdates, fmt.Errorf("hard delete decisions with provided filter: %w", err) } } + totalUpdates += nbUpdates } @@ -612,6 +626,7 @@ func (c *Client) CountDecisionsByValue(decisionValue string) (int, error) { contains := true decisions := c.Ent.Decision.Query() + decisions, err = applyStartIpEndIpFilter(decisions, contains, ip_sz, start_ip, start_sfx, end_ip, end_sfx) if err != nil { return 0, errors.Wrapf(err, "fail to apply StartIpEndIpFilter") @@ -667,6 +682,7 @@ func applyStartIpEndIpFilter(decisions *ent.DecisionQuery, contains bool, ip_sz decision.IPSizeEQ(int64(ip_sz)), )) } + return decisions, nil } @@ -674,24 +690,24 @@ func applyStartIpEndIpFilter(decisions *ent.DecisionQuery, contains bool, ip_sz /*decision contains 
{start_ip,end_ip}*/ if contains { decisions = decisions.Where(decision.And( - //matching addr size + // matching addr size decision.IPSizeEQ(int64(ip_sz)), decision.Or( - //decision.start_ip < query.start_ip + // decision.start_ip < query.start_ip decision.StartIPLT(start_ip), decision.And( - //decision.start_ip == query.start_ip + // decision.start_ip == query.start_ip decision.StartIPEQ(start_ip), - //decision.start_suffix <= query.start_suffix + // decision.start_suffix <= query.start_suffix decision.StartSuffixLTE(start_sfx), )), decision.Or( - //decision.end_ip > query.end_ip + // decision.end_ip > query.end_ip decision.EndIPGT(end_ip), decision.And( - //decision.end_ip == query.end_ip + // decision.end_ip == query.end_ip decision.EndIPEQ(end_ip), - //decision.end_suffix >= query.end_suffix + // decision.end_suffix >= query.end_suffix decision.EndSuffixGTE(end_sfx), ), ), @@ -699,29 +715,30 @@ func applyStartIpEndIpFilter(decisions *ent.DecisionQuery, contains bool, ip_sz } else { /*decision is contained within {start_ip,end_ip}*/ decisions = decisions.Where(decision.And( - //matching addr size + // matching addr size decision.IPSizeEQ(int64(ip_sz)), decision.Or( - //decision.start_ip > query.start_ip + // decision.start_ip > query.start_ip decision.StartIPGT(start_ip), decision.And( - //decision.start_ip == query.start_ip + // decision.start_ip == query.start_ip decision.StartIPEQ(start_ip), - //decision.start_suffix >= query.start_suffix + // decision.start_suffix >= query.start_suffix decision.StartSuffixGTE(start_sfx), )), decision.Or( - //decision.end_ip < query.end_ip + // decision.end_ip < query.end_ip decision.EndIPLT(end_ip), decision.And( - //decision.end_ip == query.end_ip + // decision.end_ip == query.end_ip decision.EndIPEQ(end_ip), - //decision.end_suffix <= query.end_suffix + // decision.end_suffix <= query.end_suffix decision.EndSuffixLTE(end_sfx), ), ), )) } + return decisions, nil } @@ -735,8 +752,10 @@ func applyStartIpEndIpFilter(decisions 
*ent.DecisionQuery, contains bool, ip_sz func decisionPredicatesFromStr(s string, predicateFunc func(string) predicate.Decision) []predicate.Decision { words := strings.Split(s, ",") predicates := make([]predicate.Decision, len(words)) + for i, word := range words { predicates[i] = predicateFunc(word) } + return predicates } From c64332d30abf725e0233bbd89874cec4bc8b0419 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 23 Apr 2024 12:28:38 +0200 Subject: [PATCH 096/318] cscli config show: avoid globals, use yaml v3 (#2863) * cscli config show: avoid globals, use yaml v3 * lint (whitespace/errors) --- cmd/crowdsec-cli/alerts.go | 11 ++++--- cmd/crowdsec-cli/config_show.go | 46 +++++++++++++++------------- cmd/crowdsec-cli/console.go | 16 +++++----- cmd/crowdsec-cli/copyfile.go | 3 +- cmd/crowdsec-cli/decisions.go | 9 +++--- cmd/crowdsec-cli/decisions_import.go | 12 ++++---- cmd/crowdsec-cli/doc.go | 2 ++ cmd/crowdsec-cli/explain.go | 8 +++-- 8 files changed, 60 insertions(+), 47 deletions(-) diff --git a/cmd/crowdsec-cli/alerts.go b/cmd/crowdsec-cli/alerts.go index 908466f9eb2..59dff809458 100644 --- a/cmd/crowdsec-cli/alerts.go +++ b/cmd/crowdsec-cli/alerts.go @@ -4,6 +4,7 @@ import ( "context" "encoding/csv" "encoding/json" + "errors" "fmt" "net/url" "os" @@ -204,6 +205,7 @@ func (cli *cliAlerts) NewCommand() *cobra.Command { if err != nil { return fmt.Errorf("parsing api url %s: %w", apiURL, err) } + cli.client, err = apiclient.NewClient(&apiclient.Config{ MachineID: cfg.API.Client.Credentials.Login, Password: strfmt.Password(cfg.API.Client.Credentials.Password), @@ -211,7 +213,6 @@ func (cli *cliAlerts) NewCommand() *cobra.Command { URL: apiURL, VersionPrefix: "v1", }) - if err != nil { return fmt.Errorf("new api client: %w", err) } @@ -229,7 +230,7 @@ func (cli *cliAlerts) NewCommand() *cobra.Command { } func (cli *cliAlerts) NewListCmd() *cobra.Command { - var alertListFilter = apiclient.AlertsListOpts{ + alertListFilter 
:= apiclient.AlertsListOpts{ ScopeEquals: new(string), ValueEquals: new(string), ScenarioEquals: new(string), @@ -363,7 +364,7 @@ func (cli *cliAlerts) NewDeleteCmd() *cobra.Command { delAlertByID string ) - var alertDeleteFilter = apiclient.AlertsDeleteOpts{ + alertDeleteFilter := apiclient.AlertsDeleteOpts{ ScopeEquals: new(string), ValueEquals: new(string), ScenarioEquals: new(string), @@ -391,7 +392,7 @@ cscli alerts delete -s crowdsecurity/ssh-bf"`, *alertDeleteFilter.ScenarioEquals == "" && *alertDeleteFilter.IPEquals == "" && *alertDeleteFilter.RangeEquals == "" && delAlertByID == "" { _ = cmd.Usage() - return fmt.Errorf("at least one filter or --all must be specified") + return errors.New("at least one filter or --all must be specified") } return nil @@ -478,7 +479,7 @@ func (cli *cliAlerts) NewInspectCmd() *cobra.Command { cfg := cli.cfg() if len(args) == 0 { printHelp(cmd) - return fmt.Errorf("missing alert_id") + return errors.New("missing alert_id") } for _, alertID := range args { id, err := strconv.Atoi(alertID) diff --git a/cmd/crowdsec-cli/config_show.go b/cmd/crowdsec-cli/config_show.go index c277173c387..c7138c98e33 100644 --- a/cmd/crowdsec-cli/config_show.go +++ b/cmd/crowdsec-cli/config_show.go @@ -10,13 +10,15 @@ import ( "github.com/sanity-io/litter" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" ) -func showConfigKey(key string) error { +func (cli *cliConfig) showKey(key string) error { + cfg := cli.cfg() + type Env struct { Config *csconfig.Config } @@ -30,15 +32,15 @@ func showConfigKey(key string) error { return err } - output, err := expr.Run(program, Env{Config: csConfig}) + output, err := expr.Run(program, Env{Config: cfg}) if err != nil { return err } - switch csConfig.Cscli.Output { + switch cfg.Cscli.Output { case "human", "raw": // Don't use litter for strings, it adds quotes - // 
that we didn't have before + // that would break compatibility with previous versions switch output.(type) { case string: fmt.Println(output) @@ -51,13 +53,14 @@ func showConfigKey(key string) error { return fmt.Errorf("failed to marshal configuration: %w", err) } - fmt.Printf("%s\n", string(data)) + fmt.Println(string(data)) } return nil } -var configShowTemplate = `Global: +func (cli *cliConfig) template() string { + return `Global: {{- if .ConfigPaths }} - Configuration Folder : {{.ConfigPaths.ConfigDir}} @@ -182,19 +185,11 @@ Central API: {{- end }} {{- end }} ` +} -func (cli *cliConfig) show(key string) error { +func (cli *cliConfig) show() error { cfg := cli.cfg() - if err := cfg.LoadAPIClient(); err != nil { - log.Errorf("failed to load API client configuration: %s", err) - // don't return, we can still show the configuration - } - - if key != "" { - return showConfigKey(key) - } - switch cfg.Cscli.Output { case "human": // The tests on .Enable look funny because the option has a true default which has @@ -205,7 +200,7 @@ func (cli *cliConfig) show(key string) error { "ValueBool": func(b *bool) bool { return b != nil && *b }, } - tmp, err := template.New("config").Funcs(funcs).Parse(configShowTemplate) + tmp, err := template.New("config").Funcs(funcs).Parse(cli.template()) if err != nil { return err } @@ -220,14 +215,14 @@ func (cli *cliConfig) show(key string) error { return fmt.Errorf("failed to marshal configuration: %w", err) } - fmt.Printf("%s\n", string(data)) + fmt.Println(string(data)) case "raw": data, err := yaml.Marshal(cfg) if err != nil { return fmt.Errorf("failed to marshal configuration: %w", err) } - fmt.Printf("%s\n", string(data)) + fmt.Println(string(data)) } return nil @@ -243,7 +238,16 @@ func (cli *cliConfig) newShowCmd() *cobra.Command { Args: cobra.ExactArgs(0), DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { - return cli.show(key) + if err := cli.cfg().LoadAPIClient(); err != nil { + log.Errorf("failed to 
load API client configuration: %s", err) + // don't return, we can still show the configuration + } + + if key != "" { + return cli.showKey(key) + } + + return cli.show() }, } diff --git a/cmd/crowdsec-cli/console.go b/cmd/crowdsec-cli/console.go index b1912825c06..9e881a43f53 100644 --- a/cmd/crowdsec-cli/console.go +++ b/cmd/crowdsec-cli/console.go @@ -4,9 +4,11 @@ import ( "context" "encoding/csv" "encoding/json" + "errors" "fmt" "net/url" "os" + "strconv" "strings" "github.com/fatih/color" @@ -36,7 +38,7 @@ func NewCLIConsole(cfg configGetter) *cliConsole { } func (cli *cliConsole) NewCommand() *cobra.Command { - var cmd = &cobra.Command{ + cmd := &cobra.Command{ Use: "console [action]", Short: "Manage interaction with Crowdsec console (https://app.crowdsec.net)", Args: cobra.MinimumNArgs(1), @@ -203,7 +205,7 @@ Enable given information push to the central API. Allows to empower the console` log.Infof("All features have been enabled successfully") } else { if len(args) == 0 { - return fmt.Errorf("you must specify at least one feature to enable") + return errors.New("you must specify at least one feature to enable") } if err := cli.setConsoleOpts(args, true); err != nil { return err @@ -288,11 +290,11 @@ func (cli *cliConsole) newStatusCmd() *cobra.Command { } rows := [][]string{ - {csconfig.SEND_MANUAL_SCENARIOS, fmt.Sprintf("%t", *consoleCfg.ShareManualDecisions)}, - {csconfig.SEND_CUSTOM_SCENARIOS, fmt.Sprintf("%t", *consoleCfg.ShareCustomScenarios)}, - {csconfig.SEND_TAINTED_SCENARIOS, fmt.Sprintf("%t", *consoleCfg.ShareTaintedScenarios)}, - {csconfig.SEND_CONTEXT, fmt.Sprintf("%t", *consoleCfg.ShareContext)}, - {csconfig.CONSOLE_MANAGEMENT, fmt.Sprintf("%t", *consoleCfg.ConsoleManagement)}, + {csconfig.SEND_MANUAL_SCENARIOS, strconv.FormatBool(*consoleCfg.ShareManualDecisions)}, + {csconfig.SEND_CUSTOM_SCENARIOS, strconv.FormatBool(*consoleCfg.ShareCustomScenarios)}, + {csconfig.SEND_TAINTED_SCENARIOS, strconv.FormatBool(*consoleCfg.ShareTaintedScenarios)}, 
+ {csconfig.SEND_CONTEXT, strconv.FormatBool(*consoleCfg.ShareContext)}, + {csconfig.CONSOLE_MANAGEMENT, strconv.FormatBool(*consoleCfg.ConsoleManagement)}, } for _, row := range rows { err = csvwriter.Write(row) diff --git a/cmd/crowdsec-cli/copyfile.go b/cmd/crowdsec-cli/copyfile.go index 332f744be80..272fb3f7851 100644 --- a/cmd/crowdsec-cli/copyfile.go +++ b/cmd/crowdsec-cli/copyfile.go @@ -9,7 +9,6 @@ import ( log "github.com/sirupsen/logrus" ) - /*help to copy the file, ioutil doesn't offer the feature*/ func copyFileContents(src, dst string) (err error) { @@ -69,6 +68,7 @@ func CopyFile(sourceSymLink, destinationFile string) error { if !(destinationFileStat.Mode().IsRegular()) { return fmt.Errorf("copyFile: non-regular destination file %s (%q)", destinationFileStat.Name(), destinationFileStat.Mode().String()) } + if os.SameFile(sourceFileStat, destinationFileStat) { return err } @@ -80,4 +80,3 @@ func CopyFile(sourceSymLink, destinationFile string) error { return err } - diff --git a/cmd/crowdsec-cli/decisions.go b/cmd/crowdsec-cli/decisions.go index a97536ddc14..3fb790633b5 100644 --- a/cmd/crowdsec-cli/decisions.go +++ b/cmd/crowdsec-cli/decisions.go @@ -4,6 +4,7 @@ import ( "context" "encoding/csv" "encoding/json" + "errors" "fmt" "net/url" "os" @@ -346,7 +347,7 @@ cscli decisions add --scope username --value foobar addScope = types.Range } else if addValue == "" { printHelp(cmd) - return fmt.Errorf("missing arguments, a value is required (--ip, --range or --scope and --value)") + return errors.New("missing arguments, a value is required (--ip, --range or --scope and --value)") } if addReason == "" { @@ -371,7 +372,7 @@ cscli decisions add --scope username --value foobar Scenario: &addReason, ScenarioVersion: &empty, Simulated: &simulated, - //setting empty scope/value broke plugins, and it didn't seem to be needed anymore w/ latest papi changes + // setting empty scope/value broke plugins, and it didn't seem to be needed anymore w/ latest papi changes 
Source: &models.Source{ AsName: empty, AsNumber: empty, @@ -411,7 +412,7 @@ cscli decisions add --scope username --value foobar } func (cli *cliDecisions) newDeleteCmd() *cobra.Command { - var delFilter = apiclient.DecisionsDeleteOpts{ + delFilter := apiclient.DecisionsDeleteOpts{ ScopeEquals: new(string), ValueEquals: new(string), TypeEquals: new(string), @@ -448,7 +449,7 @@ cscli decisions delete --origin lists --scenario list_name *delFilter.RangeEquals == "" && *delFilter.ScenarioEquals == "" && *delFilter.OriginEquals == "" && delDecisionID == "" { cmd.Usage() - return fmt.Errorf("at least one filter or --all must be specified") + return errors.New("at least one filter or --all must be specified") } return nil diff --git a/cmd/crowdsec-cli/decisions_import.go b/cmd/crowdsec-cli/decisions_import.go index 45d1841a603..8c36bd5dc92 100644 --- a/cmd/crowdsec-cli/decisions_import.go +++ b/cmd/crowdsec-cli/decisions_import.go @@ -5,6 +5,7 @@ import ( "bytes" "context" "encoding/json" + "errors" "fmt" "io" "os" @@ -81,7 +82,7 @@ func (cli *cliDecisions) runImport(cmd *cobra.Command, args []string) error { } if defaultDuration == "" { - return fmt.Errorf("--duration cannot be empty") + return errors.New("--duration cannot be empty") } defaultScope, err := flags.GetString("scope") @@ -90,7 +91,7 @@ func (cli *cliDecisions) runImport(cmd *cobra.Command, args []string) error { } if defaultScope == "" { - return fmt.Errorf("--scope cannot be empty") + return errors.New("--scope cannot be empty") } defaultReason, err := flags.GetString("reason") @@ -99,7 +100,7 @@ func (cli *cliDecisions) runImport(cmd *cobra.Command, args []string) error { } if defaultReason == "" { - return fmt.Errorf("--reason cannot be empty") + return errors.New("--reason cannot be empty") } defaultType, err := flags.GetString("type") @@ -108,7 +109,7 @@ func (cli *cliDecisions) runImport(cmd *cobra.Command, args []string) error { } if defaultType == "" { - return fmt.Errorf("--type cannot be empty") + 
return errors.New("--type cannot be empty") } batchSize, err := flags.GetInt("batch") @@ -136,7 +137,7 @@ func (cli *cliDecisions) runImport(cmd *cobra.Command, args []string) error { } if format == "" { - return fmt.Errorf("unable to guess format from file extension, please provide a format with --format flag") + return errors.New("unable to guess format from file extension, please provide a format with --format flag") } if input == "-" { @@ -235,7 +236,6 @@ func (cli *cliDecisions) runImport(cmd *cobra.Command, args []string) error { return nil } - func (cli *cliDecisions) newImportCmd() *cobra.Command { cmd := &cobra.Command{ Use: "import [options]", diff --git a/cmd/crowdsec-cli/doc.go b/cmd/crowdsec-cli/doc.go index a4896f3da30..4b1d50d1583 100644 --- a/cmd/crowdsec-cli/doc.go +++ b/cmd/crowdsec-cli/doc.go @@ -39,8 +39,10 @@ id: %s title: %s --- ` + name := filepath.Base(filename) base := strings.TrimSuffix(name, filepath.Ext(name)) + return fmt.Sprintf(header, base, strings.ReplaceAll(base, "_", " ")) } diff --git a/cmd/crowdsec-cli/explain.go b/cmd/crowdsec-cli/explain.go index ce323fd0ce1..c322cce47fe 100644 --- a/cmd/crowdsec-cli/explain.go +++ b/cmd/crowdsec-cli/explain.go @@ -83,7 +83,7 @@ tail -n 5 myfile.log | cscli explain --type nginx -f - PersistentPreRunE: func(_ *cobra.Command, _ []string) error { fileInfo, _ := os.Stdin.Stat() if cli.flags.logFile == "-" && ((fileInfo.Mode() & os.ModeCharDevice) == os.ModeCharDevice) { - return fmt.Errorf("the option -f - is intended to work with pipes") + return errors.New("the option -f - is intended to work with pipes") } return nil @@ -160,18 +160,22 @@ func (cli *cliExplain) run() error { } else if logFile == "-" { reader := bufio.NewReader(os.Stdin) errCount := 0 + for { input, err := reader.ReadBytes('\n') if err != nil && errors.Is(err, io.EOF) { break } + if len(input) > 1 { _, err = f.Write(input) } + if err != nil || len(input) <= 1 { errCount++ } } + if errCount > 0 { log.Warnf("Failed to write %d 
lines to %s", errCount, tmpFile) } @@ -207,7 +211,7 @@ func (cli *cliExplain) run() error { } if dsn == "" { - return fmt.Errorf("no acquisition (--file or --dsn) provided, can't run cscli test") + return errors.New("no acquisition (--file or --dsn) provided, can't run cscli test") } cmdArgs := []string{"-c", ConfigFilePath, "-type", logType, "-dsn", dsn, "-dump-data", dir, "-no-api"} From 725cae1fa88fd533045b902e0ff4372936dcf8cf Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 23 Apr 2024 12:41:50 +0200 Subject: [PATCH 097/318] CI: upload coverage with token (#2958) --- .github/workflows/bats-sqlite-coverage.yml | 1 + .github/workflows/go-tests-windows.yml | 1 + .github/workflows/go-tests.yml | 1 + 3 files changed, 3 insertions(+) diff --git a/.github/workflows/bats-sqlite-coverage.yml b/.github/workflows/bats-sqlite-coverage.yml index d56d69f28b2..742d1ee65ff 100644 --- a/.github/workflows/bats-sqlite-coverage.yml +++ b/.github/workflows/bats-sqlite-coverage.yml @@ -81,3 +81,4 @@ jobs: with: files: ./coverage-bats.out flags: bats + token: ${{ secrets.CODECOV_TOKEN }} diff --git a/.github/workflows/go-tests-windows.yml b/.github/workflows/go-tests-windows.yml index 781f2a4a930..9d5c1739745 100644 --- a/.github/workflows/go-tests-windows.yml +++ b/.github/workflows/go-tests-windows.yml @@ -52,6 +52,7 @@ jobs: with: files: coverage.out flags: unit-windows + token: ${{ secrets.CODECOV_TOKEN }} - name: golangci-lint uses: golangci/golangci-lint-action@v4 diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index 67f73d81a45..4eac3777df9 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -153,6 +153,7 @@ jobs: with: files: coverage.out flags: unit-linux + token: ${{ secrets.CODECOV_TOKEN }} - name: golangci-lint uses: golangci/golangci-lint-action@v4 From 97e6588a45f23931c64f83b584a9af7b3e58db80 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> 
Date: Wed, 24 Apr 2024 10:05:55 +0200 Subject: [PATCH 098/318] cscli hub items: avoid global (#2960) * cscli hub items: avoid global * lint (whitespace, errors) * lint --- cmd/crowdsec-cli/hubappsec.go | 12 ++++---- cmd/crowdsec-cli/hubcollection.go | 3 +- cmd/crowdsec-cli/hubcontext.go | 3 +- cmd/crowdsec-cli/hubparser.go | 3 +- cmd/crowdsec-cli/hubpostoverflow.go | 3 +- cmd/crowdsec-cli/hubscenario.go | 3 +- cmd/crowdsec-cli/hubtest.go | 3 +- cmd/crowdsec-cli/item_metrics.go | 30 +++++++++++++++++++ cmd/crowdsec-cli/itemcli.go | 46 ++++++++++++++++++----------- cmd/crowdsec-cli/items.go | 8 ++--- cmd/crowdsec-cli/lapi.go | 3 +- cmd/crowdsec-cli/main.go | 14 ++++----- 12 files changed, 90 insertions(+), 41 deletions(-) diff --git a/cmd/crowdsec-cli/hubappsec.go b/cmd/crowdsec-cli/hubappsec.go index ff41ad5f9ad..7ee578edc2f 100644 --- a/cmd/crowdsec-cli/hubappsec.go +++ b/cmd/crowdsec-cli/hubappsec.go @@ -13,8 +13,9 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -func NewCLIAppsecConfig() *cliItem { +func NewCLIAppsecConfig(cfg configGetter) *cliItem { return &cliItem{ + cfg: cfg, name: cwhub.APPSEC_CONFIGS, singular: "appsec-config", oneOrMore: "appsec-config(s)", @@ -46,7 +47,7 @@ cscli appsec-configs list crowdsecurity/vpatch`, } } -func NewCLIAppsecRule() *cliItem { +func NewCLIAppsecRule(cfg configGetter) *cliItem { inspectDetail := func(item *cwhub.Item) error { // Only show the converted rules in human mode if csConfig.Cscli.Output != "human" { @@ -57,11 +58,11 @@ func NewCLIAppsecRule() *cliItem { yamlContent, err := os.ReadFile(item.State.LocalPath) if err != nil { - return fmt.Errorf("unable to read file %s : %s", item.State.LocalPath, err) + return fmt.Errorf("unable to read file %s: %w", item.State.LocalPath, err) } if err := yaml.Unmarshal(yamlContent, &appsecRule); err != nil { - return fmt.Errorf("unable to unmarshal yaml file %s : %s", item.State.LocalPath, err) + return fmt.Errorf("unable to unmarshal yaml file %s: %w", 
item.State.LocalPath, err) } for _, ruleType := range appsec_rule.SupportedTypes() { @@ -70,7 +71,7 @@ func NewCLIAppsecRule() *cliItem { for _, rule := range appsecRule.Rules { convertedRule, _, err := rule.Convert(ruleType, appsecRule.Name) if err != nil { - return fmt.Errorf("unable to convert rule %s : %s", rule.Name, err) + return fmt.Errorf("unable to convert rule %s: %w", rule.Name, err) } fmt.Println(convertedRule) @@ -88,6 +89,7 @@ func NewCLIAppsecRule() *cliItem { } return &cliItem{ + cfg: cfg, name: "appsec-rules", singular: "appsec-rule", oneOrMore: "appsec-rule(s)", diff --git a/cmd/crowdsec-cli/hubcollection.go b/cmd/crowdsec-cli/hubcollection.go index dee9a0b9e66..655b36eb1b8 100644 --- a/cmd/crowdsec-cli/hubcollection.go +++ b/cmd/crowdsec-cli/hubcollection.go @@ -4,8 +4,9 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -func NewCLICollection() *cliItem { +func NewCLICollection(cfg configGetter) *cliItem { return &cliItem{ + cfg: cfg, name: cwhub.COLLECTIONS, singular: "collection", oneOrMore: "collection(s)", diff --git a/cmd/crowdsec-cli/hubcontext.go b/cmd/crowdsec-cli/hubcontext.go index 630dbb2f7b6..2a777327379 100644 --- a/cmd/crowdsec-cli/hubcontext.go +++ b/cmd/crowdsec-cli/hubcontext.go @@ -4,8 +4,9 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -func NewCLIContext() *cliItem { +func NewCLIContext(cfg configGetter) *cliItem { return &cliItem{ + cfg: cfg, name: cwhub.CONTEXTS, singular: "context", oneOrMore: "context(s)", diff --git a/cmd/crowdsec-cli/hubparser.go b/cmd/crowdsec-cli/hubparser.go index 0b224c8a7f6..cc856cbedb9 100644 --- a/cmd/crowdsec-cli/hubparser.go +++ b/cmd/crowdsec-cli/hubparser.go @@ -4,8 +4,9 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -func NewCLIParser() *cliItem { +func NewCLIParser(cfg configGetter) *cliItem { return &cliItem{ + cfg: cfg, name: cwhub.PARSERS, singular: "parser", oneOrMore: "parser(s)", diff --git a/cmd/crowdsec-cli/hubpostoverflow.go 
b/cmd/crowdsec-cli/hubpostoverflow.go index 908ccbea0fd..3fd45fd113d 100644 --- a/cmd/crowdsec-cli/hubpostoverflow.go +++ b/cmd/crowdsec-cli/hubpostoverflow.go @@ -4,8 +4,9 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -func NewCLIPostOverflow() *cliItem { +func NewCLIPostOverflow(cfg configGetter) *cliItem { return &cliItem{ + cfg: cfg, name: cwhub.POSTOVERFLOWS, singular: "postoverflow", oneOrMore: "postoverflow(s)", diff --git a/cmd/crowdsec-cli/hubscenario.go b/cmd/crowdsec-cli/hubscenario.go index 1de2182bfc5..4434b9a2c45 100644 --- a/cmd/crowdsec-cli/hubscenario.go +++ b/cmd/crowdsec-cli/hubscenario.go @@ -4,8 +4,9 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -func NewCLIScenario() *cliItem { +func NewCLIScenario(cfg configGetter) *cliItem { return &cliItem{ + cfg: cfg, name: cwhub.SCENARIOS, singular: "scenario", oneOrMore: "scenario(s)", diff --git a/cmd/crowdsec-cli/hubtest.go b/cmd/crowdsec-cli/hubtest.go index d6ed4560056..51735ce19ce 100644 --- a/cmd/crowdsec-cli/hubtest.go +++ b/cmd/crowdsec-cli/hubtest.go @@ -135,6 +135,7 @@ cscli hubtest create my-scenario-test --parsers crowdsecurity/nginx --scenarios // create empty nuclei template file nucleiFileName := fmt.Sprintf("%s.yaml", testName) nucleiFilePath := filepath.Join(testPath, nucleiFileName) + nucleiFile, err := os.OpenFile(nucleiFilePath, os.O_RDWR|os.O_CREATE, 0755) if err != nil { return err @@ -405,7 +406,7 @@ func (cli *cliHubTest) NewRunCmd() *cobra.Command { } func (cli *cliHubTest) NewCleanCmd() *cobra.Command { - var cmd = &cobra.Command{ + cmd := &cobra.Command{ Use: "clean", Short: "clean [test_name]", Args: cobra.MinimumNArgs(1), diff --git a/cmd/crowdsec-cli/item_metrics.go b/cmd/crowdsec-cli/item_metrics.go index e6f27ae5d0d..b571fb1c5ed 100644 --- a/cmd/crowdsec-cli/item_metrics.go +++ b/cmd/crowdsec-cli/item_metrics.go @@ -37,6 +37,7 @@ func ShowMetrics(hubItem *cwhub.Item) error { appsecMetricsTable(color.Output, hubItem.Name, metrics) default: // no 
metrics for this item type } + return nil } @@ -49,21 +50,27 @@ func GetParserMetric(url string, itemName string) map[string]map[string]int { if !strings.HasPrefix(fam.Name, "cs_") { continue } + log.Tracef("round %d", idx) + for _, m := range fam.Metrics { metric, ok := m.(prom2json.Metric) if !ok { log.Debugf("failed to convert metric to prom2json.Metric") continue } + name, ok := metric.Labels["name"] if !ok { log.Debugf("no name in Metric %v", metric.Labels) } + if name != itemName { continue } + source, ok := metric.Labels["source"] + if !ok { log.Debugf("no source in Metric %v", metric.Labels) } else { @@ -71,12 +78,15 @@ func GetParserMetric(url string, itemName string) map[string]map[string]int { source = srctype + ":" + source } } + value := m.(prom2json.Metric).Value + fval, err := strconv.ParseFloat(value, 32) if err != nil { log.Errorf("Unexpected int value %s : %s", value, err) continue } + ival := int(fval) switch fam.Name { @@ -119,6 +129,7 @@ func GetParserMetric(url string, itemName string) map[string]map[string]int { } } } + return stats } @@ -136,26 +147,34 @@ func GetScenarioMetric(url string, itemName string) map[string]int { if !strings.HasPrefix(fam.Name, "cs_") { continue } + log.Tracef("round %d", idx) + for _, m := range fam.Metrics { metric, ok := m.(prom2json.Metric) if !ok { log.Debugf("failed to convert metric to prom2json.Metric") continue } + name, ok := metric.Labels["name"] + if !ok { log.Debugf("no name in Metric %v", metric.Labels) } + if name != itemName { continue } + value := m.(prom2json.Metric).Value + fval, err := strconv.ParseFloat(value, 32) if err != nil { log.Errorf("Unexpected int value %s : %s", value, err) continue } + ival := int(fval) switch fam.Name { @@ -174,6 +193,7 @@ func GetScenarioMetric(url string, itemName string) map[string]int { } } } + return stats } @@ -188,17 +208,22 @@ func GetAppsecRuleMetric(url string, itemName string) map[string]int { if !strings.HasPrefix(fam.Name, "cs_") { continue } + 
log.Tracef("round %d", idx) + for _, m := range fam.Metrics { metric, ok := m.(prom2json.Metric) if !ok { log.Debugf("failed to convert metric to prom2json.Metric") continue } + name, ok := metric.Labels["rule_name"] + if !ok { log.Debugf("no rule_name in Metric %v", metric.Labels) } + if name != itemName { continue } @@ -209,11 +234,13 @@ func GetAppsecRuleMetric(url string, itemName string) map[string]int { } value := m.(prom2json.Metric).Value + fval, err := strconv.ParseFloat(value, 32) if err != nil { log.Errorf("Unexpected int value %s : %s", value, err) continue } + ival := int(fval) switch fam.Name { @@ -231,6 +258,7 @@ func GetAppsecRuleMetric(url string, itemName string) map[string]int { } } } + return stats } @@ -247,6 +275,7 @@ func GetPrometheusMetric(url string) []*prom2json.Family { go func() { defer trace.CatchPanic("crowdsec/GetPrometheusMetric") + err := prom2json.FetchMetricFamilies(url, mfChan, transport) if err != nil { log.Fatalf("failed to fetch prometheus metrics : %v", err) @@ -257,6 +286,7 @@ func GetPrometheusMetric(url string) []*prom2json.Family { for mf := range mfChan { result = append(result, prom2json.NewFamily(mf)) } + log.Debugf("Finished reading prometheus output, %d entries", len(result)) return result diff --git a/cmd/crowdsec-cli/itemcli.go b/cmd/crowdsec-cli/itemcli.go index 4f3dc40ae04..c2614068fd7 100644 --- a/cmd/crowdsec-cli/itemcli.go +++ b/cmd/crowdsec-cli/itemcli.go @@ -1,6 +1,7 @@ package main import ( + "errors" "fmt" "os" "strings" @@ -28,6 +29,7 @@ type cliHelp struct { } type cliItem struct { + cfg configGetter name string // plural, as used in the hub index singular string oneOrMore string // parenthetical pluralizaion: "parser(s)" @@ -61,7 +63,9 @@ func (cli cliItem) NewCommand() *cobra.Command { } func (cli cliItem) install(args []string, downloadOnly bool, force bool, ignoreError bool) error { - hub, err := require.Hub(csConfig, require.RemoteHub(csConfig), log.StandardLogger()) + cfg := cli.cfg() + + hub, err 
:= require.Hub(cfg, require.RemoteHub(cfg), log.StandardLogger()) if err != nil { return err } @@ -71,7 +75,7 @@ func (cli cliItem) install(args []string, downloadOnly bool, force bool, ignoreE if item == nil { msg := suggestNearestMessage(hub, cli.name, name) if !ignoreError { - return fmt.Errorf(msg) + return errors.New(msg) } log.Errorf(msg) @@ -107,10 +111,10 @@ func (cli cliItem) newInstallCmd() *cobra.Command { Example: cli.installHelp.example, Args: cobra.MinimumNArgs(1), DisableAutoGenTag: true, - ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { return compAllItems(cli.name, args, toComplete) }, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(_ *cobra.Command, args []string) error { return cli.install(args, downloadOnly, force, ignoreError) }, } @@ -137,7 +141,7 @@ func istalledParentNames(item *cwhub.Item) []string { } func (cli cliItem) remove(args []string, purge bool, force bool, all bool) error { - hub, err := require.Hub(csConfig, nil, log.StandardLogger()) + hub, err := require.Hub(cli.cfg(), nil, log.StandardLogger()) if err != nil { return err } @@ -163,6 +167,7 @@ func (cli cliItem) remove(args []string, purge bool, force bool, all bool) error if didRemove { log.Infof("Removed %s", item.Name) + removed++ } } @@ -204,6 +209,7 @@ func (cli cliItem) remove(args []string, purge bool, force bool, all bool) error if didRemove { log.Infof("Removed %s", item.Name) + removed++ } } @@ -231,10 +237,10 @@ func (cli cliItem) newRemoveCmd() *cobra.Command { Example: cli.removeHelp.example, Aliases: []string{"delete"}, DisableAutoGenTag: true, - ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, 
cobra.ShellCompDirective) { return compInstalledItems(cli.name, args, toComplete) }, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(_ *cobra.Command, args []string) error { return cli.remove(args, purge, force, all) }, } @@ -248,7 +254,9 @@ func (cli cliItem) newRemoveCmd() *cobra.Command { } func (cli cliItem) upgrade(args []string, force bool, all bool) error { - hub, err := require.Hub(csConfig, require.RemoteHub(csConfig), log.StandardLogger()) + cfg := cli.cfg() + + hub, err := require.Hub(cfg, require.RemoteHub(cfg), log.StandardLogger()) if err != nil { return err } @@ -300,6 +308,7 @@ func (cli cliItem) upgrade(args []string, force bool, all bool) error { if didUpdate { log.Infof("Updated %s", item.Name) + updated++ } } @@ -323,10 +332,10 @@ func (cli cliItem) newUpgradeCmd() *cobra.Command { Long: coalesce.String(cli.upgradeHelp.long, fmt.Sprintf("Fetch and upgrade one or more %s from the hub", cli.name)), Example: cli.upgradeHelp.example, DisableAutoGenTag: true, - ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { return compInstalledItems(cli.name, args, toComplete) }, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(_ *cobra.Command, args []string) error { return cli.upgrade(args, force, all) }, } @@ -339,21 +348,23 @@ func (cli cliItem) newUpgradeCmd() *cobra.Command { } func (cli cliItem) inspect(args []string, url string, diff bool, rev bool, noMetrics bool) error { + cfg := cli.cfg() + if rev && !diff { - return fmt.Errorf("--rev can only be used with --diff") + return errors.New("--rev can only be used with --diff") } if url != "" { - csConfig.Cscli.PrometheusUrl = url + cfg.Cscli.PrometheusUrl = url } remote := (*cwhub.RemoteHubCfg)(nil) if diff { - remote = require.RemoteHub(csConfig) + remote = require.RemoteHub(cfg) } - 
hub, err := require.Hub(csConfig, remote, log.StandardLogger()) + hub, err := require.Hub(cfg, remote, log.StandardLogger()) if err != nil { return err } @@ -399,10 +410,10 @@ func (cli cliItem) newInspectCmd() *cobra.Command { Example: cli.inspectHelp.example, Args: cobra.MinimumNArgs(1), DisableAutoGenTag: true, - ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { return compInstalledItems(cli.name, args, toComplete) }, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(_ *cobra.Command, args []string) error { return cli.inspect(args, url, diff, rev, noMetrics) }, } @@ -417,7 +428,7 @@ func (cli cliItem) newInspectCmd() *cobra.Command { } func (cli cliItem) list(args []string, all bool) error { - hub, err := require.Hub(csConfig, nil, log.StandardLogger()) + hub, err := require.Hub(cli.cfg(), nil, log.StandardLogger()) if err != nil { return err } @@ -526,6 +537,7 @@ func (cli cliItem) whyTainted(hub *cwhub.Hub, item *cwhub.Item, reverse bool) st // hack: avoid message "item is tainted by itself" continue } + ret = append(ret, fmt.Sprintf("# %s is tainted by %s", sub.FQName(), taintList)) } } diff --git a/cmd/crowdsec-cli/items.go b/cmd/crowdsec-cli/items.go index ea6d8a25631..b8c83809dec 100644 --- a/cmd/crowdsec-cli/items.go +++ b/cmd/crowdsec-cli/items.go @@ -116,7 +116,7 @@ func listItems(out io.Writer, itemTypes []string, items map[string][]*cwhub.Item } if err := csvwriter.Write(header); err != nil { - return fmt.Errorf("failed to write header: %s", err) + return fmt.Errorf("failed to write header: %w", err) } for _, itemType := range itemTypes { @@ -132,7 +132,7 @@ func listItems(out io.Writer, itemTypes []string, items map[string][]*cwhub.Item } if err := csvwriter.Write(row); err != nil { - return fmt.Errorf("failed to write raw output: %s", err) + return 
fmt.Errorf("failed to write raw output: %w", err) } } } @@ -150,12 +150,12 @@ func inspectItem(item *cwhub.Item, showMetrics bool) error { enc.SetIndent(2) if err := enc.Encode(item); err != nil { - return fmt.Errorf("unable to encode item: %s", err) + return fmt.Errorf("unable to encode item: %w", err) } case "json": b, err := json.MarshalIndent(*item, "", " ") if err != nil { - return fmt.Errorf("unable to marshal item: %s", err) + return fmt.Errorf("unable to marshal item: %w", err) } fmt.Print(string(b)) diff --git a/cmd/crowdsec-cli/lapi.go b/cmd/crowdsec-cli/lapi.go index 13a9d8d7e77..51f372cc2d1 100644 --- a/cmd/crowdsec-cli/lapi.go +++ b/cmd/crowdsec-cli/lapi.go @@ -116,7 +116,6 @@ func (cli *cliLapi) register(apiURL string, outputFile string, machine string) e URL: apiurl, VersionPrefix: LAPIURLPrefix, }, nil) - if err != nil { return fmt.Errorf("api client register: %w", err) } @@ -585,7 +584,7 @@ func detectNode(node parser.Node, parserCTX parser.UnixParserCtx) []string { } func detectSubNode(node parser.Node, parserCTX parser.UnixParserCtx) []string { - var ret = make([]string, 0) + ret := make([]string, 0) for _, subnode := range node.LeavesNodes { if subnode.Grok.RunTimeRegexp != nil { diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index 9e721f1fac6..0705faa4065 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -260,13 +260,13 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewCLINotifications(cli.cfg).NewCommand()) cmd.AddCommand(NewCLISupport().NewCommand()) cmd.AddCommand(NewCLIPapi(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLICollection().NewCommand()) - cmd.AddCommand(NewCLIParser().NewCommand()) - cmd.AddCommand(NewCLIScenario().NewCommand()) - cmd.AddCommand(NewCLIPostOverflow().NewCommand()) - cmd.AddCommand(NewCLIContext().NewCommand()) - cmd.AddCommand(NewCLIAppsecConfig().NewCommand()) - cmd.AddCommand(NewCLIAppsecRule().NewCommand()) + 
cmd.AddCommand(NewCLICollection(cli.cfg).NewCommand()) + cmd.AddCommand(NewCLIParser(cli.cfg).NewCommand()) + cmd.AddCommand(NewCLIScenario(cli.cfg).NewCommand()) + cmd.AddCommand(NewCLIPostOverflow(cli.cfg).NewCommand()) + cmd.AddCommand(NewCLIContext(cli.cfg).NewCommand()) + cmd.AddCommand(NewCLIAppsecConfig(cli.cfg).NewCommand()) + cmd.AddCommand(NewCLIAppsecRule(cli.cfg).NewCommand()) if fflag.CscliSetup.IsEnabled() { cmd.AddCommand(NewSetupCmd()) From 0f942a95f112bc0ad4efdcb18b33c0d1d20e44fb Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 24 Apr 2024 11:09:37 +0200 Subject: [PATCH 099/318] pkg/cwhub - rename methods for clarity (#2961) * pkg/cwhub - rename methods for clarity * lint --- cmd/crowdsec-cli/capi.go | 2 +- cmd/crowdsec-cli/console.go | 2 +- cmd/crowdsec-cli/hub.go | 4 ++-- cmd/crowdsec-cli/item_suggest.go | 2 +- cmd/crowdsec-cli/itemcli.go | 6 +++--- cmd/crowdsec-cli/items.go | 2 +- cmd/crowdsec-cli/lapi.go | 2 +- cmd/crowdsec-cli/notifications.go | 13 +++++++------ cmd/crowdsec-cli/papi.go | 20 ++++++++------------ cmd/crowdsec-cli/require/require.go | 11 ++++++----- cmd/crowdsec-cli/setup.go | 9 ++++++++- cmd/crowdsec-cli/simulation.go | 23 +++++++++++++---------- cmd/crowdsec-cli/support.go | 2 +- cmd/crowdsec/lapiclient.go | 8 ++++---- pkg/alertcontext/config.go | 2 +- pkg/cwhub/hub.go | 18 +++++++++--------- 16 files changed, 67 insertions(+), 59 deletions(-) diff --git a/cmd/crowdsec-cli/capi.go b/cmd/crowdsec-cli/capi.go index e56a8a74707..ec11acbbec4 100644 --- a/cmd/crowdsec-cli/capi.go +++ b/cmd/crowdsec-cli/capi.go @@ -175,7 +175,7 @@ func (cli *cliCapi) status() error { return err } - scenarios, err := hub.GetInstalledItemNames(cwhub.SCENARIOS) + scenarios, err := hub.GetInstalledNamesByType(cwhub.SCENARIOS) if err != nil { return fmt.Errorf("failed to get scenarios: %w", err) } diff --git a/cmd/crowdsec-cli/console.go b/cmd/crowdsec-cli/console.go index 9e881a43f53..4aba689aa05 100644 --- 
a/cmd/crowdsec-cli/console.go +++ b/cmd/crowdsec-cli/console.go @@ -103,7 +103,7 @@ After running this command your will need to validate the enrollment in the weba return err } - scenarios, err := hub.GetInstalledItemNames(cwhub.SCENARIOS) + scenarios, err := hub.GetInstalledNamesByType(cwhub.SCENARIOS) if err != nil { return fmt.Errorf("failed to get installed scenarios: %w", err) } diff --git a/cmd/crowdsec-cli/hub.go b/cmd/crowdsec-cli/hub.go index 600e56889f7..71347a5a57b 100644 --- a/cmd/crowdsec-cli/hub.go +++ b/cmd/crowdsec-cli/hub.go @@ -13,7 +13,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -type cliHub struct { +type cliHub struct{ cfg configGetter } @@ -137,7 +137,7 @@ func (cli *cliHub) upgrade(force bool) error { } for _, itemType := range cwhub.ItemTypes { - items, err := hub.GetInstalledItems(itemType) + items, err := hub.GetInstalledItemsByType(itemType) if err != nil { return err } diff --git a/cmd/crowdsec-cli/item_suggest.go b/cmd/crowdsec-cli/item_suggest.go index d3beee72100..9272abedd50 100644 --- a/cmd/crowdsec-cli/item_suggest.go +++ b/cmd/crowdsec-cli/item_suggest.go @@ -61,7 +61,7 @@ func compInstalledItems(itemType string, args []string, toComplete string) ([]st return nil, cobra.ShellCompDirectiveDefault } - items, err := hub.GetInstalledItemNames(itemType) + items, err := hub.GetInstalledNamesByType(itemType) if err != nil { cobra.CompDebugln(fmt.Sprintf("list installed %s err: %s", itemType, err), true) return nil, cobra.ShellCompDirectiveDefault diff --git a/cmd/crowdsec-cli/itemcli.go b/cmd/crowdsec-cli/itemcli.go index c2614068fd7..cdaf7877054 100644 --- a/cmd/crowdsec-cli/itemcli.go +++ b/cmd/crowdsec-cli/itemcli.go @@ -147,9 +147,9 @@ func (cli cliItem) remove(args []string, purge bool, force bool, all bool) error } if all { - getter := hub.GetInstalledItems + getter := hub.GetInstalledItemsByType if purge { - getter = hub.GetAllItems + getter = hub.GetItemsByType } items, err := getter(cli.name) @@ -262,7 +262,7 
@@ func (cli cliItem) upgrade(args []string, force bool, all bool) error { } if all { - items, err := hub.GetInstalledItems(cli.name) + items, err := hub.GetInstalledItemsByType(cli.name) if err != nil { return err } diff --git a/cmd/crowdsec-cli/items.go b/cmd/crowdsec-cli/items.go index b8c83809dec..4ae643151f2 100644 --- a/cmd/crowdsec-cli/items.go +++ b/cmd/crowdsec-cli/items.go @@ -17,7 +17,7 @@ import ( // selectItems returns a slice of items of a given type, selected by name and sorted by case-insensitive name func selectItems(hub *cwhub.Hub, itemType string, args []string, installedOnly bool) ([]*cwhub.Item, error) { - itemNames := hub.GetItemNames(itemType) + itemNames := hub.GetNamesByType(itemType) notExist := []string{} diff --git a/cmd/crowdsec-cli/lapi.go b/cmd/crowdsec-cli/lapi.go index 51f372cc2d1..99c3ee45a13 100644 --- a/cmd/crowdsec-cli/lapi.go +++ b/cmd/crowdsec-cli/lapi.go @@ -56,7 +56,7 @@ func (cli *cliLapi) status() error { return err } - scenarios, err := hub.GetInstalledItemNames(cwhub.SCENARIOS) + scenarios, err := hub.GetInstalledNamesByType(cwhub.SCENARIOS) if err != nil { return fmt.Errorf("failed to get scenarios: %w", err) } diff --git a/cmd/crowdsec-cli/notifications.go b/cmd/crowdsec-cli/notifications.go index f12333a3942..0b5ee537806 100644 --- a/cmd/crowdsec-cli/notifications.go +++ b/cmd/crowdsec-cli/notifications.go @@ -4,6 +4,7 @@ import ( "context" "encoding/csv" "encoding/json" + "errors" "fmt" "io/fs" "net/url" @@ -88,7 +89,7 @@ func (cli *cliNotifications) getPluginConfigs() (map[string]csplugin.PluginConfi return fmt.Errorf("error while traversing directory %s: %w", path, err) } - name := filepath.Join(cfg.ConfigPaths.NotificationDir, info.Name()) //Avoid calling info.Name() twice + name := filepath.Join(cfg.ConfigPaths.NotificationDir, info.Name()) // Avoid calling info.Name() twice if (strings.HasSuffix(name, "yaml") || strings.HasSuffix(name, "yml")) && !(info.IsDir()) { ts, err := csplugin.ParsePluginConfigFile(name) 
if err != nil { @@ -266,7 +267,7 @@ func (cli *cliNotifications) NewTestCmd() *cobra.Command { if !ok { return fmt.Errorf("plugin name: '%s' does not exist", args[0]) } - //Create a single profile with plugin name as notification name + // Create a single profile with plugin name as notification name return pluginBroker.Init(cfg.PluginConfig, []*csconfig.ProfileCfg{ { Notifications: []string{ @@ -320,8 +321,8 @@ func (cli *cliNotifications) NewTestCmd() *cobra.Command { Alert: alert, } - //time.Sleep(2 * time.Second) // There's no mechanism to ensure notification has been sent - pluginTomb.Kill(fmt.Errorf("terminating")) + // time.Sleep(2 * time.Second) // There's no mechanism to ensure notification has been sent + pluginTomb.Kill(errors.New("terminating")) pluginTomb.Wait() return nil @@ -416,8 +417,8 @@ cscli notifications reinject -a '{"remediation": true,"scenario":"not break } } - //time.Sleep(2 * time.Second) // There's no mechanism to ensure notification has been sent - pluginTomb.Kill(fmt.Errorf("terminating")) + // time.Sleep(2 * time.Second) // There's no mechanism to ensure notification has been sent + pluginTomb.Kill(errors.New("terminating")) pluginTomb.Wait() return nil diff --git a/cmd/crowdsec-cli/papi.go b/cmd/crowdsec-cli/papi.go index e18af94d4bb..5808fcce5f6 100644 --- a/cmd/crowdsec-cli/papi.go +++ b/cmd/crowdsec-cli/papi.go @@ -64,25 +64,22 @@ func (cli *cliPapi) NewStatusCmd() *cobra.Command { cfg := cli.cfg() dbClient, err = database.NewClient(cfg.DbConfig) if err != nil { - return fmt.Errorf("unable to initialize database client: %s", err) + return fmt.Errorf("unable to initialize database client: %w", err) } apic, err := apiserver.NewAPIC(cfg.API.Server.OnlineClient, dbClient, cfg.API.Server.ConsoleConfig, cfg.API.Server.CapiWhitelists) - if err != nil { - return fmt.Errorf("unable to initialize API client: %s", err) + return fmt.Errorf("unable to initialize API client: %w", err) } papi, err := apiserver.NewPAPI(apic, dbClient, 
cfg.API.Server.ConsoleConfig, log.GetLevel()) - if err != nil { - return fmt.Errorf("unable to initialize PAPI client: %s", err) + return fmt.Errorf("unable to initialize PAPI client: %w", err) } perms, err := papi.GetPermissions() - if err != nil { - return fmt.Errorf("unable to get PAPI permissions: %s", err) + return fmt.Errorf("unable to get PAPI permissions: %w", err) } var lastTimestampStr *string lastTimestampStr, err = dbClient.GetConfigItem(apiserver.PapiPullKey) @@ -118,27 +115,26 @@ func (cli *cliPapi) NewSyncCmd() *cobra.Command { dbClient, err = database.NewClient(cfg.DbConfig) if err != nil { - return fmt.Errorf("unable to initialize database client: %s", err) + return fmt.Errorf("unable to initialize database client: %w", err) } apic, err := apiserver.NewAPIC(cfg.API.Server.OnlineClient, dbClient, cfg.API.Server.ConsoleConfig, cfg.API.Server.CapiWhitelists) if err != nil { - return fmt.Errorf("unable to initialize API client: %s", err) + return fmt.Errorf("unable to initialize API client: %w", err) } t.Go(apic.Push) papi, err := apiserver.NewPAPI(apic, dbClient, cfg.API.Server.ConsoleConfig, log.GetLevel()) if err != nil { - return fmt.Errorf("unable to initialize PAPI client: %s", err) + return fmt.Errorf("unable to initialize PAPI client: %w", err) } t.Go(papi.SyncDecisions) err = papi.PullOnce(time.Time{}, true) - if err != nil { - return fmt.Errorf("unable to sync decisions: %s", err) + return fmt.Errorf("unable to sync decisions: %w", err) } log.Infof("Sending acknowledgements to CAPI") diff --git a/cmd/crowdsec-cli/require/require.go b/cmd/crowdsec-cli/require/require.go index 0f5ce182d9a..708b2d1c7a2 100644 --- a/cmd/crowdsec-cli/require/require.go +++ b/cmd/crowdsec-cli/require/require.go @@ -1,6 +1,7 @@ package require import ( + "errors" "fmt" "io" @@ -16,7 +17,7 @@ func LAPI(c *csconfig.Config) error { } if c.DisableAPI { - return fmt.Errorf("local API is disabled -- this command must be run on the local API machine") + return 
errors.New("local API is disabled -- this command must be run on the local API machine") } return nil @@ -32,7 +33,7 @@ func CAPI(c *csconfig.Config) error { func PAPI(c *csconfig.Config) error { if c.API.Server.OnlineClient.Credentials.PapiURL == "" { - return fmt.Errorf("no PAPI URL in configuration") + return errors.New("no PAPI URL in configuration") } return nil @@ -40,7 +41,7 @@ func PAPI(c *csconfig.Config) error { func CAPIRegistered(c *csconfig.Config) error { if c.API.Server.OnlineClient.Credentials == nil { - return fmt.Errorf("the Central API (CAPI) must be configured with 'cscli capi register'") + return errors.New("the Central API (CAPI) must be configured with 'cscli capi register'") } return nil @@ -56,7 +57,7 @@ func DB(c *csconfig.Config) error { func Notifications(c *csconfig.Config) error { if c.ConfigPaths.NotificationDir == "" { - return fmt.Errorf("config_paths.notification_dir is not set in crowdsec config") + return errors.New("config_paths.notification_dir is not set in crowdsec config") } return nil @@ -82,7 +83,7 @@ func Hub(c *csconfig.Config, remote *cwhub.RemoteHubCfg, logger *logrus.Logger) local := c.Hub if local == nil { - return nil, fmt.Errorf("you must configure cli before interacting with hub") + return nil, errors.New("you must configure cli before interacting with hub") } if logger == nil { diff --git a/cmd/crowdsec-cli/setup.go b/cmd/crowdsec-cli/setup.go index 48dcee08905..ba3670848d8 100644 --- a/cmd/crowdsec-cli/setup.go +++ b/cmd/crowdsec-cli/setup.go @@ -2,6 +2,7 @@ package main import ( "bytes" + "errors" "fmt" "os" "os/exec" @@ -118,9 +119,11 @@ func runSetupDetect(cmd *cobra.Command, args []string) error { switch detectConfigFile { case "-": log.Tracef("Reading detection rules from stdin") + detectReader = os.Stdin default: log.Tracef("Reading detection rules: %s", detectConfigFile) + detectReader, err = os.Open(detectConfigFile) if err != nil { return err @@ -171,6 +174,7 @@ func runSetupDetect(cmd *cobra.Command, 
args []string) error { _, err := exec.LookPath("systemctl") if err != nil { log.Debug("systemctl not available: snubbing systemd") + snubSystemd = true } } @@ -182,6 +186,7 @@ func runSetupDetect(cmd *cobra.Command, args []string) error { if forcedOSFamily == "" && forcedOSID != "" { log.Debug("force-os-id is set: force-os-family defaults to 'linux'") + forcedOSFamily = "linux" } @@ -219,6 +224,7 @@ func runSetupDetect(cmd *cobra.Command, args []string) error { if err != nil { return err } + fmt.Println(setup) return nil @@ -318,6 +324,7 @@ func runSetupInstallHub(cmd *cobra.Command, args []string) error { func runSetupValidate(cmd *cobra.Command, args []string) error { fromFile := args[0] + input, err := os.ReadFile(fromFile) if err != nil { return fmt.Errorf("while reading stdin: %w", err) @@ -325,7 +332,7 @@ func runSetupValidate(cmd *cobra.Command, args []string) error { if err = setup.Validate(input); err != nil { fmt.Printf("%v\n", err) - return fmt.Errorf("invalid setup file") + return errors.New("invalid setup file") } return nil diff --git a/cmd/crowdsec-cli/simulation.go b/cmd/crowdsec-cli/simulation.go index 6ccac761727..f3c1a6273c1 100644 --- a/cmd/crowdsec-cli/simulation.go +++ b/cmd/crowdsec-cli/simulation.go @@ -1,6 +1,7 @@ package main import ( + "errors" "fmt" "os" "slices" @@ -36,7 +37,7 @@ cscli simulation disable crowdsecurity/ssh-bf`, return err } if cli.cfg().Cscli.SimulationConfig == nil { - return fmt.Errorf("no simulation configured") + return errors.New("no simulation configured") } return nil @@ -99,11 +100,11 @@ func (cli *cliSimulation) NewEnableCmd() *cobra.Command { log.Printf("simulation mode for '%s' enabled", scenario) } if err := cli.dumpSimulationFile(); err != nil { - return fmt.Errorf("simulation enable: %s", err) + return fmt.Errorf("simulation enable: %w", err) } } else if forceGlobalSimulation { if err := cli.enableGlobalSimulation(); err != nil { - return fmt.Errorf("unable to enable global simulation mode: %s", err) + 
return fmt.Errorf("unable to enable global simulation mode: %w", err) } } else { printHelp(cmd) @@ -146,11 +147,11 @@ func (cli *cliSimulation) NewDisableCmd() *cobra.Command { log.Printf("simulation mode for '%s' disabled", scenario) } if err := cli.dumpSimulationFile(); err != nil { - return fmt.Errorf("simulation disable: %s", err) + return fmt.Errorf("simulation disable: %w", err) } } else if forceGlobalSimulation { if err := cli.disableGlobalSimulation(); err != nil { - return fmt.Errorf("unable to disable global simulation mode: %s", err) + return fmt.Errorf("unable to disable global simulation mode: %w", err) } } else { printHelp(cmd) @@ -202,7 +203,7 @@ func (cli *cliSimulation) enableGlobalSimulation() error { cfg.Cscli.SimulationConfig.Exclusions = []string{} if err := cli.dumpSimulationFile(); err != nil { - return fmt.Errorf("unable to dump simulation file: %s", err) + return fmt.Errorf("unable to dump simulation file: %w", err) } log.Printf("global simulation: enabled") @@ -215,12 +216,12 @@ func (cli *cliSimulation) dumpSimulationFile() error { newConfigSim, err := yaml.Marshal(cfg.Cscli.SimulationConfig) if err != nil { - return fmt.Errorf("unable to marshal simulation configuration: %s", err) + return fmt.Errorf("unable to marshal simulation configuration: %w", err) } err = os.WriteFile(cfg.ConfigPaths.SimulationFilePath, newConfigSim, 0o644) if err != nil { - return fmt.Errorf("write simulation config in '%s' failed: %s", cfg.ConfigPaths.SimulationFilePath, err) + return fmt.Errorf("write simulation config in '%s' failed: %w", cfg.ConfigPaths.SimulationFilePath, err) } log.Debugf("updated simulation file %s", cfg.ConfigPaths.SimulationFilePath) @@ -237,12 +238,12 @@ func (cli *cliSimulation) disableGlobalSimulation() error { newConfigSim, err := yaml.Marshal(cfg.Cscli.SimulationConfig) if err != nil { - return fmt.Errorf("unable to marshal new simulation configuration: %s", err) + return fmt.Errorf("unable to marshal new simulation configuration: 
%w", err) } err = os.WriteFile(cfg.ConfigPaths.SimulationFilePath, newConfigSim, 0o644) if err != nil { - return fmt.Errorf("unable to write new simulation config in '%s': %s", cfg.ConfigPaths.SimulationFilePath, err) + return fmt.Errorf("unable to write new simulation config in '%s': %w", cfg.ConfigPaths.SimulationFilePath, err) } log.Printf("global simulation: disabled") @@ -269,8 +270,10 @@ func (cli *cliSimulation) status() { } } else { log.Println("global simulation: disabled") + if len(cfg.Cscli.SimulationConfig.Exclusions) > 0 { log.Println("Scenarios in simulation mode :") + for _, scenario := range cfg.Cscli.SimulationConfig.Exclusions { log.Printf(" - %s", scenario) } diff --git a/cmd/crowdsec-cli/support.go b/cmd/crowdsec-cli/support.go index 8b2481b4cf2..a48edeeeb9f 100644 --- a/cmd/crowdsec-cli/support.go +++ b/cmd/crowdsec-cli/support.go @@ -199,7 +199,7 @@ func collectAPIStatus(login string, password string, endpoint string, prefix str return []byte(fmt.Sprintf("cannot parse API URL: %s", err)) } - scenarios, err := hub.GetInstalledItemNames(cwhub.SCENARIOS) + scenarios, err := hub.GetInstalledNamesByType(cwhub.SCENARIOS) if err != nil { return []byte(fmt.Sprintf("could not collect scenarios: %s", err)) } diff --git a/cmd/crowdsec/lapiclient.go b/cmd/crowdsec/lapiclient.go index fd29aa9d99b..f12aea5ac0d 100644 --- a/cmd/crowdsec/lapiclient.go +++ b/cmd/crowdsec/lapiclient.go @@ -17,12 +17,12 @@ import ( ) func AuthenticatedLAPIClient(credentials csconfig.ApiCredentialsCfg, hub *cwhub.Hub) (*apiclient.ApiClient, error) { - scenarios, err := hub.GetInstalledItemNames(cwhub.SCENARIOS) + scenarios, err := hub.GetInstalledNamesByType(cwhub.SCENARIOS) if err != nil { return nil, fmt.Errorf("loading list of installed hub scenarios: %w", err) } - appsecRules, err := hub.GetInstalledItemNames(cwhub.APPSEC_RULES) + appsecRules, err := hub.GetInstalledNamesByType(cwhub.APPSEC_RULES) if err != nil { return nil, fmt.Errorf("loading list of installed hub appsec 
rules: %w", err) } @@ -52,11 +52,11 @@ func AuthenticatedLAPIClient(credentials csconfig.ApiCredentialsCfg, hub *cwhub. PapiURL: papiURL, VersionPrefix: "v1", UpdateScenario: func() ([]string, error) { - scenarios, err := hub.GetInstalledItemNames(cwhub.SCENARIOS) + scenarios, err := hub.GetInstalledNamesByType(cwhub.SCENARIOS) if err != nil { return nil, err } - appsecRules, err := hub.GetInstalledItemNames(cwhub.APPSEC_RULES) + appsecRules, err := hub.GetInstalledNamesByType(cwhub.APPSEC_RULES) if err != nil { return nil, err } diff --git a/pkg/alertcontext/config.go b/pkg/alertcontext/config.go index 74ca1523a7d..21d16db3972 100644 --- a/pkg/alertcontext/config.go +++ b/pkg/alertcontext/config.go @@ -104,7 +104,7 @@ func LoadConsoleContext(c *csconfig.Config, hub *cwhub.Hub) error { c.Crowdsec.ContextToSend = make(map[string][]string, 0) if hub != nil { - items, err := hub.GetInstalledItems(cwhub.CONTEXTS) + items, err := hub.GetInstalledItemsByType(cwhub.CONTEXTS) if err != nil { return err } diff --git a/pkg/cwhub/hub.go b/pkg/cwhub/hub.go index 44e24020d03..6b9f56b2e17 100644 --- a/pkg/cwhub/hub.go +++ b/pkg/cwhub/hub.go @@ -214,9 +214,9 @@ func (h *Hub) GetItemFQ(itemFQName string) (*Item, error) { return i, nil } -// GetItemNames returns a slice of (full) item names for a given type +// GetNamesByType returns a slice of (full) item names for a given type // (eg. for collections: crowdsecurity/apache2 crowdsecurity/nginx). -func (h *Hub) GetItemNames(itemType string) []string { +func (h *Hub) GetNamesByType(itemType string) []string { m := h.GetItemMap(itemType) if m == nil { return nil @@ -230,8 +230,8 @@ func (h *Hub) GetItemNames(itemType string) []string { return names } -// GetAllItems returns a slice of all the items of a given type, installed or not. -func (h *Hub) GetAllItems(itemType string) ([]*Item, error) { +// GetItemsByType returns a slice of all the items of a given type, installed or not. 
+func (h *Hub) GetItemsByType(itemType string) ([]*Item, error) { if !slices.Contains(ItemTypes, itemType) { return nil, fmt.Errorf("invalid item type %s", itemType) } @@ -250,8 +250,8 @@ func (h *Hub) GetAllItems(itemType string) ([]*Item, error) { return ret, nil } -// GetInstalledItems returns a slice of the installed items of a given type. -func (h *Hub) GetInstalledItems(itemType string) ([]*Item, error) { +// GetInstalledItemsByType returns a slice of the installed items of a given type. +func (h *Hub) GetInstalledItemsByType(itemType string) ([]*Item, error) { if !slices.Contains(ItemTypes, itemType) { return nil, fmt.Errorf("invalid item type %s", itemType) } @@ -269,9 +269,9 @@ func (h *Hub) GetInstalledItems(itemType string) ([]*Item, error) { return retItems, nil } -// GetInstalledItemNames returns the names of the installed items of a given type. -func (h *Hub) GetInstalledItemNames(itemType string) ([]string, error) { - items, err := h.GetInstalledItems(itemType) +// GetInstalledNamesByType returns the names of the installed items of a given type. 
+func (h *Hub) GetInstalledNamesByType(itemType string) ([]string, error) { + items, err := h.GetInstalledItemsByType(itemType) if err != nil { return nil, err } From 60431804d8440b68e7c253be8f44137c832bf4b4 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 25 Apr 2024 11:11:57 +0200 Subject: [PATCH 100/318] db config: don't exit setup if can't detect fs, improve detection for freebsd (#2963) --- pkg/csconfig/database.go | 16 +++++++--------- pkg/types/getfstype.go | 2 +- pkg/types/getfstype_freebsd.go | 25 +++++++++++++++++++++++++ 3 files changed, 33 insertions(+), 10 deletions(-) create mode 100644 pkg/types/getfstype_freebsd.go diff --git a/pkg/csconfig/database.go b/pkg/csconfig/database.go index a7bc57eefdc..9a9ed9a9f7f 100644 --- a/pkg/csconfig/database.go +++ b/pkg/csconfig/database.go @@ -76,26 +76,24 @@ func (c *Config) LoadDBConfig(inCli bool) error { if c.DbConfig.UseWal == nil { dbDir := filepath.Dir(c.DbConfig.DbPath) isNetwork, fsType, err := types.IsNetworkFS(dbDir) - if err != nil { + switch { + case err != nil: log.Warnf("unable to determine if database is on network filesystem: %s", err) log.Warning("You are using sqlite without WAL, this can have a performance impact. If you do not store the database in a network share, set db_config.use_wal to true. 
Set explicitly to false to disable this warning.") - return nil - } - if isNetwork { + case isNetwork: log.Debugf("database is on network filesystem (%s), setting useWal to false", fsType) c.DbConfig.UseWal = ptr.Of(false) - } else { + default: log.Debugf("database is on local filesystem (%s), setting useWal to true", fsType) c.DbConfig.UseWal = ptr.Of(true) } } else if *c.DbConfig.UseWal { dbDir := filepath.Dir(c.DbConfig.DbPath) isNetwork, fsType, err := types.IsNetworkFS(dbDir) - if err != nil { + switch { + case err != nil: log.Warnf("unable to determine if database is on network filesystem: %s", err) - return nil - } - if isNetwork { + case isNetwork: log.Warnf("database seems to be stored on a network share (%s), but useWal is set to true. Proceed at your own risk.", fsType) } } diff --git a/pkg/types/getfstype.go b/pkg/types/getfstype.go index 67e018782c1..25790ecb6c5 100644 --- a/pkg/types/getfstype.go +++ b/pkg/types/getfstype.go @@ -1,4 +1,4 @@ -//go:build !windows +//go:build !windows && !freebsd package types diff --git a/pkg/types/getfstype_freebsd.go b/pkg/types/getfstype_freebsd.go new file mode 100644 index 00000000000..8fbe3dd7cc4 --- /dev/null +++ b/pkg/types/getfstype_freebsd.go @@ -0,0 +1,25 @@ +//go:build freebsd + +package types + +import ( + "fmt" + "syscall" +) + +func GetFSType(path string) (string, error) { + var fsStat syscall.Statfs_t + + if err := syscall.Statfs(path, &fsStat); err != nil { + return "", fmt.Errorf("failed to get filesystem type: %w", err) + } + + bs := fsStat.Fstypename + + b := make([]byte, len(bs)) + for i, v := range bs { + b[i] = byte(v) + } + + return string(b), nil +} From f4ed7b35205e87001e715ee7b0ac223f2f23e7da Mon Sep 17 00:00:00 2001 From: "Thibault \"bui\" Koechlin" Date: Thu, 25 Apr 2024 13:43:38 +0200 Subject: [PATCH 101/318] Truncate meta data (#2966) * truncate meta-data if they are too big --- pkg/database/alerts.go | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git 
a/pkg/database/alerts.go b/pkg/database/alerts.go index 0502c25312d..5559cbb3972 100644 --- a/pkg/database/alerts.go +++ b/pkg/database/alerts.go @@ -636,14 +636,24 @@ func (c *Client) createAlertChunk(machineID string, owner *ent.Machine, alerts [ if len(alertItem.Meta) > 0 { metaBulk := make([]*ent.MetaCreate, len(alertItem.Meta)) for i, metaItem := range alertItem.Meta { + key := metaItem.Key + value := metaItem.Value + if len(metaItem.Value) > 4095 { + c.Log.Warningf("truncated meta %s : value too long", metaItem.Key) + value = value[:4095] + } + if len(metaItem.Key) > 255 { + c.Log.Warningf("truncated meta %s : key too long", metaItem.Key) + key = key[:255] + } metaBulk[i] = c.Ent.Meta.Create(). - SetKey(metaItem.Key). - SetValue(metaItem.Value) + SetKey(key). + SetValue(value) } metas, err = c.Ent.Meta.CreateBulk(metaBulk...).Save(c.CTX) if err != nil { - return nil, errors.Wrapf(BulkError, "creating alert meta: %s", err) + c.Log.Warningf("error creating alert meta: %s", err) } } From 845d4542bbc88586e8cad61df0d893ec6b1c03a7 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 25 Apr 2024 14:41:02 +0200 Subject: [PATCH 102/318] cscli: use yaml.v3 (#2965) * cscli: use yaml.v3 * lint --- .golangci.yml | 7 ------- cmd/crowdsec-cli/alerts.go | 2 +- cmd/crowdsec-cli/capi.go | 3 +-- cmd/crowdsec-cli/hubtest.go | 4 ++-- cmd/crowdsec-cli/lapi.go | 2 +- cmd/crowdsec-cli/simulation.go | 4 ++-- cmd/crowdsec/crowdsec.go | 8 ++++---- 7 files changed, 11 insertions(+), 19 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index ff46ef1c02a..b1aa22da52f 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -82,13 +82,6 @@ linters-settings: - "!**/pkg/apiserver/controllers/v1/errors.go" yaml: files: - - "!**/cmd/crowdsec-cli/alerts.go" - - "!**/cmd/crowdsec-cli/capi.go" - - "!**/cmd/crowdsec-cli/config_show.go" - - "!**/cmd/crowdsec-cli/hubtest.go" - - "!**/cmd/crowdsec-cli/lapi.go" - - "!**/cmd/crowdsec-cli/simulation.go" - - 
"!**/cmd/crowdsec/crowdsec.go" - "!**/cmd/notification-dummy/main.go" - "!**/cmd/notification-email/main.go" - "!**/cmd/notification-http/main.go" diff --git a/cmd/crowdsec-cli/alerts.go b/cmd/crowdsec-cli/alerts.go index 59dff809458..d31c99e1b12 100644 --- a/cmd/crowdsec-cli/alerts.go +++ b/cmd/crowdsec-cli/alerts.go @@ -17,7 +17,7 @@ import ( "github.com/go-openapi/strfmt" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" "github.com/crowdsecurity/go-cs-lib/version" diff --git a/cmd/crowdsec-cli/capi.go b/cmd/crowdsec-cli/capi.go index ec11acbbec4..b5180d0505a 100644 --- a/cmd/crowdsec-cli/capi.go +++ b/cmd/crowdsec-cli/capi.go @@ -10,7 +10,7 @@ import ( "github.com/go-openapi/strfmt" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" "github.com/crowdsecurity/go-cs-lib/version" @@ -85,7 +85,6 @@ func (cli *cliCapi) register(capiUserPrefix string, outputFile string) error { URL: apiurl, VersionPrefix: CAPIURLPrefix, }, nil) - if err != nil { return fmt.Errorf("api client register ('%s'): %w", types.CAPIBaseURL, err) } diff --git a/cmd/crowdsec-cli/hubtest.go b/cmd/crowdsec-cli/hubtest.go index 51735ce19ce..458de672349 100644 --- a/cmd/crowdsec-cli/hubtest.go +++ b/cmd/crowdsec-cli/hubtest.go @@ -14,7 +14,7 @@ import ( "github.com/fatih/color" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" "github.com/crowdsecurity/crowdsec/pkg/dumps" "github.com/crowdsecurity/crowdsec/pkg/emoji" @@ -136,7 +136,7 @@ cscli hubtest create my-scenario-test --parsers crowdsecurity/nginx --scenarios nucleiFileName := fmt.Sprintf("%s.yaml", testName) nucleiFilePath := filepath.Join(testPath, nucleiFileName) - nucleiFile, err := os.OpenFile(nucleiFilePath, os.O_RDWR|os.O_CREATE, 0755) + nucleiFile, err := os.OpenFile(nucleiFilePath, os.O_RDWR|os.O_CREATE, 0o755) if err != nil { return err } diff --git a/cmd/crowdsec-cli/lapi.go 
b/cmd/crowdsec-cli/lapi.go index 99c3ee45a13..369de5b426b 100644 --- a/cmd/crowdsec-cli/lapi.go +++ b/cmd/crowdsec-cli/lapi.go @@ -13,7 +13,7 @@ import ( "github.com/go-openapi/strfmt" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" "github.com/crowdsecurity/go-cs-lib/version" diff --git a/cmd/crowdsec-cli/simulation.go b/cmd/crowdsec-cli/simulation.go index f3c1a6273c1..3301c4b797e 100644 --- a/cmd/crowdsec-cli/simulation.go +++ b/cmd/crowdsec-cli/simulation.go @@ -8,7 +8,7 @@ import ( log "github.com/sirupsen/logrus" "github.com/spf13/cobra" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/cwhub" @@ -74,7 +74,7 @@ func (cli *cliSimulation) NewEnableCmd() *cobra.Command { if len(args) > 0 { for _, scenario := range args { - var item = hub.GetItem(cwhub.SCENARIOS, scenario) + item := hub.GetItem(cwhub.SCENARIOS, scenario) if item == nil { log.Errorf("'%s' doesn't exist or is not a scenario", scenario) continue diff --git a/cmd/crowdsec/crowdsec.go b/cmd/crowdsec/crowdsec.go index f604af1dedd..8f07d165f6b 100644 --- a/cmd/crowdsec/crowdsec.go +++ b/cmd/crowdsec/crowdsec.go @@ -9,7 +9,7 @@ import ( "time" log "github.com/sirupsen/logrus" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" "github.com/crowdsecurity/go-cs-lib/trace" @@ -207,7 +207,7 @@ func serveCrowdsec(parsers *parser.Parsers, cConfig *csconfig.Config, hub *cwhub } func dumpBucketsPour() { - fd, err := os.OpenFile(filepath.Join(parser.DumpFolder, "bucketpour-dump.yaml"), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0666) + fd, err := os.OpenFile(filepath.Join(parser.DumpFolder, "bucketpour-dump.yaml"), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o666) if err != nil { log.Fatalf("open: %s", err) } @@ -230,7 +230,7 @@ func dumpBucketsPour() { } func dumpParserState() { - fd, err := os.OpenFile(filepath.Join(parser.DumpFolder, "parser-dump.yaml"), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 
0666) + fd, err := os.OpenFile(filepath.Join(parser.DumpFolder, "parser-dump.yaml"), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o666) if err != nil { log.Fatalf("open: %s", err) } @@ -253,7 +253,7 @@ func dumpParserState() { } func dumpOverflowState() { - fd, err := os.OpenFile(filepath.Join(parser.DumpFolder, "bucket-dump.yaml"), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0666) + fd, err := os.OpenFile(filepath.Join(parser.DumpFolder, "bucket-dump.yaml"), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o666) if err != nil { log.Fatalf("open: %s", err) } From ceb4479ec41e6afd10e9ad4c7a5e5f45e612a03c Mon Sep 17 00:00:00 2001 From: blotus Date: Thu, 25 Apr 2024 15:05:11 +0200 Subject: [PATCH 103/318] add zfs magic for GetFSType (#2950) --- pkg/types/getfstype.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pkg/types/getfstype.go b/pkg/types/getfstype.go index 25790ecb6c5..aac12c7fc94 100644 --- a/pkg/types/getfstype.go +++ b/pkg/types/getfstype.go @@ -4,6 +4,7 @@ package types import ( "fmt" + "golang.org/x/sys/unix" ) @@ -92,6 +93,7 @@ var fsTypeMapping map[int64]string = map[int64]string{ 0xabba1974: "xenfs", 0x012ff7b4: "xenix", 0x58465342: "xfs", + 0x2fc12fc1: "zfs", } func GetFSType(path string) (string, error) { From 2abc078e535b807f0753131bb9a6aa429a075cb5 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 25 Apr 2024 15:11:08 +0200 Subject: [PATCH 104/318] use go 1.22.2 (#2826) --- .github/workflows/bats-hub.yml | 2 +- .github/workflows/bats-mysql.yml | 2 +- .github/workflows/bats-postgres.yml | 2 +- .github/workflows/bats-sqlite-coverage.yml | 2 +- .github/workflows/ci-windows-build-msi.yml | 2 +- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/go-tests-windows.yml | 2 +- .github/workflows/go-tests.yml | 2 +- .github/workflows/publish-tarball-release.yml | 2 +- .golangci.yml | 9 +++++++-- Dockerfile | 2 +- Dockerfile.debian | 2 +- azure-pipelines.yml | 2 +- go.mod | 2 +- 14 files changed, 20 insertions(+), 15 deletions(-) 
diff --git a/.github/workflows/bats-hub.yml b/.github/workflows/bats-hub.yml index 4e977201c5a..72694276ac1 100644 --- a/.github/workflows/bats-hub.yml +++ b/.github/workflows/bats-hub.yml @@ -33,7 +33,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.9" + go-version: "1.22.2" - name: "Install bats dependencies" env: diff --git a/.github/workflows/bats-mysql.yml b/.github/workflows/bats-mysql.yml index 9e320b1b3de..e52c4759254 100644 --- a/.github/workflows/bats-mysql.yml +++ b/.github/workflows/bats-mysql.yml @@ -36,7 +36,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.9" + go-version: "1.22.2" - name: "Install bats dependencies" env: diff --git a/.github/workflows/bats-postgres.yml b/.github/workflows/bats-postgres.yml index 2cd09b494a0..c603e468a7b 100644 --- a/.github/workflows/bats-postgres.yml +++ b/.github/workflows/bats-postgres.yml @@ -45,7 +45,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.9" + go-version: "1.22.2" - name: "Install bats dependencies" env: diff --git a/.github/workflows/bats-sqlite-coverage.yml b/.github/workflows/bats-sqlite-coverage.yml index 742d1ee65ff..345f8761868 100644 --- a/.github/workflows/bats-sqlite-coverage.yml +++ b/.github/workflows/bats-sqlite-coverage.yml @@ -28,7 +28,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.9" + go-version: "1.22.2" - name: "Install bats dependencies" env: diff --git a/.github/workflows/ci-windows-build-msi.yml b/.github/workflows/ci-windows-build-msi.yml index 278426d778c..0ea60a31dd5 100644 --- a/.github/workflows/ci-windows-build-msi.yml +++ b/.github/workflows/ci-windows-build-msi.yml @@ -35,7 +35,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.9" + go-version: "1.22.2" - name: Build run: make windows_installer BUILD_RE2_WASM=1 diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 
1f27a8c7c2a..e4c078f3762 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -52,7 +52,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.9" + go-version: "1.22.2" cache-dependency-path: "**/go.sum" # Initializes the CodeQL tools for scanning. diff --git a/.github/workflows/go-tests-windows.yml b/.github/workflows/go-tests-windows.yml index 9d5c1739745..d8ec22e6a02 100644 --- a/.github/workflows/go-tests-windows.yml +++ b/.github/workflows/go-tests-windows.yml @@ -34,7 +34,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.9" + go-version: "1.22.2" - name: Build run: | diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index 4eac3777df9..3674358ff28 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -126,7 +126,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.9" + go-version: "1.22.2" - name: Create localstack streams run: | diff --git a/.github/workflows/publish-tarball-release.yml b/.github/workflows/publish-tarball-release.yml index 6cdf111a4ba..d2a5df0535c 100644 --- a/.github/workflows/publish-tarball-release.yml +++ b/.github/workflows/publish-tarball-release.yml @@ -25,7 +25,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.9" + go-version: "1.22.2" - name: Build the binaries run: | diff --git a/.golangci.yml b/.golangci.yml index b1aa22da52f..f27c5d863e5 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -144,6 +144,13 @@ linters: - structcheck - varcheck + # + # Disabled until fixed for go 1.22 + # + + - copyloopvar # copyloopvar is a linter detects places where loop variables are copied + - intrange # intrange is a linter to find places where for loops could make use of an integer range. 
+ # # Enabled # @@ -152,7 +159,6 @@ linters: # - asciicheck # checks that all code identifiers does not have non-ASCII symbols in the name # - bidichk # Checks for dangerous unicode character sequences # - bodyclose # checks whether HTTP response body is closed successfully - # - copyloopvar # copyloopvar is a linter detects places where loop variables are copied # - cyclop # checks function and package cyclomatic complexity # - decorder # check declaration order and count of types, constants, variables and functions # - depguard # Go linter that checks if package imports are in a list of acceptable packages @@ -181,7 +187,6 @@ linters: # - importas # Enforces consistent import aliases # - ineffassign # Detects when assignments to existing variables are not used # - interfacebloat # A linter that checks the number of methods inside an interface. - # - intrange # intrange is a linter to find places where for loops could make use of an integer range. # - lll # Reports long lines # - loggercheck # (logrlint): Checks key value pairs for common logger libraries (kitlog,klog,logr,zap). # - logrlint # Check logr arguments. 
diff --git a/Dockerfile b/Dockerfile index 69de0f9df8f..d67d534fe3d 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # vim: set ft=dockerfile: -FROM golang:1.21.9-alpine3.18 AS build +FROM golang:1.22.2-alpine3.18 AS build ARG BUILD_VERSION diff --git a/Dockerfile.debian b/Dockerfile.debian index 9bcb517bb2d..4fc3a923823 100644 --- a/Dockerfile.debian +++ b/Dockerfile.debian @@ -1,5 +1,5 @@ # vim: set ft=dockerfile: -FROM golang:1.21.9-bookworm AS build +FROM golang:1.22.2-bookworm AS build ARG BUILD_VERSION diff --git a/azure-pipelines.yml b/azure-pipelines.yml index b662a809ad7..99909bbb467 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -21,7 +21,7 @@ stages: - task: GoTool@0 displayName: "Install Go" inputs: - version: '1.21.9' + version: '1.22.2' - pwsh: | choco install -y make diff --git a/go.mod b/go.mod index 70d819a4059..8afc2b8a095 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/crowdsecurity/crowdsec -go 1.21 +go 1.22 // Don't use the toolchain directive to avoid uncontrolled downloads during // a build, especially in sandboxed environments (freebsd, gentoo...). 
From d2c4bc55fc7a77be2f5cc697eaee71a9634d2d02 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 25 Apr 2024 17:34:49 +0200 Subject: [PATCH 105/318] plugins: use yaml.v3 (#2969) * plugins: use yaml.v3 * lint --- .golangci.yml | 5 ----- cmd/notification-dummy/main.go | 14 ++++++++++---- cmd/notification-email/main.go | 22 ++++++++++++++++------ cmd/notification-http/main.go | 23 +++++++++++++++++++---- cmd/notification-slack/main.go | 14 ++++++++++---- cmd/notification-splunk/main.go | 18 +++++++++++++----- 6 files changed, 68 insertions(+), 28 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index f27c5d863e5..cc6551310c6 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -82,11 +82,6 @@ linters-settings: - "!**/pkg/apiserver/controllers/v1/errors.go" yaml: files: - - "!**/cmd/notification-dummy/main.go" - - "!**/cmd/notification-email/main.go" - - "!**/cmd/notification-http/main.go" - - "!**/cmd/notification-slack/main.go" - - "!**/cmd/notification-splunk/main.go" - "!**/pkg/acquisition/acquisition.go" - "!**/pkg/acquisition/acquisition_test.go" - "!**/pkg/acquisition/modules/appsec/appsec.go" diff --git a/cmd/notification-dummy/main.go b/cmd/notification-dummy/main.go index ef8d29ffa44..024a1eb81ba 100644 --- a/cmd/notification-dummy/main.go +++ b/cmd/notification-dummy/main.go @@ -5,10 +5,11 @@ import ( "fmt" "os" - "github.com/crowdsecurity/crowdsec/pkg/protobufs" "github.com/hashicorp/go-hclog" plugin "github.com/hashicorp/go-plugin" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" + + "github.com/crowdsecurity/crowdsec/pkg/protobufs" ) type PluginConfig struct { @@ -32,6 +33,7 @@ func (s *DummyPlugin) Notify(ctx context.Context, notification *protobufs.Notifi if _, ok := s.PluginConfigByName[notification.Name]; !ok { return nil, fmt.Errorf("invalid plugin config name %s", notification.Name) } + cfg := s.PluginConfigByName[notification.Name] if cfg.LogLevel != nil && *cfg.LogLevel != "" { @@ -42,19 +44,22 @@ func (s 
*DummyPlugin) Notify(ctx context.Context, notification *protobufs.Notifi logger.Debug(notification.Text) if cfg.OutputFile != nil && *cfg.OutputFile != "" { - f, err := os.OpenFile(*cfg.OutputFile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + f, err := os.OpenFile(*cfg.OutputFile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644) if err != nil { logger.Error(fmt.Sprintf("Cannot open notification file: %s", err)) } + if _, err := f.WriteString(notification.Text + "\n"); err != nil { f.Close() logger.Error(fmt.Sprintf("Cannot write notification to file: %s", err)) } + err = f.Close() if err != nil { logger.Error(fmt.Sprintf("Cannot close notification file: %s", err)) } } + fmt.Println(notification.Text) return &protobufs.Empty{}, nil @@ -64,11 +69,12 @@ func (s *DummyPlugin) Configure(ctx context.Context, config *protobufs.Config) ( d := PluginConfig{} err := yaml.Unmarshal(config.Config, &d) s.PluginConfigByName[d.Name] = d + return &protobufs.Empty{}, err } func main() { - var handshake = plugin.HandshakeConfig{ + handshake := plugin.HandshakeConfig{ ProtocolVersion: 1, MagicCookieKey: "CROWDSEC_PLUGIN_KEY", MagicCookieValue: os.Getenv("CROWDSEC_PLUGIN_KEY"), diff --git a/cmd/notification-email/main.go b/cmd/notification-email/main.go index 789740156fe..3b535ae7ffa 100644 --- a/cmd/notification-email/main.go +++ b/cmd/notification-email/main.go @@ -2,15 +2,17 @@ package main import ( "context" + "errors" "fmt" "os" "time" - "github.com/crowdsecurity/crowdsec/pkg/protobufs" "github.com/hashicorp/go-hclog" plugin "github.com/hashicorp/go-plugin" mail "github.com/xhit/go-simple-mail/v2" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" + + "github.com/crowdsecurity/crowdsec/pkg/protobufs" ) var baseLogger hclog.Logger = hclog.New(&hclog.LoggerOptions{ @@ -72,19 +74,20 @@ func (n *EmailPlugin) Configure(ctx context.Context, config *protobufs.Config) ( } if d.Name == "" { - return nil, fmt.Errorf("name is required") + return nil, errors.New("name is required") } if d.SMTPHost == "" { 
- return nil, fmt.Errorf("SMTP host is not set") + return nil, errors.New("SMTP host is not set") } if d.ReceiverEmails == nil || len(d.ReceiverEmails) == 0 { - return nil, fmt.Errorf("receiver emails are not set") + return nil, errors.New("receiver emails are not set") } n.ConfigByName[d.Name] = d baseLogger.Debug(fmt.Sprintf("Email plugin '%s' use SMTP host '%s:%d'", d.Name, d.SMTPHost, d.SMTPPort)) + return &protobufs.Empty{}, nil } @@ -92,6 +95,7 @@ func (n *EmailPlugin) Notify(ctx context.Context, notification *protobufs.Notifi if _, ok := n.ConfigByName[notification.Name]; !ok { return nil, fmt.Errorf("invalid plugin config name %s", notification.Name) } + cfg := n.ConfigByName[notification.Name] logger := baseLogger.Named(cfg.Name) @@ -117,6 +121,7 @@ func (n *EmailPlugin) Notify(ctx context.Context, notification *protobufs.Notifi server.ConnectTimeout, err = time.ParseDuration(cfg.ConnectTimeout) if err != nil { logger.Warn(fmt.Sprintf("invalid connect timeout '%s', using default '10s'", cfg.ConnectTimeout)) + server.ConnectTimeout = 10 * time.Second } } @@ -125,15 +130,18 @@ func (n *EmailPlugin) Notify(ctx context.Context, notification *protobufs.Notifi server.SendTimeout, err = time.ParseDuration(cfg.SendTimeout) if err != nil { logger.Warn(fmt.Sprintf("invalid send timeout '%s', using default '10s'", cfg.SendTimeout)) + server.SendTimeout = 10 * time.Second } } logger.Debug("making smtp connection") + smtpClient, err := server.Connect() if err != nil { return &protobufs.Empty{}, err } + logger.Debug("smtp connection done") email := mail.NewMSG() @@ -146,12 +154,14 @@ func (n *EmailPlugin) Notify(ctx context.Context, notification *protobufs.Notifi if err != nil { return &protobufs.Empty{}, err } + logger.Info(fmt.Sprintf("sent email to %v", cfg.ReceiverEmails)) + return &protobufs.Empty{}, nil } func main() { - var handshake = plugin.HandshakeConfig{ + handshake := plugin.HandshakeConfig{ ProtocolVersion: 1, MagicCookieKey: "CROWDSEC_PLUGIN_KEY", 
MagicCookieValue: os.Getenv("CROWDSEC_PLUGIN_KEY"), diff --git a/cmd/notification-http/main.go b/cmd/notification-http/main.go index 382f30fea53..6b11a78ef86 100644 --- a/cmd/notification-http/main.go +++ b/cmd/notification-http/main.go @@ -12,10 +12,11 @@ import ( "os" "strings" - "github.com/crowdsecurity/crowdsec/pkg/protobufs" "github.com/hashicorp/go-hclog" plugin "github.com/hashicorp/go-plugin" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" + + "github.com/crowdsecurity/crowdsec/pkg/protobufs" ) type PluginConfig struct { @@ -90,18 +91,23 @@ func getTLSClient(c *PluginConfig) error { tlsConfig.Certificates = []tls.Certificate{cert} } + transport := &http.Transport{ TLSClientConfig: tlsConfig, } + if c.UnixSocket != "" { logger.Info(fmt.Sprintf("Using socket '%s'", c.UnixSocket)) + transport.DialContext = func(_ context.Context, _, _ string) (net.Conn, error) { return net.Dial("unix", strings.TrimSuffix(c.UnixSocket, "/")) } } + c.Client = &http.Client{ Transport: transport, } + return nil } @@ -109,6 +115,7 @@ func (s *HTTPPlugin) Notify(ctx context.Context, notification *protobufs.Notific if _, ok := s.PluginConfigByName[notification.Name]; !ok { return nil, fmt.Errorf("invalid plugin config name %s", notification.Name) } + cfg := s.PluginConfigByName[notification.Name] if cfg.LogLevel != nil && *cfg.LogLevel != "" { @@ -121,11 +128,14 @@ func (s *HTTPPlugin) Notify(ctx context.Context, notification *protobufs.Notific if err != nil { return nil, err } + for headerName, headerValue := range cfg.Headers { logger.Debug(fmt.Sprintf("adding header %s: %s", headerName, headerValue)) request.Header.Add(headerName, headerValue) } + logger.Debug(fmt.Sprintf("making HTTP %s call to %s with body %s", cfg.Method, cfg.URL, notification.Text)) + resp, err := cfg.Client.Do(request.WithContext(ctx)) if err != nil { logger.Error(fmt.Sprintf("Failed to make HTTP request : %s", err)) @@ -135,7 +145,7 @@ func (s *HTTPPlugin) Notify(ctx context.Context, notification 
*protobufs.Notific respData, err := io.ReadAll(resp.Body) if err != nil { - return nil, fmt.Errorf("failed to read response body got error %s", err) + return nil, fmt.Errorf("failed to read response body got error %w", err) } logger.Debug(fmt.Sprintf("got response %s", string(respData))) @@ -143,6 +153,7 @@ func (s *HTTPPlugin) Notify(ctx context.Context, notification *protobufs.Notific if resp.StatusCode < 200 || resp.StatusCode >= 300 { logger.Warn(fmt.Sprintf("HTTP server returned non 200 status code: %d", resp.StatusCode)) logger.Debug(fmt.Sprintf("HTTP server returned body: %s", string(respData))) + return &protobufs.Empty{}, nil } @@ -151,21 +162,25 @@ func (s *HTTPPlugin) Notify(ctx context.Context, notification *protobufs.Notific func (s *HTTPPlugin) Configure(ctx context.Context, config *protobufs.Config) (*protobufs.Empty, error) { d := PluginConfig{} + err := yaml.Unmarshal(config.Config, &d) if err != nil { return nil, err } + err = getTLSClient(&d) if err != nil { return nil, err } + s.PluginConfigByName[d.Name] = d logger.Debug(fmt.Sprintf("HTTP plugin '%s' use URL '%s'", d.Name, d.URL)) + return &protobufs.Empty{}, err } func main() { - var handshake = plugin.HandshakeConfig{ + handshake := plugin.HandshakeConfig{ ProtocolVersion: 1, MagicCookieKey: "CROWDSEC_PLUGIN_KEY", MagicCookieValue: os.Getenv("CROWDSEC_PLUGIN_KEY"), diff --git a/cmd/notification-slack/main.go b/cmd/notification-slack/main.go index 373cd9527ab..1e73d005fd8 100644 --- a/cmd/notification-slack/main.go +++ b/cmd/notification-slack/main.go @@ -5,12 +5,12 @@ import ( "fmt" "os" - "github.com/crowdsecurity/crowdsec/pkg/protobufs" "github.com/hashicorp/go-hclog" plugin "github.com/hashicorp/go-plugin" - "github.com/slack-go/slack" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" + + "github.com/crowdsecurity/crowdsec/pkg/protobufs" ) type PluginConfig struct { @@ -33,13 +33,16 @@ func (n *Notify) Notify(ctx context.Context, notification *protobufs.Notificatio if _, ok := 
n.ConfigByName[notification.Name]; !ok { return nil, fmt.Errorf("invalid plugin config name %s", notification.Name) } + cfg := n.ConfigByName[notification.Name] if cfg.LogLevel != nil && *cfg.LogLevel != "" { logger.SetLevel(hclog.LevelFromString(*cfg.LogLevel)) } + logger.Info(fmt.Sprintf("found notify signal for %s config", notification.Name)) logger.Debug(fmt.Sprintf("posting to %s webhook, message %s", cfg.Webhook, notification.Text)) + err := slack.PostWebhookContext(ctx, n.ConfigByName[notification.Name].Webhook, &slack.WebhookMessage{ Text: notification.Text, }) @@ -52,16 +55,19 @@ func (n *Notify) Notify(ctx context.Context, notification *protobufs.Notificatio func (n *Notify) Configure(ctx context.Context, config *protobufs.Config) (*protobufs.Empty, error) { d := PluginConfig{} + if err := yaml.Unmarshal(config.Config, &d); err != nil { return nil, err } + n.ConfigByName[d.Name] = d logger.Debug(fmt.Sprintf("Slack plugin '%s' use URL '%s'", d.Name, d.Webhook)) + return &protobufs.Empty{}, nil } func main() { - var handshake = plugin.HandshakeConfig{ + handshake := plugin.HandshakeConfig{ ProtocolVersion: 1, MagicCookieKey: "CROWDSEC_PLUGIN_KEY", MagicCookieValue: os.Getenv("CROWDSEC_PLUGIN_KEY"), diff --git a/cmd/notification-splunk/main.go b/cmd/notification-splunk/main.go index b24aa538f9a..26190c58a89 100644 --- a/cmd/notification-splunk/main.go +++ b/cmd/notification-splunk/main.go @@ -10,11 +10,11 @@ import ( "os" "strings" - "github.com/crowdsecurity/crowdsec/pkg/protobufs" "github.com/hashicorp/go-hclog" plugin "github.com/hashicorp/go-plugin" + "gopkg.in/yaml.v3" - "gopkg.in/yaml.v2" + "github.com/crowdsecurity/crowdsec/pkg/protobufs" ) var logger hclog.Logger = hclog.New(&hclog.LoggerOptions{ @@ -44,6 +44,7 @@ func (s *Splunk) Notify(ctx context.Context, notification *protobufs.Notificatio if _, ok := s.PluginConfigByName[notification.Name]; !ok { return &protobufs.Empty{}, fmt.Errorf("splunk invalid config name %s", notification.Name) } + cfg := 
s.PluginConfigByName[notification.Name] if cfg.LogLevel != nil && *cfg.LogLevel != "" { @@ -53,6 +54,7 @@ func (s *Splunk) Notify(ctx context.Context, notification *protobufs.Notificatio logger.Info(fmt.Sprintf("received notify signal for %s config", notification.Name)) p := Payload{Event: notification.Text} + data, err := json.Marshal(p) if err != nil { return &protobufs.Empty{}, err @@ -65,6 +67,7 @@ func (s *Splunk) Notify(ctx context.Context, notification *protobufs.Notificatio req.Header.Add("Authorization", fmt.Sprintf("Splunk %s", cfg.Token)) logger.Debug(fmt.Sprintf("posting event %s to %s", string(data), req.URL)) + resp, err := s.Client.Do(req.WithContext(ctx)) if err != nil { return &protobufs.Empty{}, err @@ -73,15 +76,19 @@ func (s *Splunk) Notify(ctx context.Context, notification *protobufs.Notificatio if resp.StatusCode != http.StatusOK { content, err := io.ReadAll(resp.Body) if err != nil { - return &protobufs.Empty{}, fmt.Errorf("got non 200 response and failed to read error %s", err) + return &protobufs.Empty{}, fmt.Errorf("got non 200 response and failed to read error %w", err) } + return &protobufs.Empty{}, fmt.Errorf("got non 200 response %s", string(content)) } + respData, err := io.ReadAll(resp.Body) if err != nil { - return &protobufs.Empty{}, fmt.Errorf("failed to read response body got error %s", err) + return &protobufs.Empty{}, fmt.Errorf("failed to read response body got error %w", err) } + logger.Debug(fmt.Sprintf("got response %s", string(respData))) + return &protobufs.Empty{}, nil } @@ -90,11 +97,12 @@ func (s *Splunk) Configure(ctx context.Context, config *protobufs.Config) (*prot err := yaml.Unmarshal(config.Config, &d) s.PluginConfigByName[d.Name] = d logger.Debug(fmt.Sprintf("Splunk plugin '%s' use URL '%s'", d.Name, d.URL)) + return &protobufs.Empty{}, err } func main() { - var handshake = plugin.HandshakeConfig{ + handshake := plugin.HandshakeConfig{ ProtocolVersion: 1, MagicCookieKey: "CROWDSEC_PLUGIN_KEY", MagicCookieValue: 
os.Getenv("CROWDSEC_PLUGIN_KEY"), From c4473839c43dad25727f8cd341d94b9baa5b1702 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 25 Apr 2024 17:53:10 +0200 Subject: [PATCH 106/318] Refact pkg/parser/node (#2953) * extract method processFilter() * extract method processWhitelist() * lint (whitespace, errors) --- .golangci.yml | 4 +- pkg/parser/node.go | 230 +++++++++++++++++++++++++++++---------------- 2 files changed, 153 insertions(+), 81 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index cc6551310c6..9f059aa8aa9 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -3,7 +3,7 @@ linters-settings: cyclop: # lower this after refactoring - max-complexity: 53 + max-complexity: 48 gci: sections: @@ -22,7 +22,7 @@ linters-settings: gocyclo: # lower this after refactoring - min-complexity: 49 + min-complexity: 48 funlen: # Checks the number of lines in a function. diff --git a/pkg/parser/node.go b/pkg/parser/node.go index fe5432ce938..0906f0b5143 100644 --- a/pkg/parser/node.go +++ b/pkg/parser/node.go @@ -22,69 +22,70 @@ import ( type Node struct { FormatVersion string `yaml:"format"` - //Enable config + runtime debug of node via config o/ + // Enable config + runtime debug of node via config o/ Debug bool `yaml:"debug,omitempty"` - //If enabled, the node (and its child) will report their own statistics + // If enabled, the node (and its child) will report their own statistics Profiling bool `yaml:"profiling,omitempty"` - //Name, author, description and reference(s) for parser pattern + // Name, author, description and reference(s) for parser pattern Name string `yaml:"name,omitempty"` Author string `yaml:"author,omitempty"` Description string `yaml:"description,omitempty"` References []string `yaml:"references,omitempty"` - //if debug is present in the node, keep its specific Logger in runtime structure + // if debug is present in the node, keep its specific Logger in runtime structure Logger *log.Entry `yaml:"-"` - 
//This is mostly a hack to make writing less repetitive. - //relying on stage, we know which field to parse, and we - //can also promote log to next stage on success + // This is mostly a hack to make writing less repetitive. + // relying on stage, we know which field to parse, and we + // can also promote log to next stage on success Stage string `yaml:"stage,omitempty"` - //OnSuccess allows to tag a node to be able to move log to next stage on success + // OnSuccess allows to tag a node to be able to move log to next stage on success OnSuccess string `yaml:"onsuccess,omitempty"` - rn string //this is only for us in debug, a random generated name for each node - //Filter is executed at runtime (with current log line as context) - //and must succeed or node is exited + rn string // this is only for us in debug, a random generated name for each node + // Filter is executed at runtime (with current log line as context) + // and must succeed or node is exited Filter string `yaml:"filter,omitempty"` - RunTimeFilter *vm.Program `yaml:"-" json:"-"` //the actual compiled filter - //If node has leafs, execute all of them until one asks for a 'break' + RunTimeFilter *vm.Program `yaml:"-" json:"-"` // the actual compiled filter + // If node has leafs, execute all of them until one asks for a 'break' LeavesNodes []Node `yaml:"nodes,omitempty"` - //Flag used to describe when to 'break' or return an 'error' + // Flag used to describe when to 'break' or return an 'error' EnrichFunctions EnricherCtx /* If the node is actually a leaf, it can have : grok, enrich, statics */ - //pattern_syntax are named grok patterns that are re-utilized over several grok patterns + // pattern_syntax are named grok patterns that are re-utilized over several grok patterns SubGroks yaml.MapSlice `yaml:"pattern_syntax,omitempty"` - //Holds a grok pattern + // Holds a grok pattern Grok GrokPattern `yaml:"grok,omitempty"` - //Statics can be present in any type of node and is executed last + // Statics 
can be present in any type of node and is executed last Statics []ExtraField `yaml:"statics,omitempty"` - //Stash allows to capture data from the log line and store it in an accessible cache + // Stash allows to capture data from the log line and store it in an accessible cache Stash []DataCapture `yaml:"stash,omitempty"` - //Whitelists + // Whitelists Whitelist Whitelist `yaml:"whitelist,omitempty"` Data []*types.DataSource `yaml:"data,omitempty"` } func (n *Node) validate(pctx *UnixParserCtx, ectx EnricherCtx) error { - - //stage is being set automagically + // stage is being set automagically if n.Stage == "" { - return fmt.Errorf("stage needs to be an existing stage") + return errors.New("stage needs to be an existing stage") } /* "" behaves like continue */ if n.OnSuccess != "continue" && n.OnSuccess != "next_stage" && n.OnSuccess != "" { return fmt.Errorf("onsuccess '%s' not continue,next_stage", n.OnSuccess) } + if n.Filter != "" && n.RunTimeFilter == nil { return fmt.Errorf("non-empty filter '%s' was not compiled", n.Filter) } if n.Grok.RunTimeRegexp != nil || n.Grok.TargetField != "" { if n.Grok.TargetField == "" && n.Grok.ExpValue == "" { - return fmt.Errorf("grok requires 'expression' or 'apply_on'") + return errors.New("grok requires 'expression' or 'apply_on'") } + if n.Grok.RegexpName == "" && n.Grok.RegexpValue == "" { - return fmt.Errorf("grok needs 'pattern' or 'name'") + return errors.New("grok needs 'pattern' or 'name'") } } @@ -93,6 +94,7 @@ func (n *Node) validate(pctx *UnixParserCtx, ectx EnricherCtx) error { if static.ExpValue == "" { return fmt.Errorf("static %d : when method is set, expression must be present", idx) } + if _, ok := ectx.Registered[static.Method]; !ok { log.Warningf("the method '%s' doesn't exist or the plugin has not been initialized", static.Method) } @@ -100,6 +102,7 @@ func (n *Node) validate(pctx *UnixParserCtx, ectx EnricherCtx) error { if static.Meta == "" && static.Parsed == "" && static.TargetByName == "" { return 
fmt.Errorf("static %d : at least one of meta/event/target must be set", idx) } + if static.Value == "" && static.RunTimeValue == nil { return fmt.Errorf("static %d value or expression must be set", idx) } @@ -110,72 +113,76 @@ func (n *Node) validate(pctx *UnixParserCtx, ectx EnricherCtx) error { if stash.Name == "" { return fmt.Errorf("stash %d : name must be set", idx) } + if stash.Value == "" { return fmt.Errorf("stash %s : value expression must be set", stash.Name) } + if stash.Key == "" { return fmt.Errorf("stash %s : key expression must be set", stash.Name) } + if stash.TTL == "" { return fmt.Errorf("stash %s : ttl must be set", stash.Name) } + if stash.Strategy == "" { stash.Strategy = "LRU" } - //should be configurable + // should be configurable if stash.MaxMapSize == 0 { stash.MaxMapSize = 100 } } + return nil } -func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[string]interface{}) (bool, error) { - var NodeState bool - var NodeHasOKGrok bool +func (n *Node) processFilter(cachedExprEnv map[string]interface{}) (bool, error) { clog := n.Logger + if n.RunTimeFilter == nil { + clog.Tracef("Node has not filter, enter") + return true, nil + } - cachedExprEnv := expressionEnv + // Evaluate node's filter + output, err := exprhelpers.Run(n.RunTimeFilter, cachedExprEnv, clog, n.Debug) + if err != nil { + clog.Warningf("failed to run filter : %v", err) + clog.Debugf("Event leaving node : ko") - clog.Tracef("Event entering node") - if n.RunTimeFilter != nil { - //Evaluate node's filter - output, err := exprhelpers.Run(n.RunTimeFilter, cachedExprEnv, clog, n.Debug) - if err != nil { - clog.Warningf("failed to run filter : %v", err) - clog.Debugf("Event leaving node : ko") - return false, nil - } + return false, nil + } - switch out := output.(type) { - case bool: - if !out { - clog.Debugf("Event leaving node : ko (failed filter)") - return false, nil - } - default: - clog.Warningf("Expr '%s' returned non-bool, abort : %T", n.Filter, output) - 
clog.Debugf("Event leaving node : ko") + switch out := output.(type) { + case bool: + if !out { + clog.Debugf("Event leaving node : ko (failed filter)") return false, nil } - NodeState = true - } else { - clog.Tracef("Node has not filter, enter") - NodeState = true - } + default: + clog.Warningf("Expr '%s' returned non-bool, abort : %T", n.Filter, output) + clog.Debugf("Event leaving node : ko") - if n.Name != "" { - NodesHits.With(prometheus.Labels{"source": p.Line.Src, "type": p.Line.Module, "name": n.Name}).Inc() + return false, nil } - exprErr := error(nil) + + return true, nil +} + +func (n *Node) processWhitelist(cachedExprEnv map[string]interface{}, p *types.Event) (bool, error) { + var exprErr error + isWhitelisted := n.CheckIPsWL(p) if !isWhitelisted { isWhitelisted, exprErr = n.CheckExprWL(cachedExprEnv, p) } + if exprErr != nil { // Previous code returned nil if there was an error, so we keep this behavior return false, nil //nolint:nilerr } + if isWhitelisted && !p.Whitelisted { p.Whitelisted = true p.WhitelistReason = n.Whitelist.Reason @@ -185,18 +192,51 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[stri for k := range p.Overflow.Sources { ips = append(ips, k) } - clog.Infof("Ban for %s whitelisted, reason [%s]", strings.Join(ips, ","), n.Whitelist.Reason) + + n.Logger.Infof("Ban for %s whitelisted, reason [%s]", strings.Join(ips, ","), n.Whitelist.Reason) + p.Overflow.Whitelisted = true } } - //Process grok if present, should be exclusive with nodes :) + return isWhitelisted, nil +} + +func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[string]interface{}) (bool, error) { + var NodeHasOKGrok bool + + clog := n.Logger + + cachedExprEnv := expressionEnv + + clog.Tracef("Event entering node") + + NodeState, err := n.processFilter(cachedExprEnv) + if err != nil { + return false, err + } + + if !NodeState { + return false, nil + } + + if n.Name != "" { + NodesHits.With(prometheus.Labels{"source": 
p.Line.Src, "type": p.Line.Module, "name": n.Name}).Inc() + } + + isWhitelisted, err := n.processWhitelist(cachedExprEnv, p) + if err != nil { + return false, err + } + + // Process grok if present, should be exclusive with nodes :) gstr := "" + if n.Grok.RunTimeRegexp != nil { clog.Tracef("Processing grok pattern : %s : %p", n.Grok.RegexpName, n.Grok.RunTimeRegexp) - //for unparsed, parsed etc. set sensible defaults to reduce user hassle + // for unparsed, parsed etc. set sensible defaults to reduce user hassle if n.Grok.TargetField != "" { - //it's a hack to avoid using real reflect + // it's a hack to avoid using real reflect if n.Grok.TargetField == "Line.Raw" { gstr = p.Line.Raw } else if val, ok := p.Parsed[n.Grok.TargetField]; ok { @@ -211,6 +251,7 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[stri clog.Warningf("failed to run RunTimeValue : %v", err) NodeState = false } + switch out := output.(type) { case string: gstr = out @@ -229,12 +270,14 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[stri } else { groklabel = n.Grok.RegexpName } + grok := n.Grok.RunTimeRegexp.Parse(gstr) if len(grok) > 0 { /*tag explicitly that the *current* node had a successful grok pattern. it's important to know success state*/ NodeHasOKGrok = true + clog.Debugf("+ Grok '%s' returned %d entries to merge in Parsed", groklabel, len(grok)) - //We managed to grok stuff, merged into parse + // We managed to grok stuff, merged into parse for k, v := range grok { clog.Debugf("\t.Parsed['%s'] = '%s'", k, v) p.Parsed[k] = v @@ -246,34 +289,37 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[stri return false, err } } else { - //grok failed, node failed + // grok failed, node failed clog.Debugf("+ Grok '%s' didn't return data on '%s'", groklabel, gstr) NodeState = false } - } else { clog.Tracef("! 
No grok pattern : %p", n.Grok.RunTimeRegexp) } - //Process the stash (data collection) if : a grok was present and succeeded, or if there is no grok + // Process the stash (data collection) if : a grok was present and succeeded, or if there is no grok if NodeHasOKGrok || n.Grok.RunTimeRegexp == nil { for idx, stash := range n.Stash { - var value string - var key string + var ( + key string + value string + ) + if stash.ValueExpression == nil { clog.Warningf("Stash %d has no value expression, skipping", idx) continue } + if stash.KeyExpression == nil { clog.Warningf("Stash %d has no key expression, skipping", idx) continue } - //collect the data + // collect the data output, err := exprhelpers.Run(stash.ValueExpression, cachedExprEnv, clog, n.Debug) if err != nil { clog.Warningf("Error while running stash val expression : %v", err) } - //can we expect anything else than a string ? + // can we expect anything else than a string ? switch output := output.(type) { case string: value = output @@ -282,12 +328,12 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[stri continue } - //collect the key + // collect the key output, err = exprhelpers.Run(stash.KeyExpression, cachedExprEnv, clog, n.Debug) if err != nil { clog.Warningf("Error while running stash key expression : %v", err) } - //can we expect anything else than a string ? + // can we expect anything else than a string ? 
switch output := output.(type) { case string: key = output @@ -299,7 +345,7 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[stri } } - //Iterate on leafs + // Iterate on leafs for _, leaf := range n.LeavesNodes { ret, err := leaf.process(p, ctx, cachedExprEnv) if err != nil { @@ -307,7 +353,9 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[stri clog.Debugf("Event leaving node : ko") return false, err } + clog.Tracef("\tsub-node (%s) ret : %v (strategy:%s)", leaf.rn, ret, n.OnSuccess) + if ret { NodeState = true /* if child is successful, stop processing */ @@ -328,12 +376,14 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[stri clog.Tracef("State after nodes : %v", NodeState) - //grok or leafs failed, don't process statics + // grok or leafs failed, don't process statics if !NodeState { if n.Name != "" { NodesHitsKo.With(prometheus.Labels{"source": p.Line.Src, "type": p.Line.Module, "name": n.Name}).Inc() } + clog.Debugf("Event leaving node : ko") + return NodeState, nil } @@ -360,9 +410,10 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[stri if NodeState { clog.Debugf("Event leaving node : ok") log.Tracef("node is successful, check strategy") + if n.OnSuccess == "next_stage" { idx := stageidx(p.Stage, ctx.Stages) - //we're at the last stage + // we're at the last stage if idx+1 == len(ctx.Stages) { clog.Debugf("node reached the last stage : %s", p.Stage) } else { @@ -375,15 +426,16 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[stri } else { clog.Debugf("Event leaving node : ko") } + clog.Tracef("Node successful, continue") + return NodeState, nil } func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error { var err error - var valid bool - valid = false + valid := false dumpr := spew.ConfigState{MaxDepth: 1, DisablePointerAddresses: true} n.rn = seed.Generate() @@ -393,10 +445,11 @@ func (n *Node) 
compile(pctx *UnixParserCtx, ectx EnricherCtx) error { /* if the node has debugging enabled, create a specific logger with debug that will be used only for processing this node ;) */ if n.Debug { - var clog = log.New() + clog := log.New() if err = types.ConfigureLogger(clog); err != nil { log.Fatalf("While creating bucket-specific logger : %s", err) } + clog.SetLevel(log.DebugLevel) n.Logger = clog.WithFields(log.Fields{ "id": n.rn, @@ -414,7 +467,7 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error { n.Logger.Tracef("Compiling : %s", dumpr.Sdump(n)) - //compile filter if present + // compile filter if present if n.Filter != "" { n.RunTimeFilter, err = expr.Compile(n.Filter, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...) if err != nil { @@ -425,12 +478,15 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error { /* handle pattern_syntax and groks */ for _, pattern := range n.SubGroks { n.Logger.Tracef("Adding subpattern '%s' : '%s'", pattern.Key, pattern.Value) + if err = pctx.Grok.Add(pattern.Key.(string), pattern.Value.(string)); err != nil { if errors.Is(err, grokky.ErrAlreadyExist) { n.Logger.Warningf("grok '%s' already registred", pattern.Key) continue } + n.Logger.Errorf("Unable to compile subpattern %s : %v", pattern.Key, err) + return err } } @@ -438,28 +494,36 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error { /* load grok by name or compile in-place */ if n.Grok.RegexpName != "" { n.Logger.Tracef("+ Regexp Compilation '%s'", n.Grok.RegexpName) + n.Grok.RunTimeRegexp, err = pctx.Grok.Get(n.Grok.RegexpName) if err != nil { return fmt.Errorf("unable to find grok '%s' : %v", n.Grok.RegexpName, err) } + if n.Grok.RunTimeRegexp == nil { return fmt.Errorf("empty grok '%s'", n.Grok.RegexpName) } + n.Logger.Tracef("%s regexp: %s", n.Grok.RegexpName, n.Grok.RunTimeRegexp.String()) + valid = true } else if n.Grok.RegexpValue != "" { if strings.HasSuffix(n.Grok.RegexpValue, "\n") 
{ n.Logger.Debugf("Beware, pattern ends with \\n : '%s'", n.Grok.RegexpValue) } + n.Grok.RunTimeRegexp, err = pctx.Grok.Compile(n.Grok.RegexpValue) if err != nil { return fmt.Errorf("failed to compile grok '%s': %v", n.Grok.RegexpValue, err) } + if n.Grok.RunTimeRegexp == nil { // We shouldn't be here because compilation succeeded, so regexp shouldn't be nil return fmt.Errorf("grok compilation failure: %s", n.Grok.RegexpValue) } + n.Logger.Tracef("%s regexp : %s", n.Grok.RegexpValue, n.Grok.RunTimeRegexp.String()) + valid = true } @@ -473,7 +537,7 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error { } /* load grok statics */ - //compile expr statics if present + // compile expr statics if present for idx := range n.Grok.Statics { if n.Grok.Statics[idx].ExpValue != "" { n.Grok.Statics[idx].RunTimeValue, err = expr.Compile(n.Grok.Statics[idx].ExpValue, @@ -482,6 +546,7 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error { return err } } + valid = true } @@ -505,7 +570,7 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error { } logLvl := n.Logger.Logger.GetLevel() - //init the cache, does it make sense to create it here just to be sure everything is fine ? + // init the cache, does it make sense to create it here just to be sure everything is fine ? 
if err = cache.CacheInit(cache.CacheCfg{ Size: n.Stash[i].MaxMapSize, TTL: n.Stash[i].TTLVal, @@ -526,14 +591,18 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error { if !n.LeavesNodes[idx].Debug && n.Debug { n.LeavesNodes[idx].Debug = true } + if !n.LeavesNodes[idx].Profiling && n.Profiling { n.LeavesNodes[idx].Profiling = true } + n.LeavesNodes[idx].Stage = n.Stage + err = n.LeavesNodes[idx].compile(pctx, ectx) if err != nil { return err } + valid = true } @@ -546,6 +615,7 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error { return err } } + valid = true } @@ -554,13 +624,15 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error { if err != nil { return err } + valid = valid || whitelistValid if !valid { /* node is empty, error force return */ n.Logger.Error("Node is empty or invalid, abort") n.Stage = "" - return fmt.Errorf("Node is empty") + + return errors.New("Node is empty") } if err := n.validate(pctx, ectx); err != nil { From 05b54687b6e638fe2092d42d84d26d527e2b183d Mon Sep 17 00:00:00 2001 From: Laurence Jones Date: Fri, 26 Apr 2024 15:56:15 +0100 Subject: [PATCH 107/318] feat: support stdout in cscli support dump (#2939) * feat: support stdout in cscli support dump * fix: skip log.info if stdout * fix: handle errors by returning to runE instead --- cmd/crowdsec-cli/support.go | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/cmd/crowdsec-cli/support.go b/cmd/crowdsec-cli/support.go index a48edeeeb9f..737411e28b8 100644 --- a/cmd/crowdsec-cli/support.go +++ b/cmd/crowdsec-cli/support.go @@ -319,7 +319,7 @@ cscli support dump -f /tmp/crowdsec-support.zip `, Args: cobra.NoArgs, DisableAutoGenTag: true, - Run: func(_ *cobra.Command, _ []string) { + RunE: func(_ *cobra.Command, _ []string) error { var err error var skipHub, skipDB, skipCAPI, skipLAPI, skipAgent bool infos := map[string][]byte{ @@ -473,15 +473,19 @@ cscli support dump -f /tmp/crowdsec-support.zip err = 
zipWriter.Close() if err != nil { - log.Fatalf("could not finalize zip file: %s", err) + return fmt.Errorf("could not finalize zip file: %s", err) } + if outFile == "-" { + _, err = os.Stdout.Write(w.Bytes()) + return err + } err = os.WriteFile(outFile, w.Bytes(), 0o600) if err != nil { - log.Fatalf("could not write zip file to %s: %s", outFile, err) + return fmt.Errorf("could not write zip file to %s: %s", outFile, err) } - log.Infof("Written zip file to %s", outFile) + return nil }, } From 3788610aff31782f949cc9963ad9e3b32b26f961 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 2 May 2024 10:25:04 +0200 Subject: [PATCH 108/318] cscli: avoid global vars (#2977) * cscli: avoid global usage This is required to make it possible to split the package * lint (fmt.Errorf) --- cmd/crowdsec-cli/hub.go | 6 ++++-- cmd/crowdsec-cli/hubappsec.go | 2 +- cmd/crowdsec-cli/item_metrics.go | 10 ++++----- cmd/crowdsec-cli/item_suggest.go | 8 +++---- cmd/crowdsec-cli/itemcli.go | 14 +++++++------ cmd/crowdsec-cli/items.go | 12 +++++------ cmd/crowdsec-cli/support.go | 2 +- pkg/acquisition/acquisition_test.go | 21 ++++++++++--------- .../modules/cloudwatch/cloudwatch_test.go | 3 ++- pkg/csconfig/config_paths.go | 5 +++-- pkg/longpollclient/client.go | 4 ++-- pkg/parser/parsing_test.go | 4 ++-- 12 files changed, 49 insertions(+), 42 deletions(-) diff --git a/cmd/crowdsec-cli/hub.go b/cmd/crowdsec-cli/hub.go index 71347a5a57b..62eb894d8f2 100644 --- a/cmd/crowdsec-cli/hub.go +++ b/cmd/crowdsec-cli/hub.go @@ -47,7 +47,9 @@ cscli hub upgrade`, } func (cli *cliHub) list(all bool) error { - hub, err := require.Hub(cli.cfg(), nil, log.StandardLogger()) + cfg := cli.cfg() + + hub, err := require.Hub(cfg, nil, log.StandardLogger()) if err != nil { return err } @@ -69,7 +71,7 @@ func (cli *cliHub) list(all bool) error { } } - err = listItems(color.Output, cwhub.ItemTypes, items, true) + err = listItems(color.Output, cwhub.ItemTypes, items, true, 
cfg.Cscli.Output) if err != nil { return err } diff --git a/cmd/crowdsec-cli/hubappsec.go b/cmd/crowdsec-cli/hubappsec.go index 7ee578edc2f..1df3212f941 100644 --- a/cmd/crowdsec-cli/hubappsec.go +++ b/cmd/crowdsec-cli/hubappsec.go @@ -50,7 +50,7 @@ cscli appsec-configs list crowdsecurity/vpatch`, func NewCLIAppsecRule(cfg configGetter) *cliItem { inspectDetail := func(item *cwhub.Item) error { // Only show the converted rules in human mode - if csConfig.Cscli.Output != "human" { + if cfg().Cscli.Output != "human" { return nil } diff --git a/cmd/crowdsec-cli/item_metrics.go b/cmd/crowdsec-cli/item_metrics.go index b571fb1c5ed..9459968790b 100644 --- a/cmd/crowdsec-cli/item_metrics.go +++ b/cmd/crowdsec-cli/item_metrics.go @@ -18,22 +18,22 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -func ShowMetrics(hubItem *cwhub.Item) error { +func ShowMetrics(prometheusURL string, hubItem *cwhub.Item) error { switch hubItem.Type { case cwhub.PARSERS: - metrics := GetParserMetric(csConfig.Cscli.PrometheusUrl, hubItem.Name) + metrics := GetParserMetric(prometheusURL, hubItem.Name) parserMetricsTable(color.Output, hubItem.Name, metrics) case cwhub.SCENARIOS: - metrics := GetScenarioMetric(csConfig.Cscli.PrometheusUrl, hubItem.Name) + metrics := GetScenarioMetric(prometheusURL, hubItem.Name) scenarioMetricsTable(color.Output, hubItem.Name, metrics) case cwhub.COLLECTIONS: for _, sub := range hubItem.SubItems() { - if err := ShowMetrics(sub); err != nil { + if err := ShowMetrics(prometheusURL, sub); err != nil { return err } } case cwhub.APPSEC_RULES: - metrics := GetAppsecRuleMetric(csConfig.Cscli.PrometheusUrl, hubItem.Name) + metrics := GetAppsecRuleMetric(prometheusURL, hubItem.Name) appsecMetricsTable(color.Output, hubItem.Name, metrics) default: // no metrics for this item type } diff --git a/cmd/crowdsec-cli/item_suggest.go b/cmd/crowdsec-cli/item_suggest.go index 9272abedd50..0ea656549ba 100644 --- a/cmd/crowdsec-cli/item_suggest.go +++ 
b/cmd/crowdsec-cli/item_suggest.go @@ -36,8 +36,8 @@ func suggestNearestMessage(hub *cwhub.Hub, itemType string, itemName string) str return msg } -func compAllItems(itemType string, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - hub, err := require.Hub(csConfig, nil, nil) +func compAllItems(itemType string, args []string, toComplete string, cfg configGetter) ([]string, cobra.ShellCompDirective) { + hub, err := require.Hub(cfg(), nil, nil) if err != nil { return nil, cobra.ShellCompDirectiveDefault } @@ -55,8 +55,8 @@ func compAllItems(itemType string, args []string, toComplete string) ([]string, return comp, cobra.ShellCompDirectiveNoFileComp } -func compInstalledItems(itemType string, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - hub, err := require.Hub(csConfig, nil, nil) +func compInstalledItems(itemType string, args []string, toComplete string, cfg configGetter) ([]string, cobra.ShellCompDirective) { + hub, err := require.Hub(cfg(), nil, nil) if err != nil { return nil, cobra.ShellCompDirectiveDefault } diff --git a/cmd/crowdsec-cli/itemcli.go b/cmd/crowdsec-cli/itemcli.go index cdaf7877054..44a734d5e6a 100644 --- a/cmd/crowdsec-cli/itemcli.go +++ b/cmd/crowdsec-cli/itemcli.go @@ -112,7 +112,7 @@ func (cli cliItem) newInstallCmd() *cobra.Command { Args: cobra.MinimumNArgs(1), DisableAutoGenTag: true, ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return compAllItems(cli.name, args, toComplete) + return compAllItems(cli.name, args, toComplete, cli.cfg) }, RunE: func(_ *cobra.Command, args []string) error { return cli.install(args, downloadOnly, force, ignoreError) @@ -238,7 +238,7 @@ func (cli cliItem) newRemoveCmd() *cobra.Command { Aliases: []string{"delete"}, DisableAutoGenTag: true, ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return compInstalledItems(cli.name, 
args, toComplete) + return compInstalledItems(cli.name, args, toComplete, cli.cfg) }, RunE: func(_ *cobra.Command, args []string) error { return cli.remove(args, purge, force, all) @@ -333,7 +333,7 @@ func (cli cliItem) newUpgradeCmd() *cobra.Command { Example: cli.upgradeHelp.example, DisableAutoGenTag: true, ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return compInstalledItems(cli.name, args, toComplete) + return compInstalledItems(cli.name, args, toComplete, cli.cfg) }, RunE: func(_ *cobra.Command, args []string) error { return cli.upgrade(args, force, all) @@ -381,7 +381,7 @@ func (cli cliItem) inspect(args []string, url string, diff bool, rev bool, noMet continue } - if err = inspectItem(item, !noMetrics); err != nil { + if err = inspectItem(item, !noMetrics, cfg.Cscli.Output, cfg.Cscli.PrometheusUrl); err != nil { return err } @@ -411,7 +411,7 @@ func (cli cliItem) newInspectCmd() *cobra.Command { Args: cobra.MinimumNArgs(1), DisableAutoGenTag: true, ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return compInstalledItems(cli.name, args, toComplete) + return compInstalledItems(cli.name, args, toComplete, cli.cfg) }, RunE: func(_ *cobra.Command, args []string) error { return cli.inspect(args, url, diff, rev, noMetrics) @@ -428,6 +428,8 @@ func (cli cliItem) newInspectCmd() *cobra.Command { } func (cli cliItem) list(args []string, all bool) error { + cfg := cli.cfg() + hub, err := require.Hub(cli.cfg(), nil, log.StandardLogger()) if err != nil { return err @@ -440,7 +442,7 @@ func (cli cliItem) list(args []string, all bool) error { return err } - if err = listItems(color.Output, []string{cli.name}, items, false); err != nil { + if err = listItems(color.Output, []string{cli.name}, items, false, cfg.Cscli.Output); err != nil { return err } diff --git a/cmd/crowdsec-cli/items.go b/cmd/crowdsec-cli/items.go index 
4ae643151f2..9af432c32c1 100644 --- a/cmd/crowdsec-cli/items.go +++ b/cmd/crowdsec-cli/items.go @@ -54,8 +54,8 @@ func selectItems(hub *cwhub.Hub, itemType string, args []string, installedOnly b return items, nil } -func listItems(out io.Writer, itemTypes []string, items map[string][]*cwhub.Item, omitIfEmpty bool) error { - switch csConfig.Cscli.Output { +func listItems(out io.Writer, itemTypes []string, items map[string][]*cwhub.Item, omitIfEmpty bool, output string) error { + switch output { case "human": nothingToDisplay := true @@ -143,8 +143,8 @@ func listItems(out io.Writer, itemTypes []string, items map[string][]*cwhub.Item return nil } -func inspectItem(item *cwhub.Item, showMetrics bool) error { - switch csConfig.Cscli.Output { +func inspectItem(item *cwhub.Item, showMetrics bool, output string, prometheusURL string) error { + switch output { case "human", "raw": enc := yaml.NewEncoder(os.Stdout) enc.SetIndent(2) @@ -161,7 +161,7 @@ func inspectItem(item *cwhub.Item, showMetrics bool) error { fmt.Print(string(b)) } - if csConfig.Cscli.Output != "human" { + if output != "human" { return nil } @@ -174,7 +174,7 @@ func inspectItem(item *cwhub.Item, showMetrics bool) error { if showMetrics { fmt.Printf("\nCurrent metrics: \n") - if err := ShowMetrics(item); err != nil { + if err := ShowMetrics(prometheusURL, item); err != nil { return err } } diff --git a/cmd/crowdsec-cli/support.go b/cmd/crowdsec-cli/support.go index 737411e28b8..418a981adee 100644 --- a/cmd/crowdsec-cli/support.go +++ b/cmd/crowdsec-cli/support.go @@ -154,7 +154,7 @@ func collectHubItems(hub *cwhub.Hub, itemType string) []byte { log.Warnf("could not collect %s list: %s", itemType, err) } - if err := listItems(out, []string{itemType}, items, false); err != nil { + if err := listItems(out, []string{itemType}, items, false, "human"); err != nil { log.Warnf("could not collect %s list: %s", itemType, err) } diff --git a/pkg/acquisition/acquisition_test.go b/pkg/acquisition/acquisition_test.go 
index 33e4948552a..1fbac2cdc00 100644 --- a/pkg/acquisition/acquisition_test.go +++ b/pkg/acquisition/acquisition_test.go @@ -1,6 +1,7 @@ package acquisition import ( + "errors" "fmt" "strings" "testing" @@ -50,7 +51,7 @@ func (f *MockSource) Configure(cfg []byte, logger *log.Entry, metricsLevel int) } if f.Toto == "" { - return fmt.Errorf("expect non-empty toto") + return errors.New("expect non-empty toto") } return nil @@ -64,7 +65,7 @@ func (f *MockSource) GetAggregMetrics() []prometheus.Collector { func (f *MockSource) Dump() interface{} { return f } func (f *MockSource) GetName() string { return "mock" } func (f *MockSource) ConfigureByDSN(string, map[string]string, *log.Entry, string) error { - return fmt.Errorf("not supported") + return errors.New("not supported") } func (f *MockSource) GetUuid() string { return "" } @@ -73,7 +74,7 @@ type MockSourceCantRun struct { MockSource } -func (f *MockSourceCantRun) CanRun() error { return fmt.Errorf("can't run bro") } +func (f *MockSourceCantRun) CanRun() error { return errors.New("can't run bro") } func (f *MockSourceCantRun) GetName() string { return "mock_cant_run" } // appendMockSource is only used to add mock source for tests @@ -331,14 +332,14 @@ func (f *MockCat) OneShotAcquisition(out chan types.Event, tomb *tomb.Tomb) erro return nil } func (f *MockCat) StreamingAcquisition(chan types.Event, *tomb.Tomb) error { - return fmt.Errorf("can't run in tail") + return errors.New("can't run in tail") } func (f *MockCat) CanRun() error { return nil } func (f *MockCat) GetMetrics() []prometheus.Collector { return nil } func (f *MockCat) GetAggregMetrics() []prometheus.Collector { return nil } func (f *MockCat) Dump() interface{} { return f } func (f *MockCat) ConfigureByDSN(string, map[string]string, *log.Entry, string) error { - return fmt.Errorf("not supported") + return errors.New("not supported") } func (f *MockCat) GetUuid() string { return "" } @@ -366,7 +367,7 @@ func (f *MockTail) UnmarshalConfig(cfg []byte) 
error { return nil } func (f *MockTail) GetName() string { return "mock_tail" } func (f *MockTail) GetMode() string { return "tail" } func (f *MockTail) OneShotAcquisition(out chan types.Event, tomb *tomb.Tomb) error { - return fmt.Errorf("can't run in cat mode") + return errors.New("can't run in cat mode") } func (f *MockTail) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { for i := 0; i < 10; i++ { @@ -383,7 +384,7 @@ func (f *MockTail) GetMetrics() []prometheus.Collector { return nil } func (f *MockTail) GetAggregMetrics() []prometheus.Collector { return nil } func (f *MockTail) Dump() interface{} { return f } func (f *MockTail) ConfigureByDSN(string, map[string]string, *log.Entry, string) error { - return fmt.Errorf("not supported") + return errors.New("not supported") } func (f *MockTail) GetUuid() string { return "" } @@ -457,9 +458,9 @@ func (f *MockTailError) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) evt.Line.Src = "test" out <- evt } - t.Kill(fmt.Errorf("got error (tomb)")) + t.Kill(errors.New("got error (tomb)")) - return fmt.Errorf("got error") + return errors.New("got error") } func TestStartAcquisitionTailError(t *testing.T) { @@ -512,7 +513,7 @@ func (f *MockSourceByDSN) GetName() string func (f *MockSourceByDSN) ConfigureByDSN(dsn string, labels map[string]string, logger *log.Entry, uuid string) error { dsn = strings.TrimPrefix(dsn, "mockdsn://") if dsn != "test_expect" { - return fmt.Errorf("unexpected value") + return errors.New("unexpected value") } return nil diff --git a/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go b/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go index 11842e61ff2..12e01ddf609 100644 --- a/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go +++ b/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go @@ -1,6 +1,7 @@ package cloudwatchacquisition import ( + "errors" "fmt" "net" "os" @@ -43,7 +44,7 @@ func deleteAllLogGroups(t *testing.T, cw *CloudwatchSource) { func 
checkForLocalStackAvailability() error { v := os.Getenv("AWS_ENDPOINT_FORCE") if v == "" { - return fmt.Errorf("missing aws endpoint for tests : AWS_ENDPOINT_FORCE") + return errors.New("missing aws endpoint for tests : AWS_ENDPOINT_FORCE") } v = strings.TrimPrefix(v, "http://") diff --git a/pkg/csconfig/config_paths.go b/pkg/csconfig/config_paths.go index 3de05ee0292..7675b90d7dd 100644 --- a/pkg/csconfig/config_paths.go +++ b/pkg/csconfig/config_paths.go @@ -1,6 +1,7 @@ package csconfig import ( + "errors" "fmt" "path/filepath" ) @@ -19,11 +20,11 @@ type ConfigurationPaths struct { func (c *Config) loadConfigurationPaths() error { var err error if c.ConfigPaths == nil { - return fmt.Errorf("no configuration paths provided") + return errors.New("no configuration paths provided") } if c.ConfigPaths.DataDir == "" { - return fmt.Errorf("please provide a data directory with the 'data_dir' directive in the 'config_paths' section") + return errors.New("please provide a data directory with the 'data_dir' directive in the 'config_paths' section") } if c.ConfigPaths.HubDir == "" { diff --git a/pkg/longpollclient/client.go b/pkg/longpollclient/client.go index e93870a2869..9fa3b4b3f9a 100644 --- a/pkg/longpollclient/client.go +++ b/pkg/longpollclient/client.go @@ -46,7 +46,7 @@ type pollResponse struct { ErrorMessage string `json:"error"` } -var errUnauthorized = fmt.Errorf("user is not authorized to use PAPI") +var errUnauthorized = errors.New("user is not authorized to use PAPI") const timeoutMessage = "no events before timeout" @@ -225,7 +225,7 @@ func (c *LongPollClient) PullOnce(since time.Time) ([]Event, error) { func NewLongPollClient(config LongPollClientConfig) (*LongPollClient, error) { var logger *log.Entry if config.Url == (url.URL{}) { - return nil, fmt.Errorf("url is required") + return nil, errors.New("url is required") } if config.Logger == nil { logger = log.WithField("component", "longpollclient") diff --git a/pkg/parser/parsing_test.go 
b/pkg/parser/parsing_test.go index 04d08cc2785..d009bd0c515 100644 --- a/pkg/parser/parsing_test.go +++ b/pkg/parser/parsing_test.go @@ -131,7 +131,7 @@ func testOneParser(pctx *UnixParserCtx, ectx EnricherCtx, dir string, b *testing } for n := 0; n < count; n++ { if testFile(tests, *pctx, pnodes) != true { - return fmt.Errorf("test failed !") + return errors.New("test failed !") } } return nil @@ -296,7 +296,7 @@ func testSubSet(testSet TestFile, pctx UnixParserCtx, nodes []Node) (bool, error */ if len(testSet.Results) == 0 && len(results) == 0 { log.Fatal("No results, no tests, abort.") - return false, fmt.Errorf("no tests, no results") + return false, errors.New("no tests, no results") } reCheck: From 87564ea462254d00859ae6b3e5575490b663b0de Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 2 May 2024 10:59:24 +0200 Subject: [PATCH 109/318] pkg/dumps,hubtest: use yaml.v3 (#2972) * pkg/dumps,hubtest: use yaml.v3 * lint (whitespace/fmt.Errorf) --- .golangci.yml | 7 ---- pkg/csplugin/broker_test.go | 5 ++- pkg/dumps/bucket_dump.go | 3 +- pkg/dumps/parser_dump.go | 2 +- pkg/hubtest/coverage.go | 32 +++++++++------ pkg/hubtest/hubtest_item.go | 72 +++++++++++++++++----------------- pkg/hubtest/parser_assert.go | 29 +++++++------- pkg/hubtest/scenario_assert.go | 18 ++++----- 8 files changed, 87 insertions(+), 81 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 9f059aa8aa9..8e60a3ca8f8 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -100,13 +100,6 @@ linters-settings: - "!**/pkg/appsec/appsec.go" - "!**/pkg/appsec/loader.go" - "!**/pkg/csplugin/broker.go" - - "!**/pkg/csplugin/broker_test.go" - - "!**/pkg/dumps/bucket_dump.go" - - "!**/pkg/dumps/parser_dump.go" - - "!**/pkg/hubtest/coverage.go" - - "!**/pkg/hubtest/hubtest_item.go" - - "!**/pkg/hubtest/parser_assert.go" - - "!**/pkg/hubtest/scenario_assert.go" - "!**/pkg/leakybucket/buckets_test.go" - "!**/pkg/leakybucket/manager_load.go" - 
"!**/pkg/metabase/metabase.go" diff --git a/pkg/csplugin/broker_test.go b/pkg/csplugin/broker_test.go index 9adb35ad7cc..34c9ce7d684 100644 --- a/pkg/csplugin/broker_test.go +++ b/pkg/csplugin/broker_test.go @@ -14,7 +14,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "gopkg.in/tomb.v2" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" "github.com/crowdsecurity/go-cs-lib/cstest" @@ -48,7 +48,7 @@ func (s *PluginSuite) writeconfig(config PluginConfig) { data, err := yaml.Marshal(&config) require.NoError(t, err, "unable to marshal config file") - err = os.WriteFile(s.pluginConfig, data, 0644) + err = os.WriteFile(s.pluginConfig, data, 0o644) require.NoError(t, err, "unable to write config file %s", s.pluginConfig) } @@ -135,6 +135,7 @@ func (s *PluginSuite) TestBrokerInit() { if tc.action != nil { tc.action(t) } + _, err := s.InitBroker(&tc.procCfg) cstest.RequireErrorContains(t, err, tc.expectedErr) }) diff --git a/pkg/dumps/bucket_dump.go b/pkg/dumps/bucket_dump.go index 5f5ce1c4028..328c581928b 100644 --- a/pkg/dumps/bucket_dump.go +++ b/pkg/dumps/bucket_dump.go @@ -4,8 +4,9 @@ import ( "io" "os" + "gopkg.in/yaml.v3" + "github.com/crowdsecurity/crowdsec/pkg/types" - "gopkg.in/yaml.v2" ) type BucketPourInfo map[string][]types.Event diff --git a/pkg/dumps/parser_dump.go b/pkg/dumps/parser_dump.go index 9b4cdb1c22b..227f96315d4 100644 --- a/pkg/dumps/parser_dump.go +++ b/pkg/dumps/parser_dump.go @@ -12,7 +12,7 @@ import ( "github.com/fatih/color" diff "github.com/r3labs/diff/v2" log "github.com/sirupsen/logrus" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" "github.com/crowdsecurity/go-cs-lib/maptools" diff --git a/pkg/hubtest/coverage.go b/pkg/hubtest/coverage.go index dc3d1d13ad2..4156def06d7 100644 --- a/pkg/hubtest/coverage.go +++ b/pkg/hubtest/coverage.go @@ -2,27 +2,30 @@ package hubtest import ( "bufio" + "errors" "fmt" "os" "path/filepath" "strings" + log "github.com/sirupsen/logrus" + "gopkg.in/yaml.v3" + + 
"github.com/crowdsecurity/go-cs-lib/maptools" + "github.com/crowdsecurity/crowdsec/pkg/appsec/appsec_rule" "github.com/crowdsecurity/crowdsec/pkg/cwhub" - "github.com/crowdsecurity/go-cs-lib/maptools" - log "github.com/sirupsen/logrus" - "gopkg.in/yaml.v2" ) type Coverage struct { Name string TestsCount int - PresentIn map[string]bool //poorman's set + PresentIn map[string]bool // poorman's set } func (h *HubTest) GetAppsecCoverage() ([]Coverage, error) { if len(h.HubIndex.GetItemMap(cwhub.APPSEC_RULES)) == 0 { - return nil, fmt.Errorf("no appsec rules in hub index") + return nil, errors.New("no appsec rules in hub index") } // populate from hub, iterate in alphabetical order @@ -40,16 +43,18 @@ func (h *HubTest) GetAppsecCoverage() ([]Coverage, error) { // parser the expressions a-la-oneagain appsecTestConfigs, err := filepath.Glob(".appsec-tests/*/config.yaml") if err != nil { - return nil, fmt.Errorf("while find appsec-tests config: %s", err) + return nil, fmt.Errorf("while find appsec-tests config: %w", err) } for _, appsecTestConfigPath := range appsecTestConfigs { configFileData := &HubTestItemConfig{} + yamlFile, err := os.ReadFile(appsecTestConfigPath) if err != nil { log.Printf("unable to open appsec test config file '%s': %s", appsecTestConfigPath, err) continue } + err = yaml.Unmarshal(yamlFile, configFileData) if err != nil { return nil, fmt.Errorf("unmarshal: %v", err) @@ -57,14 +62,17 @@ func (h *HubTest) GetAppsecCoverage() ([]Coverage, error) { for _, appsecRulesFile := range configFileData.AppsecRules { appsecRuleData := &appsec_rule.CustomRule{} + yamlFile, err := os.ReadFile(appsecRulesFile) if err != nil { log.Printf("unable to open appsec rule '%s': %s", appsecRulesFile, err) } + err = yaml.Unmarshal(yamlFile, appsecRuleData) if err != nil { return nil, fmt.Errorf("unmarshal: %v", err) } + appsecRuleName := appsecRuleData.Name for idx, cov := range coverage { @@ -81,7 +89,7 @@ func (h *HubTest) GetAppsecCoverage() ([]Coverage, error) { func (h 
*HubTest) GetParsersCoverage() ([]Coverage, error) { if len(h.HubIndex.GetItemMap(cwhub.PARSERS)) == 0 { - return nil, fmt.Errorf("no parsers in hub index") + return nil, errors.New("no parsers in hub index") } // populate from hub, iterate in alphabetical order @@ -99,13 +107,13 @@ func (h *HubTest) GetParsersCoverage() ([]Coverage, error) { // parser the expressions a-la-oneagain passerts, err := filepath.Glob(".tests/*/parser.assert") if err != nil { - return nil, fmt.Errorf("while find parser asserts : %s", err) + return nil, fmt.Errorf("while find parser asserts: %w", err) } for _, assert := range passerts { file, err := os.Open(assert) if err != nil { - return nil, fmt.Errorf("while reading %s : %s", assert, err) + return nil, fmt.Errorf("while reading %s: %w", assert, err) } scanner := bufio.NewScanner(file) @@ -167,7 +175,7 @@ func (h *HubTest) GetParsersCoverage() ([]Coverage, error) { func (h *HubTest) GetScenariosCoverage() ([]Coverage, error) { if len(h.HubIndex.GetItemMap(cwhub.SCENARIOS)) == 0 { - return nil, fmt.Errorf("no scenarios in hub index") + return nil, errors.New("no scenarios in hub index") } // populate from hub, iterate in alphabetical order @@ -185,13 +193,13 @@ func (h *HubTest) GetScenariosCoverage() ([]Coverage, error) { // parser the expressions a-la-oneagain passerts, err := filepath.Glob(".tests/*/scenario.assert") if err != nil { - return nil, fmt.Errorf("while find scenario asserts : %s", err) + return nil, fmt.Errorf("while find scenario asserts: %w", err) } for _, assert := range passerts { file, err := os.Open(assert) if err != nil { - return nil, fmt.Errorf("while reading %s : %s", assert, err) + return nil, fmt.Errorf("while reading %s: %w", assert, err) } scanner := bufio.NewScanner(file) diff --git a/pkg/hubtest/hubtest_item.go b/pkg/hubtest/hubtest_item.go index b8a042f071f..1a2b4863105 100644 --- a/pkg/hubtest/hubtest_item.go +++ b/pkg/hubtest/hubtest_item.go @@ -10,7 +10,7 @@ import ( "strings" log 
"github.com/sirupsen/logrus" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" @@ -28,7 +28,7 @@ type HubTestItemConfig struct { LogType string `yaml:"log_type,omitempty"` Labels map[string]string `yaml:"labels,omitempty"` IgnoreParsers bool `yaml:"ignore_parsers,omitempty"` // if we test a scenario, we don't want to assert on Parser - OverrideStatics []parser.ExtraField `yaml:"override_statics,omitempty"` //Allow to override statics. Executed before s00 + OverrideStatics []parser.ExtraField `yaml:"override_statics,omitempty"` // Allow to override statics. Executed before s00 } type HubTestItem struct { @@ -200,12 +200,12 @@ func (t *HubTestItem) InstallHub() error { b, err := yaml.Marshal(n) if err != nil { - return fmt.Errorf("unable to marshal overrides: %s", err) + return fmt.Errorf("unable to marshal overrides: %w", err) } tgtFilename := fmt.Sprintf("%s/parsers/s00-raw/00_overrides.yaml", t.RuntimePath) if err := os.WriteFile(tgtFilename, b, os.ModePerm); err != nil { - return fmt.Errorf("unable to write overrides to '%s': %s", tgtFilename, err) + return fmt.Errorf("unable to write overrides to '%s': %w", tgtFilename, err) } } @@ -267,10 +267,10 @@ func (t *HubTestItem) RunWithNucleiTemplate() error { } if err := os.Chdir(testPath); err != nil { - return fmt.Errorf("can't 'cd' to '%s': %s", testPath, err) + return fmt.Errorf("can't 'cd' to '%s': %w", testPath, err) } - //machine add + // machine add cmdArgs := []string{"-c", t.RuntimeConfigFilePath, "machines", "add", "testMachine", "--force", "--auto"} cscliRegisterCmd := exec.Command(t.CscliPath, cmdArgs...) @@ -282,7 +282,7 @@ func (t *HubTestItem) RunWithNucleiTemplate() error { } } - //hardcode bouncer key + // hardcode bouncer key cmdArgs = []string{"-c", t.RuntimeConfigFilePath, "bouncers", "add", "appsectests", "-k", TestBouncerApiKey} cscliBouncerCmd := exec.Command(t.CscliPath, cmdArgs...) 
@@ -294,13 +294,13 @@ func (t *HubTestItem) RunWithNucleiTemplate() error { } } - //start crowdsec service + // start crowdsec service cmdArgs = []string{"-c", t.RuntimeConfigFilePath} crowdsecDaemon := exec.Command(t.CrowdSecPath, cmdArgs...) crowdsecDaemon.Start() - //wait for the appsec port to be available + // wait for the appsec port to be available if _, err := IsAlive(t.AppSecHost); err != nil { crowdsecLog, err2 := os.ReadFile(crowdsecLogFile) if err2 != nil { @@ -310,27 +310,27 @@ func (t *HubTestItem) RunWithNucleiTemplate() error { log.Errorf("%s\n", string(crowdsecLog)) } - return fmt.Errorf("appsec is down: %s", err) + return fmt.Errorf("appsec is down: %w", err) } // check if the target is available nucleiTargetParsedURL, err := url.Parse(t.NucleiTargetHost) if err != nil { - return fmt.Errorf("unable to parse target '%s': %s", t.NucleiTargetHost, err) + return fmt.Errorf("unable to parse target '%s': %w", t.NucleiTargetHost, err) } nucleiTargetHost := nucleiTargetParsedURL.Host if _, err := IsAlive(nucleiTargetHost); err != nil { - return fmt.Errorf("target is down: %s", err) + return fmt.Errorf("target is down: %w", err) } nucleiConfig := NucleiConfig{ Path: "nuclei", OutputDir: t.RuntimePath, - CmdLineOptions: []string{"-ev", //allow variables from environment - "-nc", //no colors in output - "-dresp", //dump response - "-j", //json output + CmdLineOptions: []string{"-ev", // allow variables from environment + "-nc", // no colors in output + "-dresp", // dump response + "-j", // json output }, } @@ -341,6 +341,7 @@ func (t *HubTestItem) RunWithNucleiTemplate() error { t.Success = true } else { log.Errorf("Appsec test %s failed: %s", t.Name, err) + crowdsecLog, err := os.ReadFile(crowdsecLogFile) if err != nil { log.Errorf("unable to read crowdsec log file '%s': %s", crowdsecLogFile, err) @@ -355,6 +356,7 @@ func (t *HubTestItem) RunWithNucleiTemplate() error { t.Success = true } else { log.Errorf("Appsec test %s failed: %s", t.Name, err) + 
crowdsecLog, err := os.ReadFile(crowdsecLogFile) if err != nil { log.Errorf("unable to read crowdsec log file '%s': %s", crowdsecLogFile, err) @@ -376,7 +378,7 @@ func (t *HubTestItem) RunWithLogFile() error { return fmt.Errorf("test '%s' doesn't exist in '%s', exiting", t.Name, t.HubTestPath) } - currentDir, err := os.Getwd() //xx + currentDir, err := os.Getwd() // xx if err != nil { return fmt.Errorf("can't get current directory: %+v", err) } @@ -397,7 +399,7 @@ func (t *HubTestItem) RunWithLogFile() error { } if err = Copy(t.HubIndexFile, filepath.Join(t.RuntimeHubPath, ".index.json")); err != nil { - return fmt.Errorf("unable to copy .index.json file in '%s': %s", filepath.Join(t.RuntimeHubPath, ".index.json"), err) + return fmt.Errorf("unable to copy .index.json file in '%s': %w", filepath.Join(t.RuntimeHubPath, ".index.json"), err) } // create results folder @@ -424,12 +426,12 @@ func (t *HubTestItem) RunWithLogFile() error { // copy template patterns folder to runtime folder if err = CopyDir(crowdsecPatternsFolder, t.RuntimePatternsPath); err != nil { - return fmt.Errorf("unable to copy 'patterns' from '%s' to '%s': %s", crowdsecPatternsFolder, t.RuntimePatternsPath, err) + return fmt.Errorf("unable to copy 'patterns' from '%s' to '%s': %w", crowdsecPatternsFolder, t.RuntimePatternsPath, err) } // install the hub in the runtime folder if err = t.InstallHub(); err != nil { - return fmt.Errorf("unable to install hub in '%s': %s", t.RuntimeHubPath, err) + return fmt.Errorf("unable to install hub in '%s': %w", t.RuntimeHubPath, err) } logFile := t.Config.LogFile @@ -437,12 +439,12 @@ func (t *HubTestItem) RunWithLogFile() error { dsn := fmt.Sprintf("file://%s", logFile) if err = os.Chdir(testPath); err != nil { - return fmt.Errorf("can't 'cd' to '%s': %s", testPath, err) + return fmt.Errorf("can't 'cd' to '%s': %w", testPath, err) } logFileStat, err := os.Stat(logFile) if err != nil { - return fmt.Errorf("unable to stat log file '%s': %s", logFile, err) + return 
fmt.Errorf("unable to stat log file '%s': %w", logFile, err) } if logFileStat.Size() == 0 { @@ -481,7 +483,7 @@ func (t *HubTestItem) RunWithLogFile() error { } if err := os.Chdir(currentDir); err != nil { - return fmt.Errorf("can't 'cd' to '%s': %s", currentDir, err) + return fmt.Errorf("can't 'cd' to '%s': %w", currentDir, err) } // assert parsers @@ -498,20 +500,20 @@ func (t *HubTestItem) RunWithLogFile() error { assertFileStat, err := os.Stat(t.ParserAssert.File) if err != nil { - return fmt.Errorf("error while stats '%s': %s", t.ParserAssert.File, err) + return fmt.Errorf("error while stats '%s': %w", t.ParserAssert.File, err) } if assertFileStat.Size() == 0 { assertData, err := t.ParserAssert.AutoGenFromFile(t.ParserResultFile) if err != nil { - return fmt.Errorf("couldn't generate assertion: %s", err) + return fmt.Errorf("couldn't generate assertion: %w", err) } t.ParserAssert.AutoGenAssertData = assertData t.ParserAssert.AutoGenAssert = true } else { if err := t.ParserAssert.AssertFile(t.ParserResultFile); err != nil { - return fmt.Errorf("unable to run assertion on file '%s': %s", t.ParserResultFile, err) + return fmt.Errorf("unable to run assertion on file '%s': %w", t.ParserResultFile, err) } } } @@ -540,20 +542,20 @@ func (t *HubTestItem) RunWithLogFile() error { assertFileStat, err := os.Stat(t.ScenarioAssert.File) if err != nil { - return fmt.Errorf("error while stats '%s': %s", t.ScenarioAssert.File, err) + return fmt.Errorf("error while stats '%s': %w", t.ScenarioAssert.File, err) } if assertFileStat.Size() == 0 { assertData, err := t.ScenarioAssert.AutoGenFromFile(t.ScenarioResultFile) if err != nil { - return fmt.Errorf("couldn't generate assertion: %s", err) + return fmt.Errorf("couldn't generate assertion: %w", err) } t.ScenarioAssert.AutoGenAssertData = assertData t.ScenarioAssert.AutoGenAssert = true } else { if err := t.ScenarioAssert.AssertFile(t.ScenarioResultFile); err != nil { - return fmt.Errorf("unable to run assertion on file '%s': 
%s", t.ScenarioResultFile, err) + return fmt.Errorf("unable to run assertion on file '%s': %w", t.ScenarioResultFile, err) } } } @@ -591,7 +593,7 @@ func (t *HubTestItem) Run() error { } if err = Copy(t.HubIndexFile, filepath.Join(t.RuntimeHubPath, ".index.json")); err != nil { - return fmt.Errorf("unable to copy .index.json file in '%s': %s", filepath.Join(t.RuntimeHubPath, ".index.json"), err) + return fmt.Errorf("unable to copy .index.json file in '%s': %w", filepath.Join(t.RuntimeHubPath, ".index.json"), err) } // create results folder @@ -618,7 +620,7 @@ func (t *HubTestItem) Run() error { // copy template patterns folder to runtime folder if err = CopyDir(crowdsecPatternsFolder, t.RuntimePatternsPath); err != nil { - return fmt.Errorf("unable to copy 'patterns' from '%s' to '%s': %s", crowdsecPatternsFolder, t.RuntimePatternsPath, err) + return fmt.Errorf("unable to copy 'patterns' from '%s' to '%s': %w", crowdsecPatternsFolder, t.RuntimePatternsPath, err) } // create the appsec-configs dir @@ -626,7 +628,7 @@ func (t *HubTestItem) Run() error { return fmt.Errorf("unable to create folder '%s': %+v", t.RuntimePath, err) } - //if it's an appsec rule test, we need acquis and appsec profile + // if it's an appsec rule test, we need acquis and appsec profile if len(t.Config.AppsecRules) > 0 { // copy template acquis file to runtime folder log.Debugf("copying %s to %s", t.TemplateAcquisPath, t.RuntimeAcquisFilePath) @@ -640,15 +642,15 @@ func (t *HubTestItem) Run() error { if err = Copy(t.TemplateAppsecProfilePath, filepath.Join(t.RuntimePath, "appsec-configs", "config.yaml")); err != nil { return fmt.Errorf("unable to copy '%s' to '%s': %v", t.TemplateAppsecProfilePath, filepath.Join(t.RuntimePath, "appsec-configs", "config.yaml"), err) } - } else { //otherwise we drop a blank acquis file + } else { // otherwise we drop a blank acquis file if err = os.WriteFile(t.RuntimeAcquisFilePath, []byte(""), os.ModePerm); err != nil { - return fmt.Errorf("unable to write 
blank acquis file '%s': %s", t.RuntimeAcquisFilePath, err) + return fmt.Errorf("unable to write blank acquis file '%s': %w", t.RuntimeAcquisFilePath, err) } } // install the hub in the runtime folder if err = t.InstallHub(); err != nil { - return fmt.Errorf("unable to install hub in '%s': %s", t.RuntimeHubPath, err) + return fmt.Errorf("unable to install hub in '%s': %w", t.RuntimeHubPath, err) } if t.Config.LogFile != "" { diff --git a/pkg/hubtest/parser_assert.go b/pkg/hubtest/parser_assert.go index 7eec8e535e5..d79d26fb9d0 100644 --- a/pkg/hubtest/parser_assert.go +++ b/pkg/hubtest/parser_assert.go @@ -2,17 +2,19 @@ package hubtest import ( "bufio" + "errors" "fmt" "os" "strings" "github.com/antonmedv/expr" log "github.com/sirupsen/logrus" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" + + "github.com/crowdsecurity/go-cs-lib/maptools" "github.com/crowdsecurity/crowdsec/pkg/dumps" "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" - "github.com/crowdsecurity/go-cs-lib/maptools" ) type AssertFail struct { @@ -69,13 +71,12 @@ func (p *ParserAssert) LoadTest(filename string) error { func (p *ParserAssert) AssertFile(testFile string) error { file, err := os.Open(p.File) - if err != nil { - return fmt.Errorf("failed to open") + return errors.New("failed to open") } if err := p.LoadTest(testFile); err != nil { - return fmt.Errorf("unable to load parser dump file '%s': %s", testFile, err) + return fmt.Errorf("unable to load parser dump file '%s': %w", testFile, err) } scanner := bufio.NewScanner(file) @@ -107,6 +108,7 @@ func (p *ParserAssert) AssertFile(testFile string) error { } match := variableRE.FindStringSubmatch(scanner.Text()) + var variable string if len(match) == 0 { @@ -127,7 +129,7 @@ func (p *ParserAssert) AssertFile(testFile string) error { continue } - //fmt.Printf(" %s '%s'\n", emoji.GreenSquare, scanner.Text()) + // fmt.Printf(" %s '%s'\n", emoji.GreenSquare, scanner.Text()) } file.Close() @@ -135,7 +137,7 @@ func (p *ParserAssert) AssertFile(testFile 
string) error { if p.NbAssert == 0 { assertData, err := p.AutoGenFromFile(testFile) if err != nil { - return fmt.Errorf("couldn't generate assertion: %s", err) + return fmt.Errorf("couldn't generate assertion: %w", err) } p.AutoGenAssertData = assertData @@ -150,8 +152,8 @@ func (p *ParserAssert) AssertFile(testFile string) error { } func (p *ParserAssert) RunExpression(expression string) (interface{}, error) { - //debug doesn't make much sense with the ability to evaluate "on the fly" - //var debugFilter *exprhelpers.ExprDebugger + // debug doesn't make much sense with the ability to evaluate "on the fly" + // var debugFilter *exprhelpers.ExprDebugger var output interface{} env := map[string]interface{}{"results": *p.TestData} @@ -162,7 +164,7 @@ func (p *ParserAssert) RunExpression(expression string) (interface{}, error) { return output, err } - //dump opcode in trace level + // dump opcode in trace level log.Tracef("%s", runtimeFilter.Disassemble()) output, err = expr.Run(runtimeFilter, env) @@ -183,7 +185,6 @@ func (p *ParserAssert) EvalExpression(expression string) (string, error) { } ret, err := yaml.Marshal(output) - if err != nil { return "", err } @@ -213,16 +214,16 @@ func Escape(val string) string { } func (p *ParserAssert) AutoGenParserAssert() string { - //attempt to autogen parser asserts + // attempt to autogen parser asserts ret := fmt.Sprintf("len(results) == %d\n", len(*p.TestData)) - //sort map keys for consistent order + // sort map keys for consistent order stages := maptools.SortedKeys(*p.TestData) for _, stage := range stages { parsers := (*p.TestData)[stage] - //sort map keys for consistent order + // sort map keys for consistent order pnames := maptools.SortedKeys(parsers) for _, parser := range pnames { diff --git a/pkg/hubtest/scenario_assert.go b/pkg/hubtest/scenario_assert.go index 5195b814ef3..bb004daad49 100644 --- a/pkg/hubtest/scenario_assert.go +++ b/pkg/hubtest/scenario_assert.go @@ -2,6 +2,7 @@ package hubtest import ( "bufio" + 
"errors" "fmt" "io" "os" @@ -10,7 +11,7 @@ import ( "github.com/antonmedv/expr" log "github.com/sirupsen/logrus" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" "github.com/crowdsecurity/crowdsec/pkg/dumps" "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" @@ -77,13 +78,12 @@ func (s *ScenarioAssert) LoadTest(filename string, bucketpour string) error { func (s *ScenarioAssert) AssertFile(testFile string) error { file, err := os.Open(s.File) - if err != nil { - return fmt.Errorf("failed to open") + return errors.New("failed to open") } if err := s.LoadTest(testFile, ""); err != nil { - return fmt.Errorf("unable to load parser dump file '%s': %s", testFile, err) + return fmt.Errorf("unable to load parser dump file '%s': %w", testFile, err) } scanner := bufio.NewScanner(file) @@ -134,7 +134,7 @@ func (s *ScenarioAssert) AssertFile(testFile string) error { continue } - //fmt.Printf(" %s '%s'\n", emoji.GreenSquare, scanner.Text()) + // fmt.Printf(" %s '%s'\n", emoji.GreenSquare, scanner.Text()) } file.Close() @@ -142,7 +142,7 @@ func (s *ScenarioAssert) AssertFile(testFile string) error { if s.NbAssert == 0 { assertData, err := s.AutoGenFromFile(testFile) if err != nil { - return fmt.Errorf("couldn't generate assertion: %s", err) + return fmt.Errorf("couldn't generate assertion: %w", err) } s.AutoGenAssertData = assertData @@ -157,8 +157,8 @@ func (s *ScenarioAssert) AssertFile(testFile string) error { } func (s *ScenarioAssert) RunExpression(expression string) (interface{}, error) { - //debug doesn't make much sense with the ability to evaluate "on the fly" - //var debugFilter *exprhelpers.ExprDebugger + // debug doesn't make much sense with the ability to evaluate "on the fly" + // var debugFilter *exprhelpers.ExprDebugger var output interface{} env := map[string]interface{}{"results": *s.TestData} @@ -171,7 +171,7 @@ func (s *ScenarioAssert) RunExpression(expression string) (interface{}, error) { // log.Warningf("Failed building debugher for %s : %s", assert, err) // } - 
//dump opcode in trace level + // dump opcode in trace level log.Tracef("%s", runtimeFilter.Disassemble()) output, err = expr.Run(runtimeFilter, map[string]interface{}{"results": *s.TestData}) From c70a2fe9bf8122a154a89d7a54875b55fb71abf1 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 2 May 2024 11:01:31 +0200 Subject: [PATCH 110/318] update go dependencies (#2968) --- go.mod | 48 ++++++++++++----------- go.sum | 120 ++++++++++++++++++++++++++++----------------------------- 2 files changed, 85 insertions(+), 83 deletions(-) diff --git a/go.mod b/go.mod index 8afc2b8a095..d78908db0c6 100644 --- a/go.mod +++ b/go.mod @@ -11,18 +11,18 @@ require ( github.com/AlecAivazis/survey/v2 v2.3.7 github.com/Masterminds/semver/v3 v3.2.1 github.com/Masterminds/sprig/v3 v3.2.3 - github.com/agext/levenshtein v1.2.1 + github.com/agext/levenshtein v1.2.3 github.com/alexliesenfeld/health v0.8.0 github.com/antonmedv/expr v1.15.3 - github.com/appleboy/gin-jwt/v2 v2.8.0 + github.com/appleboy/gin-jwt/v2 v2.9.2 github.com/aquasecurity/table v1.8.0 - github.com/aws/aws-lambda-go v1.41.0 - github.com/aws/aws-sdk-go v1.48.15 - github.com/beevik/etree v1.1.0 - github.com/blackfireio/osinfo v1.0.3 + github.com/aws/aws-lambda-go v1.47.0 + github.com/aws/aws-sdk-go v1.52.0 + github.com/beevik/etree v1.3.0 + github.com/blackfireio/osinfo v1.0.5 github.com/bluele/gcache v0.0.2 github.com/buger/jsonparser v1.1.1 - github.com/c-robinson/iplib v1.0.3 + github.com/c-robinson/iplib v1.0.8 github.com/cespare/xxhash/v2 v2.2.0 github.com/corazawaf/libinjection-go v0.1.2 github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607 @@ -31,13 +31,13 @@ require ( github.com/crowdsecurity/grokky v0.2.1 github.com/crowdsecurity/machineid v1.0.2 github.com/davecgh/go-spew v1.1.1 - github.com/dghubble/sling v1.3.0 + github.com/dghubble/sling v1.4.2 github.com/docker/docker v24.0.9+incompatible github.com/docker/go-connections v0.4.0 - github.com/fatih/color 
v1.15.0 - github.com/fsnotify/fsnotify v1.6.0 + github.com/fatih/color v1.16.0 + github.com/fsnotify/fsnotify v1.7.0 github.com/gin-gonic/gin v1.9.1 - github.com/go-co-op/gocron v1.17.0 + github.com/go-co-op/gocron v1.37.0 github.com/go-openapi/errors v0.20.1 github.com/go-openapi/strfmt v0.19.11 github.com/go-openapi/swag v0.22.3 @@ -46,8 +46,8 @@ require ( github.com/goccy/go-yaml v1.11.0 github.com/gofrs/uuid v4.0.0+incompatible github.com/golang-jwt/jwt/v4 v4.5.0 - github.com/google/go-querystring v1.0.0 - github.com/google/uuid v1.3.0 + github.com/google/go-querystring v1.1.0 + github.com/google/uuid v1.6.0 github.com/google/winops v0.0.0-20230712152054-af9b550d0601 github.com/goombaio/namegenerator v0.0.0-20181006234301-989e774b106e github.com/gorilla/websocket v1.5.0 @@ -60,7 +60,7 @@ require ( github.com/jarcoal/httpmock v1.1.0 github.com/jszwec/csvutil v1.5.1 github.com/lithammer/dedent v1.1.0 - github.com/mattn/go-isatty v0.0.19 + github.com/mattn/go-isatty v0.0.20 github.com/mattn/go-sqlite3 v1.14.16 github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 github.com/nxadm/tail v1.4.8 @@ -103,14 +103,15 @@ require ( github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/bytedance/sonic v1.9.1 // indirect - github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect + github.com/bytedance/sonic v1.10.2 // indirect + github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d // indirect + github.com/chenzhuoyu/iasm v0.9.1 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect github.com/creack/pty v1.1.18 // indirect github.com/docker/distribution v2.8.2+incompatible // indirect github.com/docker/go-units v0.5.0 // indirect - github.com/gabriel-vasile/mimetype v1.4.2 // indirect + github.com/gabriel-vasile/mimetype v1.4.3 // 
indirect github.com/gin-contrib/sse v0.1.0 // indirect github.com/go-logr/logr v1.2.4 // indirect github.com/go-ole/go-ole v1.2.6 // indirect @@ -123,7 +124,7 @@ require ( github.com/go-openapi/spec v0.20.0 // indirect github.com/go-playground/locales v0.14.1 // indirect github.com/go-playground/universal-translator v0.18.1 // indirect - github.com/go-playground/validator/v10 v10.14.0 // indirect + github.com/go-playground/validator/v10 v10.17.0 // indirect github.com/go-stack/stack v1.8.0 // indirect github.com/goccy/go-json v0.10.2 // indirect github.com/gogo/protobuf v1.3.2 // indirect @@ -148,8 +149,8 @@ require ( github.com/json-iterator/go v1.1.12 // indirect github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect github.com/klauspost/compress v1.17.3 // indirect - github.com/klauspost/cpuid/v2 v2.2.4 // indirect - github.com/leodido/go-urn v1.2.4 // indirect + github.com/klauspost/cpuid/v2 v2.2.6 // indirect + github.com/leodido/go-urn v1.3.0 // indirect github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect github.com/magefile/mage v1.15.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect @@ -169,7 +170,7 @@ require ( github.com/oklog/run v1.0.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect - github.com/pelletier/go-toml/v2 v2.0.8 // indirect + github.com/pelletier/go-toml/v2 v2.1.1 // indirect github.com/petar-dambovaliev/aho-corasick v0.0.0-20230725210150-fb29fc3c913e // indirect github.com/pierrec/lz4/v4 v4.1.18 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect @@ -192,12 +193,13 @@ require ( github.com/tklauser/numcpus v0.6.0 // indirect github.com/toorop/go-dkim v0.0.0-20201103131630-e1cd1a0a5208 // indirect github.com/twitchyliquid64/golang-asm v0.15.1 // indirect - github.com/ugorji/go/codec v1.2.11 // indirect + github.com/ugorji/go/codec v1.2.12 // indirect 
github.com/vmihailenco/msgpack v4.0.4+incompatible // indirect github.com/yusufpapurcu/wmi v1.2.3 // indirect github.com/zclconf/go-cty v1.8.0 // indirect go.mongodb.org/mongo-driver v1.9.4 // indirect - golang.org/x/arch v0.3.0 // indirect + go.uber.org/atomic v1.10.0 // indirect + golang.org/x/arch v0.7.0 // indirect golang.org/x/net v0.24.0 // indirect golang.org/x/sync v0.6.0 // indirect golang.org/x/term v0.19.0 // indirect diff --git a/go.sum b/go.sum index 750439e4f0e..29af3c795fc 100644 --- a/go.sum +++ b/go.sum @@ -26,8 +26,8 @@ github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2/go.mod h1:HBCaDe github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/agext/levenshtein v1.2.1 h1:QmvMAjj2aEICytGiWzmxoE0x2KZvE0fvmqMOfy2tjT8= -github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= +github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo= +github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= github.com/ahmetalpbalkan/dlog v0.0.0-20170105205344-4fb5f8204f26 h1:pzStYMLAXM7CNQjS/Wn+zK9MUxDhSUNfVvnHsyQyjs0= github.com/ahmetalpbalkan/dlog v0.0.0-20170105205344-4fb5f8204f26/go.mod h1:ilK+u7u1HoqaDk0mjhh27QJB7PyWMreGffEvOCoEKiY= @@ -43,8 +43,8 @@ github.com/antonmedv/expr v1.15.3 h1:q3hOJZNvLvhqE8OHBs1cFRdbXFNKuA+bHmRaI+AmRmI github.com/antonmedv/expr v1.15.3/go.mod h1:0E/6TxnOlRNp81GMzX9QfDPAmHo2Phg00y4JUv1ihsE= github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw= github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod 
h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo= -github.com/appleboy/gin-jwt/v2 v2.8.0 h1:Glo7cb9eBR+hj8Y7WzgfkOlqCaNLjP+RV4dNO3fpdps= -github.com/appleboy/gin-jwt/v2 v2.8.0/go.mod h1:KsK7E8HTvRg3vOiumTsr/ntNTHbZ3IbHLe4Eto31p7k= +github.com/appleboy/gin-jwt/v2 v2.9.2 h1:GeS3lm9mb9HMmj7+GNjYUtpp3V1DAQ1TkUFa5poiZ7Y= +github.com/appleboy/gin-jwt/v2 v2.9.2/go.mod h1:mxGjKt9Lrx9Xusy1SrnmsCJMZG6UJwmdHN9bN27/QDw= github.com/appleboy/gofight/v2 v2.1.2 h1:VOy3jow4vIK8BRQJoC/I9muxyYlJ2yb9ht2hZoS3rf4= github.com/appleboy/gofight/v2 v2.1.2/go.mod h1:frW+U1QZEdDgixycTj4CygQ48yLTUhplt43+Wczp3rw= github.com/aquasecurity/table v1.8.0 h1:9ntpSwrUfjrM6/YviArlx/ZBGd6ix8W+MtojQcM7tv0= @@ -55,33 +55,38 @@ github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:o github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef h1:46PFijGLmAjMPwCCCo7Jf0W6f9slllCkkv7vyc1yOSg= github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= -github.com/aws/aws-lambda-go v1.41.0 h1:l/5fyVb6Ud9uYd411xdHZzSf2n86TakxzpvIoz7l+3Y= -github.com/aws/aws-lambda-go v1.41.0/go.mod h1:jwFe2KmMsHmffA1X2R09hH6lFzJQxzI8qK17ewzbQMM= +github.com/aws/aws-lambda-go v1.47.0 h1:0H8s0vumYx/YKs4sE7YM0ktwL2eWse+kfopsRI1sXVI= +github.com/aws/aws-lambda-go v1.47.0/go.mod h1:dpMpZgvWx5vuQJfBt0zqBha60q7Dd7RfgJv23DymV8A= github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= -github.com/aws/aws-sdk-go v1.48.15 h1:Gad2C4pLzuZDd5CA0Rvkfko6qUDDTOYru145gkO7w/Y= -github.com/aws/aws-sdk-go v1.48.15/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= -github.com/beevik/etree v1.1.0 h1:T0xke/WvNtMoCqgzPhkX2r4rjY3GDZFi+FjpRZY2Jbs= -github.com/beevik/etree v1.1.0/go.mod h1:r8Aw8JqVegEf0w2fDnATrX9VpkMcyFeM0FhwO62wh+A= +github.com/aws/aws-sdk-go v1.52.0 
h1:ptgek/4B2v/ljsjYSEvLQ8LTD+SQyrqhOOWvHc/VGPI= +github.com/aws/aws-sdk-go v1.52.0/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/beevik/etree v1.3.0 h1:hQTc+pylzIKDb23yYprodCWWTt+ojFfUZyzU09a/hmU= +github.com/beevik/etree v1.3.0/go.mod h1:aiPf89g/1k3AShMVAzriilpcE4R/Vuor90y83zVZWFc= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/blackfireio/osinfo v1.0.3 h1:Yk2t2GTPjBcESv6nDSWZKO87bGMQgO+Hi9OoXPpxX8c= -github.com/blackfireio/osinfo v1.0.3/go.mod h1:Pd987poVNmd5Wsx6PRPw4+w7kLlf9iJxoRKPtPAjOrA= +github.com/blackfireio/osinfo v1.0.5 h1:6hlaWzfcpb87gRmznVf7wSdhysGqLRz9V/xuSdCEXrA= +github.com/blackfireio/osinfo v1.0.5/go.mod h1:Pd987poVNmd5Wsx6PRPw4+w7kLlf9iJxoRKPtPAjOrA= github.com/bluele/gcache v0.0.2 h1:WcbfdXICg7G/DGBh1PFfcirkWOQV+v077yF1pSy3DGw= github.com/bluele/gcache v0.0.2/go.mod h1:m15KV+ECjptwSPxKhOhQoAFQVtUFjTVkc3H8o0t/fp0= github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM= -github.com/bytedance/sonic v1.9.1 h1:6iJ6NqdoxCDr6mbY8h18oSO+cShGSMRGCEo7F2h0x8s= -github.com/bytedance/sonic v1.9.1/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U= -github.com/c-robinson/iplib v1.0.3 h1:NG0UF0GoEsrC1/vyfX1Lx2Ss7CySWl3KqqXh3q4DdPU= -github.com/c-robinson/iplib v1.0.3/go.mod h1:i3LuuFL1hRT5gFpBRnEydzw8R6yhGkF4szNDIbF8pgo= +github.com/bytedance/sonic v1.10.0-rc/go.mod h1:ElCzW+ufi8qKqNW0FY314xriJhyJhuoJ3gFZdAHF7NM= +github.com/bytedance/sonic v1.10.2 h1:GQebETVBxYB7JGWJtLBi07OVzWwt+8dWA00gEVW2ZFE= 
+github.com/bytedance/sonic v1.10.2/go.mod h1:iZcSUejdk5aukTND/Eu/ivjQuEL0Cu9/rf50Hi0u/g4= +github.com/c-robinson/iplib v1.0.8 h1:exDRViDyL9UBLcfmlxxkY5odWX5092nPsQIykHXhIn4= +github.com/c-robinson/iplib v1.0.8/go.mod h1:i3LuuFL1hRT5gFpBRnEydzw8R6yhGkF4szNDIbF8pgo= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY= -github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 h1:qSGYFH7+jGhDF8vLC+iwCD4WpbV1EBDSzWkJODFLams= github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk= +github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d h1:77cEq6EriyTZ0g/qfRdp61a3Uu/AWrgIq2s0ClJV1g0= +github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d/go.mod h1:8EPpVsBuRksnlj1mLy4AWzRNQYxauNi62uWcE3to6eA= +github.com/chenzhuoyu/iasm v0.9.0/go.mod h1:Xjy2NpN3h7aUqeqM+woSuuvxmIe6+DDsiNLIrkAmYog= +github.com/chenzhuoyu/iasm v0.9.1 h1:tUHQJXo3NhBqw6s33wkGn9SP3bvrWLdlVIJ3hQBL7P0= +github.com/chenzhuoyu/iasm v0.9.1/go.mod h1:Xjy2NpN3h7aUqeqM+woSuuvxmIe6+DDsiNLIrkAmYog= github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/corazawaf/libinjection-go v0.1.2 h1:oeiV9pc5rvJ+2oqOqXEAMJousPpGiup6f7Y3nZj5GoM= @@ -112,8 +117,8 @@ github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dghubble/sling v1.3.0 h1:pZHjCJq4zJvc6qVQ5wN1jo5oNZlNE0+8T/h0XeXBUKU= 
-github.com/dghubble/sling v1.3.0/go.mod h1:XXShWaBWKzNLhu2OxikSNFrlsvowtz4kyRuXUG7oQKY= +github.com/dghubble/sling v1.4.2 h1:vs1HIGBbSl2SEALyU+irpYFLZMfc49Fp+jYryFebQjM= +github.com/dghubble/sling v1.4.2/go.mod h1:o0arCOz0HwfqYQJLrRtqunaWOn4X6jxE/6ORKRpVTD4= github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v24.0.9+incompatible h1:HPGzNmwfLZWdxHqK9/II92pyi1EpYKsAqcl4G0Of9v0= @@ -125,24 +130,23 @@ github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDD github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= -github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= +github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= +github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= github.com/foxcpp/go-mockdns v1.0.0 h1:7jBqxd3WDWwi/6WhDvacvH1XsN3rOLXyHM1uhvIx6FI= github.com/foxcpp/go-mockdns v1.0.0/go.mod h1:lgRN6+KxQBawyIghpnl5CezHFGS9VLzvtVlwxvzXTQ4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= -github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= -github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU= -github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify 
v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0= +github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk= github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= -github.com/gin-gonic/gin v1.7.7/go.mod h1:axIBovoeJpVj8S3BwE0uPMTeReE4+AfFtqpqaZ1qq1U= github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg= github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU= github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= -github.com/go-co-op/gocron v1.17.0 h1:IixLXsti+Qo0wMvmn6Kmjp2csk2ykpkcL+EmHmST18w= -github.com/go-co-op/gocron v1.17.0/go.mod h1:IpDBSaJOVfFw7hXZuTag3SCSkqazXBBUkbQ1m1aesBs= +github.com/go-co-op/gocron v1.37.0 h1:ZYDJGtQ4OMhTLKOKMIch+/CY70Brbb1dGdooLEhh7b0= +github.com/go-co-op/gocron v1.37.0/go.mod h1:3L/n6BkO7ABj+TrfSVXLRzsP26zmikL4ISkLQ0O8iNY= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= @@ -241,18 +245,14 @@ github.com/go-openapi/validate v0.19.12/go.mod h1:Rzou8hA/CBw8donlS6WNEUQupNvUZ0 github.com/go-openapi/validate v0.19.15/go.mod h1:tbn/fdOwYHgrhPBzidZfJC2MIVvs9GA7monOmWBbeCI= github.com/go-openapi/validate v0.20.0 h1:pzutNCCBZGZlE+u8HD3JZyWdc/TVbtVwlWUp8/vgUKk= github.com/go-openapi/validate v0.20.0/go.mod h1:b60iJT+xNNLfaQJUqLI7946tYiFEOuE9E4k54HpKcJ0= -github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= 
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= -github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= -github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= -github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4= -github.com/go-playground/validator/v10 v10.14.0 h1:vgvQWe3XCz3gIeFDm/HnTIbj6UGmg/+t63MyGU2n5js= -github.com/go-playground/validator/v10 v10.14.0/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU= +github.com/go-playground/validator/v10 v10.17.0 h1:SmVVlfAOtlZncTxRuinDPomC2DkXJ4E5T9gDA0AIH74= +github.com/go-playground/validator/v10 v10.17.0/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= @@ -294,7 +294,6 @@ github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRx github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v4 v4.2.0/go.mod 
h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= @@ -302,7 +301,6 @@ github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= @@ -317,16 +315,17 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= -github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= +github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= 
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/winops v0.0.0-20230712152054-af9b550d0601 h1:XvlrmqZIuwxuRE88S9mkxX+FkV+YakqbiAC5Z4OzDnM= github.com/google/winops v0.0.0-20230712152054-af9b550d0601/go.mod h1:rT1mcjzuvcDDbRmUTsoH6kV0DG91AkFe9UCjASraK5I= github.com/goombaio/namegenerator v0.0.0-20181006234301-989e774b106e h1:XmA6L9IPRdUr28a+SK/oMchGgQy159wvzXA5tJ7l+40= @@ -417,7 +416,6 @@ github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqx github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jszwec/csvutil v1.5.1 h1:c3GFBhj6DFMUl4dMK3+B6rz2+LWWS/e9VJiVJ9t9kfQ= @@ -436,13 +434,15 @@ github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHU github.com/klauspost/compress v1.17.3 h1:qkRjuerhUU1EmXLYGkSH6EZL+vPSxIrYjLNAK4slzwA= github.com/klauspost/compress v1.17.3/go.mod 
h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.2.4 h1:acbojRNwl3o09bUq+yDCtZFc1aiwaAAxtcn8YkZXnvk= -github.com/klauspost/cpuid/v2 v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= +github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc= +github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -453,9 +453,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348 h1:MtvEpTB6LX3vkb4ax0b5D2DHbNAUsen0Gx5wZoq3lV4= github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= -github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= -github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q= 
-github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4= +github.com/leodido/go-urn v1.3.0 h1:jX8FDLfW4ThVXctBNZ+3cIWnCSnrACDV73r76dy0aQQ= +github.com/leodido/go-urn v1.3.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= @@ -490,8 +489,8 @@ github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= -github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU= github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y= @@ -525,7 +524,6 @@ github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 
v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= @@ -553,13 +551,14 @@ github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhM github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= -github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ= -github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4= +github.com/pelletier/go-toml/v2 v2.1.1 h1:LWAJwfNvjQZCFIDKWYQaM62NcYeYViCmWIwmOStowAI= +github.com/pelletier/go-toml/v2 v2.1.1/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= github.com/petar-dambovaliev/aho-corasick v0.0.0-20230725210150-fb29fc3c913e h1:POJco99aNgosh92lGqmx7L1ei+kCymivB/419SD15PQ= github.com/petar-dambovaliev/aho-corasick v0.0.0-20230725210150-fb29fc3c913e/go.mod h1:EHPiTAKtiFmrMldLUNswFwfZ2eJIYBHktdaUTZxYWRw= github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ= github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -597,6 +596,8 @@ 
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzG github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= @@ -661,7 +662,6 @@ github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcU github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/tetratelabs/wazero v1.2.1 h1:J4X2hrGzJvt+wqltuvcSjHQ7ujQxA9gb6PeMs4qlUWs= github.com/tetratelabs/wazero v1.2.1/go.mod h1:wYx2gNRg8/WihJfSDxA1TIL8H+GkfLYm+bIfbblu9VQ= -github.com/tidwall/gjson v1.12.1/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/gjson v1.17.0 h1:/Jocvlh98kcTfpN2+JzGQWQcqrPQwDrVEMApx/M5ZwM= github.com/tidwall/gjson v1.17.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= @@ -678,10 +678,8 @@ github.com/toorop/go-dkim v0.0.0-20201103131630-e1cd1a0a5208 h1:PM5hJF7HVfNWmCjM github.com/toorop/go-dkim v0.0.0-20201103131630-e1cd1a0a5208/go.mod h1:BzWtXXrXzZUvMacR0oF/fbDDgUPO8L36tDMmRAf14ns= github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= -github.com/ugorji/go v1.1.7/go.mod 
h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= -github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= -github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU= -github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= +github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE= +github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= github.com/umahmood/haversine v0.0.0-20151105152445-808ab04add26 h1:UFHFmFfixpmfRBcxuu+LA9l8MdURWVdVNUHxO5n1d2w= github.com/umahmood/haversine v0.0.0-20151105152445-808ab04add26/go.mod h1:IGhd0qMDsUa9acVjsbsT7bu3ktadtGOHI79+idTew/M= github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= @@ -728,6 +726,9 @@ go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= +go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= @@ -736,8 +737,8 @@ go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod 
h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= -golang.org/x/arch v0.3.0 h1:02VY4/ZcO/gBOH6PUaoiptASxtXU10jazRCP865E97k= -golang.org/x/arch v0.3.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= +golang.org/x/arch v0.7.0 h1:pskyeJh/3AmoQ8CPE95vxHLqp1G1GfGNXTmcl9NEKTc= +golang.org/x/arch v0.7.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -831,11 +832,9 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -952,6 +951,7 @@ k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= k8s.io/klog/v2 
v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 h1:qY1Ad8PODbnymg2pRbkyMT/ylpTrCM8P2RJ0yroCyIk= k8s.io/utils v0.0.0-20230406110748-d93618cff8a2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50= rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= From 91fbc6353368349ca1fbdf0eb0b01835827409af Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 2 May 2024 12:56:41 +0200 Subject: [PATCH 111/318] db: review update timestamps, immutable columns (#2981) * orm: correct behavior of created_at, updated_at, define immutable fields * remove updatedefault for last_push, last_heartbeat * re-generate db schema * update last_push in CreateAlert() * lint --- pkg/database/alerts.go | 124 +++++---- pkg/database/ent/alert.go | 22 +- pkg/database/ent/alert/alert.go | 2 - pkg/database/ent/alert/where.go | 20 -- pkg/database/ent/alert_create.go | 10 +- pkg/database/ent/alert_update.go | 66 +---- pkg/database/ent/bouncer.go | 22 +- pkg/database/ent/bouncer/bouncer.go | 2 - pkg/database/ent/bouncer/where.go | 20 -- pkg/database/ent/bouncer_create.go | 10 +- pkg/database/ent/bouncer_update.go | 52 +--- pkg/database/ent/configitem.go | 22 +- pkg/database/ent/configitem/configitem.go | 2 - pkg/database/ent/configitem/where.go | 20 -- pkg/database/ent/configitem_create.go | 10 +- pkg/database/ent/configitem_update.go | 66 +---- pkg/database/ent/decision.go | 22 +- pkg/database/ent/decision/decision.go | 2 - pkg/database/ent/decision/where.go | 20 -- pkg/database/ent/decision_create.go | 10 +- pkg/database/ent/decision_update.go | 66 +---- pkg/database/ent/event.go | 22 +- pkg/database/ent/event/event.go | 2 - 
pkg/database/ent/event/where.go | 20 -- pkg/database/ent/event_create.go | 10 +- pkg/database/ent/event_update.go | 66 +---- pkg/database/ent/lock_update.go | 34 --- pkg/database/ent/machine.go | 22 +- pkg/database/ent/machine/machine.go | 6 - pkg/database/ent/machine/where.go | 20 -- pkg/database/ent/machine_create.go | 10 +- pkg/database/ent/machine_update.go | 148 +++-------- pkg/database/ent/meta.go | 22 +- pkg/database/ent/meta/meta.go | 2 - pkg/database/ent/meta/where.go | 20 -- pkg/database/ent/meta_create.go | 10 +- pkg/database/ent/meta_update.go | 52 +--- pkg/database/ent/migrate/schema.go | 28 +- pkg/database/ent/mutation.go | 299 ++-------------------- pkg/database/ent/runtime.go | 18 -- pkg/database/ent/schema/alert.go | 4 +- pkg/database/ent/schema/bouncer.go | 4 +- pkg/database/ent/schema/config.go | 7 +- pkg/database/ent/schema/decision.go | 4 +- pkg/database/ent/schema/event.go | 4 +- pkg/database/ent/schema/lock.go | 2 +- pkg/database/ent/schema/machine.go | 12 +- pkg/database/ent/schema/meta.go | 5 +- pkg/database/machines.go | 8 - 49 files changed, 300 insertions(+), 1151 deletions(-) diff --git a/pkg/database/alerts.go b/pkg/database/alerts.go index 5559cbb3972..602846eb31a 100644 --- a/pkg/database/alerts.go +++ b/pkg/database/alerts.go @@ -9,9 +9,8 @@ import ( "strings" "time" - "github.com/mattn/go-sqlite3" - "github.com/davecgh/go-spew/spew" + "github.com/mattn/go-sqlite3" "github.com/pkg/errors" log "github.com/sirupsen/logrus" @@ -134,7 +133,7 @@ func formatAlertAsString(machineID string, alert *models.Alert) []string { // if some associated decisions are missing (ie. 
previous insert ended up in error) it inserts them func (c *Client) CreateOrUpdateAlert(machineID string, alertItem *models.Alert) (string, error) { if alertItem.UUID == "" { - return "", fmt.Errorf("alert UUID is empty") + return "", errors.New("alert UUID is empty") } alerts, err := c.Ent.Alert.Query().Where(alert.UUID(alertItem.UUID)).WithDecisions().All(c.CTX) @@ -143,7 +142,7 @@ func (c *Client) CreateOrUpdateAlert(machineID string, alertItem *models.Alert) return "", fmt.Errorf("unable to query alerts for uuid %s: %w", alertItem.UUID, err) } - //alert wasn't found, insert it (expected hotpath) + // alert wasn't found, insert it (expected hotpath) if ent.IsNotFound(err) || len(alerts) == 0 { alertIDs, err := c.CreateAlert(machineID, []*models.Alert{alertItem}) if err != nil { @@ -153,14 +152,14 @@ func (c *Client) CreateOrUpdateAlert(machineID string, alertItem *models.Alert) return alertIDs[0], nil } - //this should never happen + // this should never happen if len(alerts) > 1 { return "", fmt.Errorf("multiple alerts found for uuid %s", alertItem.UUID) } log.Infof("Alert %s already exists, checking associated decisions", alertItem.UUID) - //alert is found, check for any missing decisions + // alert is found, check for any missing decisions newUuids := make([]string, len(alertItem.Decisions)) for i, decItem := range alertItem.Decisions { @@ -203,14 +202,16 @@ func (c *Client) CreateOrUpdateAlert(machineID string, alertItem *models.Alert) } } - //add missing decisions + // add missing decisions log.Debugf("Adding %d missing decisions to alert %s", len(missingDecisions), foundAlert.UUID) decisionBuilders := []*ent.DecisionCreate{} for _, decisionItem := range missingDecisions { - var start_ip, start_sfx, end_ip, end_sfx int64 - var sz int + var ( + start_ip, start_sfx, end_ip, end_sfx int64 + sz int + ) /*if the scope is IP or Range, convert the value to integers */ if strings.ToLower(*decisionItem.Scope) == "ip" || strings.ToLower(*decisionItem.Scope) == 
"range" { @@ -227,7 +228,7 @@ func (c *Client) CreateOrUpdateAlert(machineID string, alertItem *models.Alert) continue } - //use the created_at from the alert instead + // use the created_at from the alert instead alertTime, err := time.Parse(time.RFC3339, alertItem.CreatedAt) if err != nil { log.Errorf("unable to parse alert time %s : %s", alertItem.CreatedAt, err) @@ -268,7 +269,7 @@ func (c *Client) CreateOrUpdateAlert(machineID string, alertItem *models.Alert) decisions = append(decisions, decisionsCreateRet...) } - //now that we bulk created missing decisions, let's update the alert + // now that we bulk created missing decisions, let's update the alert decisionChunks := slicetools.Chunks(decisions, c.decisionBulkSize) @@ -288,11 +289,11 @@ func (c *Client) CreateOrUpdateAlert(machineID string, alertItem *models.Alert) // 2nd pull, you get decisions [1,2,3,4]. it inserts [1,2,3,4] and will try to delete [1,2,3,4] with a different alert ID and same origin func (c *Client) UpdateCommunityBlocklist(alertItem *models.Alert) (int, int, int, error) { if alertItem == nil { - return 0, 0, 0, fmt.Errorf("nil alert") + return 0, 0, 0, errors.New("nil alert") } if alertItem.StartAt == nil { - return 0, 0, 0, fmt.Errorf("nil start_at") + return 0, 0, 0, errors.New("nil start_at") } startAtTime, err := time.Parse(time.RFC3339, *alertItem.StartAt) @@ -301,7 +302,7 @@ func (c *Client) UpdateCommunityBlocklist(alertItem *models.Alert) (int, int, in } if alertItem.StopAt == nil { - return 0, 0, 0, fmt.Errorf("nil stop_at") + return 0, 0, 0, errors.New("nil stop_at") } stopAtTime, err := time.Parse(time.RFC3339, *alertItem.StopAt) @@ -367,8 +368,10 @@ func (c *Client) UpdateCommunityBlocklist(alertItem *models.Alert) (int, int, in valueList := make([]string, 0, len(alertItem.Decisions)) for _, decisionItem := range alertItem.Decisions { - var start_ip, start_sfx, end_ip, end_sfx int64 - var sz int + var ( + start_ip, start_sfx, end_ip, end_sfx int64 + sz int + ) if 
decisionItem.Duration == nil { log.Warning("nil duration in community decision") @@ -487,8 +490,10 @@ func (c *Client) createDecisionChunk(simulated bool, stopAtTime time.Time, decis decisionCreate := []*ent.DecisionCreate{} for _, decisionItem := range decisions { - var start_ip, start_sfx, end_ip, end_sfx int64 - var sz int + var ( + start_ip, start_sfx, end_ip, end_sfx int64 + sz int + ) duration, err := time.ParseDuration(*decisionItem.Duration) if err != nil { @@ -539,8 +544,10 @@ func (c *Client) createAlertChunk(machineID string, owner *ent.Machine, alerts [ alertDecisions := [][]*ent.Decision{} for _, alertItem := range alerts { - var metas []*ent.Meta - var events []*ent.Event + var ( + metas []*ent.Meta + events []*ent.Event + ) startAtTime, err := time.Parse(time.RFC3339, *alertItem.StartAt) if err != nil { @@ -560,7 +567,7 @@ func (c *Client) createAlertChunk(machineID string, owner *ent.Machine, alerts [ c.Log.Info(disp) } - //let's track when we strip or drop data, notify outside of loop to avoid spam + // let's track when we strip or drop data, notify outside of loop to avoid spam stripped := false dropped := false @@ -580,7 +587,7 @@ func (c *Client) createAlertChunk(machineID string, owner *ent.Machine, alerts [ return nil, errors.Wrapf(MarshalFail, "event meta '%v' : %s", eventItem.Meta, err) } - //the serialized field is too big, let's try to progressively strip it + // the serialized field is too big, let's try to progressively strip it if event.SerializedValidator(string(marshallMetas)) != nil { stripped = true @@ -606,7 +613,7 @@ func (c *Client) createAlertChunk(machineID string, owner *ent.Machine, alerts [ stripSize /= 2 } - //nothing worked, drop it + // nothing worked, drop it if !valid { dropped = true stripped = false @@ -635,17 +642,21 @@ func (c *Client) createAlertChunk(machineID string, owner *ent.Machine, alerts [ if len(alertItem.Meta) > 0 { metaBulk := make([]*ent.MetaCreate, len(alertItem.Meta)) + for i, metaItem := range 
alertItem.Meta { key := metaItem.Key value := metaItem.Value + if len(metaItem.Value) > 4095 { c.Log.Warningf("truncated meta %s : value too long", metaItem.Key) value = value[:4095] } + if len(metaItem.Key) > 255 { c.Log.Warningf("truncated meta %s : key too long", metaItem.Key) key = key[:255] } + metaBulk[i] = c.Ent.Meta.Create(). SetKey(key). SetValue(value) @@ -765,8 +776,10 @@ func (c *Client) createAlertChunk(machineID string, owner *ent.Machine, alerts [ } func (c *Client) CreateAlert(machineID string, alertList []*models.Alert) ([]string, error) { - var owner *ent.Machine - var err error + var ( + owner *ent.Machine + err error + ) if machineID != "" { owner, err = c.QueryMachineByID(machineID) @@ -795,17 +808,27 @@ func (c *Client) CreateAlert(machineID string, alertList []*models.Alert) ([]str alertIDs = append(alertIDs, ids...) } + if owner != nil { + err = owner.Update().SetLastPush(time.Now().UTC()).Exec(c.CTX) + if err != nil { + return nil, fmt.Errorf("machine '%s': %w", machineID, err) + } + } + return alertIDs, nil } func AlertPredicatesFromFilter(filter map[string][]string) ([]predicate.Alert, error) { predicates := make([]predicate.Alert, 0) - var err error - var start_ip, start_sfx, end_ip, end_sfx int64 - var hasActiveDecision bool - var ip_sz int - var contains = true + var ( + err error + start_ip, start_sfx, end_ip, end_sfx int64 + hasActiveDecision bool + ip_sz int + ) + + contains := true /*if contains is true, return bans that *contains* the given value (value is the inner) else, return bans that are *contained* by the given value (value is the outer)*/ @@ -829,7 +852,7 @@ func AlertPredicatesFromFilter(filter map[string][]string) ([]predicate.Alert, e return nil, errors.Wrapf(InvalidFilter, "invalid contains value : %s", err) } case "scope": - var scope = value[0] + scope := value[0] if strings.ToLower(scope) == "ip" { scope = types.Ip } else if strings.ToLower(scope) == "range" { @@ -886,17 +909,17 @@ func 
AlertPredicatesFromFilter(filter map[string][]string) ([]predicate.Alert, e predicates = append(predicates, alert.HasDecisionsWith(decision.TypeEQ(value[0]))) case "origin": predicates = append(predicates, alert.HasDecisionsWith(decision.OriginEQ(value[0]))) - case "include_capi": //allows to exclude one or more specific origins + case "include_capi": // allows to exclude one or more specific origins if value[0] == "false" { predicates = append(predicates, alert.And( - //do not show alerts with active decisions having origin CAPI or lists + // do not show alerts with active decisions having origin CAPI or lists alert.And( alert.Not(alert.HasDecisionsWith(decision.OriginEQ(types.CAPIOrigin))), alert.Not(alert.HasDecisionsWith(decision.OriginEQ(types.ListOrigin))), ), alert.Not( alert.And( - //do not show neither alerts with no decisions if the Source Scope is lists: or CAPI + // do not show neither alerts with no decisions if the Source Scope is lists: or CAPI alert.Not(alert.HasDecisions()), alert.Or( alert.SourceScopeHasPrefix(types.ListOrigin+":"), @@ -906,7 +929,6 @@ func AlertPredicatesFromFilter(filter map[string][]string) ([]predicate.Alert, e ), ), ) - } else if value[0] != "true" { log.Errorf("Invalid bool '%s' for include_capi", value[0]) } @@ -950,48 +972,48 @@ func AlertPredicatesFromFilter(filter map[string][]string) ([]predicate.Alert, e } else if ip_sz == 16 { if contains { /*decision contains {start_ip,end_ip}*/ predicates = append(predicates, alert.And( - //matching addr size + // matching addr size alert.HasDecisionsWith(decision.IPSizeEQ(int64(ip_sz))), alert.Or( - //decision.start_ip < query.start_ip + // decision.start_ip < query.start_ip alert.HasDecisionsWith(decision.StartIPLT(start_ip)), alert.And( - //decision.start_ip == query.start_ip + // decision.start_ip == query.start_ip alert.HasDecisionsWith(decision.StartIPEQ(start_ip)), - //decision.start_suffix <= query.start_suffix + // decision.start_suffix <= query.start_suffix 
alert.HasDecisionsWith(decision.StartSuffixLTE(start_sfx)), )), alert.Or( - //decision.end_ip > query.end_ip + // decision.end_ip > query.end_ip alert.HasDecisionsWith(decision.EndIPGT(end_ip)), alert.And( - //decision.end_ip == query.end_ip + // decision.end_ip == query.end_ip alert.HasDecisionsWith(decision.EndIPEQ(end_ip)), - //decision.end_suffix >= query.end_suffix + // decision.end_suffix >= query.end_suffix alert.HasDecisionsWith(decision.EndSuffixGTE(end_sfx)), ), ), )) } else { /*decision is contained within {start_ip,end_ip}*/ predicates = append(predicates, alert.And( - //matching addr size + // matching addr size alert.HasDecisionsWith(decision.IPSizeEQ(int64(ip_sz))), alert.Or( - //decision.start_ip > query.start_ip + // decision.start_ip > query.start_ip alert.HasDecisionsWith(decision.StartIPGT(start_ip)), alert.And( - //decision.start_ip == query.start_ip + // decision.start_ip == query.start_ip alert.HasDecisionsWith(decision.StartIPEQ(start_ip)), - //decision.start_suffix >= query.start_suffix + // decision.start_suffix >= query.start_suffix alert.HasDecisionsWith(decision.StartSuffixGTE(start_sfx)), )), alert.Or( - //decision.end_ip < query.end_ip + // decision.end_ip < query.end_ip alert.HasDecisionsWith(decision.EndIPLT(end_ip)), alert.And( - //decision.end_ip == query.end_ip + // decision.end_ip == query.end_ip alert.HasDecisionsWith(decision.EndIPEQ(end_ip)), - //decision.end_suffix <= query.end_suffix + // decision.end_suffix <= query.end_suffix alert.HasDecisionsWith(decision.EndSuffixLTE(end_sfx)), ), ), @@ -1024,13 +1046,11 @@ func (c *Client) AlertsCountPerScenario(filters map[string][]string) (map[string query := c.Ent.Alert.Query() query, err := BuildAlertRequestFromFilter(query, filters) - if err != nil { return nil, fmt.Errorf("failed to build alert request: %w", err) } err = query.GroupBy(alert.FieldScenario).Aggregate(ent.Count()).Scan(ctx, &res) - if err != nil { return nil, fmt.Errorf("failed to count alerts per scenario: %w", 
err) } @@ -1081,7 +1101,7 @@ func (c *Client) QueryAlertWithFilter(filter map[string][]string) ([]*ent.Alert, return nil, err } - //only if with_decisions is present and set to false, we exclude this + // only if with_decisions is present and set to false, we exclude this if val, ok := filter["with_decisions"]; ok && val[0] == "false" { c.Log.Debugf("skipping decisions") } else { diff --git a/pkg/database/ent/alert.go b/pkg/database/ent/alert.go index 5cb4d1a352c..6da9f0efe76 100644 --- a/pkg/database/ent/alert.go +++ b/pkg/database/ent/alert.go @@ -19,9 +19,9 @@ type Alert struct { // ID of the ent. ID int `json:"id,omitempty"` // CreatedAt holds the value of the "created_at" field. - CreatedAt *time.Time `json:"created_at,omitempty"` + CreatedAt time.Time `json:"created_at,omitempty"` // UpdatedAt holds the value of the "updated_at" field. - UpdatedAt *time.Time `json:"updated_at,omitempty"` + UpdatedAt time.Time `json:"updated_at,omitempty"` // Scenario holds the value of the "scenario" field. Scenario string `json:"scenario,omitempty"` // BucketId holds the value of the "bucketId" field. 
@@ -168,15 +168,13 @@ func (a *Alert) assignValues(columns []string, values []any) error { if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field created_at", values[i]) } else if value.Valid { - a.CreatedAt = new(time.Time) - *a.CreatedAt = value.Time + a.CreatedAt = value.Time } case alert.FieldUpdatedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field updated_at", values[i]) } else if value.Valid { - a.UpdatedAt = new(time.Time) - *a.UpdatedAt = value.Time + a.UpdatedAt = value.Time } case alert.FieldScenario: if value, ok := values[i].(*sql.NullString); !ok { @@ -367,15 +365,11 @@ func (a *Alert) String() string { var builder strings.Builder builder.WriteString("Alert(") builder.WriteString(fmt.Sprintf("id=%v, ", a.ID)) - if v := a.CreatedAt; v != nil { - builder.WriteString("created_at=") - builder.WriteString(v.Format(time.ANSIC)) - } + builder.WriteString("created_at=") + builder.WriteString(a.CreatedAt.Format(time.ANSIC)) builder.WriteString(", ") - if v := a.UpdatedAt; v != nil { - builder.WriteString("updated_at=") - builder.WriteString(v.Format(time.ANSIC)) - } + builder.WriteString("updated_at=") + builder.WriteString(a.UpdatedAt.Format(time.ANSIC)) builder.WriteString(", ") builder.WriteString("scenario=") builder.WriteString(a.Scenario) diff --git a/pkg/database/ent/alert/alert.go b/pkg/database/ent/alert/alert.go index eb9f1d10788..16e0b019e14 100644 --- a/pkg/database/ent/alert/alert.go +++ b/pkg/database/ent/alert/alert.go @@ -152,8 +152,6 @@ func ValidColumn(column string) bool { var ( // DefaultCreatedAt holds the default value on creation for the "created_at" field. DefaultCreatedAt func() time.Time - // UpdateDefaultCreatedAt holds the default value on update for the "created_at" field. - UpdateDefaultCreatedAt func() time.Time // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. 
DefaultUpdatedAt func() time.Time // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. diff --git a/pkg/database/ent/alert/where.go b/pkg/database/ent/alert/where.go index 516ead50636..c109b78704b 100644 --- a/pkg/database/ent/alert/where.go +++ b/pkg/database/ent/alert/where.go @@ -210,16 +210,6 @@ func CreatedAtLTE(v time.Time) predicate.Alert { return predicate.Alert(sql.FieldLTE(FieldCreatedAt, v)) } -// CreatedAtIsNil applies the IsNil predicate on the "created_at" field. -func CreatedAtIsNil() predicate.Alert { - return predicate.Alert(sql.FieldIsNull(FieldCreatedAt)) -} - -// CreatedAtNotNil applies the NotNil predicate on the "created_at" field. -func CreatedAtNotNil() predicate.Alert { - return predicate.Alert(sql.FieldNotNull(FieldCreatedAt)) -} - // UpdatedAtEQ applies the EQ predicate on the "updated_at" field. func UpdatedAtEQ(v time.Time) predicate.Alert { return predicate.Alert(sql.FieldEQ(FieldUpdatedAt, v)) @@ -260,16 +250,6 @@ func UpdatedAtLTE(v time.Time) predicate.Alert { return predicate.Alert(sql.FieldLTE(FieldUpdatedAt, v)) } -// UpdatedAtIsNil applies the IsNil predicate on the "updated_at" field. -func UpdatedAtIsNil() predicate.Alert { - return predicate.Alert(sql.FieldIsNull(FieldUpdatedAt)) -} - -// UpdatedAtNotNil applies the NotNil predicate on the "updated_at" field. -func UpdatedAtNotNil() predicate.Alert { - return predicate.Alert(sql.FieldNotNull(FieldUpdatedAt)) -} - // ScenarioEQ applies the EQ predicate on the "scenario" field. func ScenarioEQ(v string) predicate.Alert { return predicate.Alert(sql.FieldEQ(FieldScenario, v)) diff --git a/pkg/database/ent/alert_create.go b/pkg/database/ent/alert_create.go index c7498442c06..45a6e40b64f 100644 --- a/pkg/database/ent/alert_create.go +++ b/pkg/database/ent/alert_create.go @@ -473,6 +473,12 @@ func (ac *AlertCreate) defaults() { // check runs all checks and user-defined validators on the builder. 
func (ac *AlertCreate) check() error { + if _, ok := ac.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "Alert.created_at"`)} + } + if _, ok := ac.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "Alert.updated_at"`)} + } if _, ok := ac.mutation.Scenario(); !ok { return &ValidationError{Name: "scenario", err: errors.New(`ent: missing required field "Alert.scenario"`)} } @@ -507,11 +513,11 @@ func (ac *AlertCreate) createSpec() (*Alert, *sqlgraph.CreateSpec) { ) if value, ok := ac.mutation.CreatedAt(); ok { _spec.SetField(alert.FieldCreatedAt, field.TypeTime, value) - _node.CreatedAt = &value + _node.CreatedAt = value } if value, ok := ac.mutation.UpdatedAt(); ok { _spec.SetField(alert.FieldUpdatedAt, field.TypeTime, value) - _node.UpdatedAt = &value + _node.UpdatedAt = value } if value, ok := ac.mutation.Scenario(); ok { _spec.SetField(alert.FieldScenario, field.TypeString, value) diff --git a/pkg/database/ent/alert_update.go b/pkg/database/ent/alert_update.go index f8a4d108527..8b88c35c7d7 100644 --- a/pkg/database/ent/alert_update.go +++ b/pkg/database/ent/alert_update.go @@ -32,30 +32,12 @@ func (au *AlertUpdate) Where(ps ...predicate.Alert) *AlertUpdate { return au } -// SetCreatedAt sets the "created_at" field. -func (au *AlertUpdate) SetCreatedAt(t time.Time) *AlertUpdate { - au.mutation.SetCreatedAt(t) - return au -} - -// ClearCreatedAt clears the value of the "created_at" field. -func (au *AlertUpdate) ClearCreatedAt() *AlertUpdate { - au.mutation.ClearCreatedAt() - return au -} - // SetUpdatedAt sets the "updated_at" field. func (au *AlertUpdate) SetUpdatedAt(t time.Time) *AlertUpdate { au.mutation.SetUpdatedAt(t) return au } -// ClearUpdatedAt clears the value of the "updated_at" field. 
-func (au *AlertUpdate) ClearUpdatedAt() *AlertUpdate { - au.mutation.ClearUpdatedAt() - return au -} - // SetScenario sets the "scenario" field. func (au *AlertUpdate) SetScenario(s string) *AlertUpdate { au.mutation.SetScenario(s) @@ -660,11 +642,7 @@ func (au *AlertUpdate) ExecX(ctx context.Context) { // defaults sets the default values of the builder before save. func (au *AlertUpdate) defaults() { - if _, ok := au.mutation.CreatedAt(); !ok && !au.mutation.CreatedAtCleared() { - v := alert.UpdateDefaultCreatedAt() - au.mutation.SetCreatedAt(v) - } - if _, ok := au.mutation.UpdatedAt(); !ok && !au.mutation.UpdatedAtCleared() { + if _, ok := au.mutation.UpdatedAt(); !ok { v := alert.UpdateDefaultUpdatedAt() au.mutation.SetUpdatedAt(v) } @@ -679,18 +657,9 @@ func (au *AlertUpdate) sqlSave(ctx context.Context) (n int, err error) { } } } - if value, ok := au.mutation.CreatedAt(); ok { - _spec.SetField(alert.FieldCreatedAt, field.TypeTime, value) - } - if au.mutation.CreatedAtCleared() { - _spec.ClearField(alert.FieldCreatedAt, field.TypeTime) - } if value, ok := au.mutation.UpdatedAt(); ok { _spec.SetField(alert.FieldUpdatedAt, field.TypeTime, value) } - if au.mutation.UpdatedAtCleared() { - _spec.ClearField(alert.FieldUpdatedAt, field.TypeTime) - } if value, ok := au.mutation.Scenario(); ok { _spec.SetField(alert.FieldScenario, field.TypeString, value) } @@ -1007,30 +976,12 @@ type AlertUpdateOne struct { mutation *AlertMutation } -// SetCreatedAt sets the "created_at" field. -func (auo *AlertUpdateOne) SetCreatedAt(t time.Time) *AlertUpdateOne { - auo.mutation.SetCreatedAt(t) - return auo -} - -// ClearCreatedAt clears the value of the "created_at" field. -func (auo *AlertUpdateOne) ClearCreatedAt() *AlertUpdateOne { - auo.mutation.ClearCreatedAt() - return auo -} - // SetUpdatedAt sets the "updated_at" field. 
func (auo *AlertUpdateOne) SetUpdatedAt(t time.Time) *AlertUpdateOne { auo.mutation.SetUpdatedAt(t) return auo } -// ClearUpdatedAt clears the value of the "updated_at" field. -func (auo *AlertUpdateOne) ClearUpdatedAt() *AlertUpdateOne { - auo.mutation.ClearUpdatedAt() - return auo -} - // SetScenario sets the "scenario" field. func (auo *AlertUpdateOne) SetScenario(s string) *AlertUpdateOne { auo.mutation.SetScenario(s) @@ -1648,11 +1599,7 @@ func (auo *AlertUpdateOne) ExecX(ctx context.Context) { // defaults sets the default values of the builder before save. func (auo *AlertUpdateOne) defaults() { - if _, ok := auo.mutation.CreatedAt(); !ok && !auo.mutation.CreatedAtCleared() { - v := alert.UpdateDefaultCreatedAt() - auo.mutation.SetCreatedAt(v) - } - if _, ok := auo.mutation.UpdatedAt(); !ok && !auo.mutation.UpdatedAtCleared() { + if _, ok := auo.mutation.UpdatedAt(); !ok { v := alert.UpdateDefaultUpdatedAt() auo.mutation.SetUpdatedAt(v) } @@ -1684,18 +1631,9 @@ func (auo *AlertUpdateOne) sqlSave(ctx context.Context) (_node *Alert, err error } } } - if value, ok := auo.mutation.CreatedAt(); ok { - _spec.SetField(alert.FieldCreatedAt, field.TypeTime, value) - } - if auo.mutation.CreatedAtCleared() { - _spec.ClearField(alert.FieldCreatedAt, field.TypeTime) - } if value, ok := auo.mutation.UpdatedAt(); ok { _spec.SetField(alert.FieldUpdatedAt, field.TypeTime, value) } - if auo.mutation.UpdatedAtCleared() { - _spec.ClearField(alert.FieldUpdatedAt, field.TypeTime) - } if value, ok := auo.mutation.Scenario(); ok { _spec.SetField(alert.FieldScenario, field.TypeString, value) } diff --git a/pkg/database/ent/bouncer.go b/pkg/database/ent/bouncer.go index 203f49a432d..7a3b280f53d 100644 --- a/pkg/database/ent/bouncer.go +++ b/pkg/database/ent/bouncer.go @@ -18,9 +18,9 @@ type Bouncer struct { // ID of the ent. ID int `json:"id,omitempty"` // CreatedAt holds the value of the "created_at" field. 
- CreatedAt *time.Time `json:"created_at"` + CreatedAt time.Time `json:"created_at"` // UpdatedAt holds the value of the "updated_at" field. - UpdatedAt *time.Time `json:"updated_at"` + UpdatedAt time.Time `json:"updated_at"` // Name holds the value of the "name" field. Name string `json:"name"` // APIKey holds the value of the "api_key" field. @@ -80,15 +80,13 @@ func (b *Bouncer) assignValues(columns []string, values []any) error { if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field created_at", values[i]) } else if value.Valid { - b.CreatedAt = new(time.Time) - *b.CreatedAt = value.Time + b.CreatedAt = value.Time } case bouncer.FieldUpdatedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field updated_at", values[i]) } else if value.Valid { - b.UpdatedAt = new(time.Time) - *b.UpdatedAt = value.Time + b.UpdatedAt = value.Time } case bouncer.FieldName: if value, ok := values[i].(*sql.NullString); !ok { @@ -180,15 +178,11 @@ func (b *Bouncer) String() string { var builder strings.Builder builder.WriteString("Bouncer(") builder.WriteString(fmt.Sprintf("id=%v, ", b.ID)) - if v := b.CreatedAt; v != nil { - builder.WriteString("created_at=") - builder.WriteString(v.Format(time.ANSIC)) - } + builder.WriteString("created_at=") + builder.WriteString(b.CreatedAt.Format(time.ANSIC)) builder.WriteString(", ") - if v := b.UpdatedAt; v != nil { - builder.WriteString("updated_at=") - builder.WriteString(v.Format(time.ANSIC)) - } + builder.WriteString("updated_at=") + builder.WriteString(b.UpdatedAt.Format(time.ANSIC)) builder.WriteString(", ") builder.WriteString("name=") builder.WriteString(b.Name) diff --git a/pkg/database/ent/bouncer/bouncer.go b/pkg/database/ent/bouncer/bouncer.go index 24d230d3b54..7683c07752b 100644 --- a/pkg/database/ent/bouncer/bouncer.go +++ b/pkg/database/ent/bouncer/bouncer.go @@ -68,8 +68,6 @@ func ValidColumn(column string) bool { var ( // DefaultCreatedAt 
holds the default value on creation for the "created_at" field. DefaultCreatedAt func() time.Time - // UpdateDefaultCreatedAt holds the default value on update for the "created_at" field. - UpdateDefaultCreatedAt func() time.Time // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. DefaultUpdatedAt func() time.Time // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. diff --git a/pkg/database/ent/bouncer/where.go b/pkg/database/ent/bouncer/where.go index 5bf721dbf51..ee28d7bb6ff 100644 --- a/pkg/database/ent/bouncer/where.go +++ b/pkg/database/ent/bouncer/where.go @@ -149,16 +149,6 @@ func CreatedAtLTE(v time.Time) predicate.Bouncer { return predicate.Bouncer(sql.FieldLTE(FieldCreatedAt, v)) } -// CreatedAtIsNil applies the IsNil predicate on the "created_at" field. -func CreatedAtIsNil() predicate.Bouncer { - return predicate.Bouncer(sql.FieldIsNull(FieldCreatedAt)) -} - -// CreatedAtNotNil applies the NotNil predicate on the "created_at" field. -func CreatedAtNotNil() predicate.Bouncer { - return predicate.Bouncer(sql.FieldNotNull(FieldCreatedAt)) -} - // UpdatedAtEQ applies the EQ predicate on the "updated_at" field. func UpdatedAtEQ(v time.Time) predicate.Bouncer { return predicate.Bouncer(sql.FieldEQ(FieldUpdatedAt, v)) @@ -199,16 +189,6 @@ func UpdatedAtLTE(v time.Time) predicate.Bouncer { return predicate.Bouncer(sql.FieldLTE(FieldUpdatedAt, v)) } -// UpdatedAtIsNil applies the IsNil predicate on the "updated_at" field. -func UpdatedAtIsNil() predicate.Bouncer { - return predicate.Bouncer(sql.FieldIsNull(FieldUpdatedAt)) -} - -// UpdatedAtNotNil applies the NotNil predicate on the "updated_at" field. -func UpdatedAtNotNil() predicate.Bouncer { - return predicate.Bouncer(sql.FieldNotNull(FieldUpdatedAt)) -} - // NameEQ applies the EQ predicate on the "name" field. 
func NameEQ(v string) predicate.Bouncer { return predicate.Bouncer(sql.FieldEQ(FieldName, v)) diff --git a/pkg/database/ent/bouncer_create.go b/pkg/database/ent/bouncer_create.go index 3d08277dcfb..ceff4db583e 100644 --- a/pkg/database/ent/bouncer_create.go +++ b/pkg/database/ent/bouncer_create.go @@ -213,6 +213,12 @@ func (bc *BouncerCreate) defaults() { // check runs all checks and user-defined validators on the builder. func (bc *BouncerCreate) check() error { + if _, ok := bc.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "Bouncer.created_at"`)} + } + if _, ok := bc.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "Bouncer.updated_at"`)} + } if _, ok := bc.mutation.Name(); !ok { return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "Bouncer.name"`)} } @@ -256,11 +262,11 @@ func (bc *BouncerCreate) createSpec() (*Bouncer, *sqlgraph.CreateSpec) { ) if value, ok := bc.mutation.CreatedAt(); ok { _spec.SetField(bouncer.FieldCreatedAt, field.TypeTime, value) - _node.CreatedAt = &value + _node.CreatedAt = value } if value, ok := bc.mutation.UpdatedAt(); ok { _spec.SetField(bouncer.FieldUpdatedAt, field.TypeTime, value) - _node.UpdatedAt = &value + _node.UpdatedAt = value } if value, ok := bc.mutation.Name(); ok { _spec.SetField(bouncer.FieldName, field.TypeString, value) diff --git a/pkg/database/ent/bouncer_update.go b/pkg/database/ent/bouncer_update.go index b3f5e1a5540..76968cf5202 100644 --- a/pkg/database/ent/bouncer_update.go +++ b/pkg/database/ent/bouncer_update.go @@ -34,9 +34,11 @@ func (bu *BouncerUpdate) SetCreatedAt(t time.Time) *BouncerUpdate { return bu } -// ClearCreatedAt clears the value of the "created_at" field. 
-func (bu *BouncerUpdate) ClearCreatedAt() *BouncerUpdate { - bu.mutation.ClearCreatedAt() +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (bu *BouncerUpdate) SetNillableCreatedAt(t *time.Time) *BouncerUpdate { + if t != nil { + bu.SetCreatedAt(*t) + } return bu } @@ -46,12 +48,6 @@ func (bu *BouncerUpdate) SetUpdatedAt(t time.Time) *BouncerUpdate { return bu } -// ClearUpdatedAt clears the value of the "updated_at" field. -func (bu *BouncerUpdate) ClearUpdatedAt() *BouncerUpdate { - bu.mutation.ClearUpdatedAt() - return bu -} - // SetName sets the "name" field. func (bu *BouncerUpdate) SetName(s string) *BouncerUpdate { bu.mutation.SetName(s) @@ -237,11 +233,7 @@ func (bu *BouncerUpdate) ExecX(ctx context.Context) { // defaults sets the default values of the builder before save. func (bu *BouncerUpdate) defaults() { - if _, ok := bu.mutation.CreatedAt(); !ok && !bu.mutation.CreatedAtCleared() { - v := bouncer.UpdateDefaultCreatedAt() - bu.mutation.SetCreatedAt(v) - } - if _, ok := bu.mutation.UpdatedAt(); !ok && !bu.mutation.UpdatedAtCleared() { + if _, ok := bu.mutation.UpdatedAt(); !ok { v := bouncer.UpdateDefaultUpdatedAt() bu.mutation.SetUpdatedAt(v) } @@ -259,15 +251,9 @@ func (bu *BouncerUpdate) sqlSave(ctx context.Context) (n int, err error) { if value, ok := bu.mutation.CreatedAt(); ok { _spec.SetField(bouncer.FieldCreatedAt, field.TypeTime, value) } - if bu.mutation.CreatedAtCleared() { - _spec.ClearField(bouncer.FieldCreatedAt, field.TypeTime) - } if value, ok := bu.mutation.UpdatedAt(); ok { _spec.SetField(bouncer.FieldUpdatedAt, field.TypeTime, value) } - if bu.mutation.UpdatedAtCleared() { - _spec.ClearField(bouncer.FieldUpdatedAt, field.TypeTime) - } if value, ok := bu.mutation.Name(); ok { _spec.SetField(bouncer.FieldName, field.TypeString, value) } @@ -333,9 +319,11 @@ func (buo *BouncerUpdateOne) SetCreatedAt(t time.Time) *BouncerUpdateOne { return buo } -// ClearCreatedAt clears the value of the 
"created_at" field. -func (buo *BouncerUpdateOne) ClearCreatedAt() *BouncerUpdateOne { - buo.mutation.ClearCreatedAt() +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (buo *BouncerUpdateOne) SetNillableCreatedAt(t *time.Time) *BouncerUpdateOne { + if t != nil { + buo.SetCreatedAt(*t) + } return buo } @@ -345,12 +333,6 @@ func (buo *BouncerUpdateOne) SetUpdatedAt(t time.Time) *BouncerUpdateOne { return buo } -// ClearUpdatedAt clears the value of the "updated_at" field. -func (buo *BouncerUpdateOne) ClearUpdatedAt() *BouncerUpdateOne { - buo.mutation.ClearUpdatedAt() - return buo -} - // SetName sets the "name" field. func (buo *BouncerUpdateOne) SetName(s string) *BouncerUpdateOne { buo.mutation.SetName(s) @@ -549,11 +531,7 @@ func (buo *BouncerUpdateOne) ExecX(ctx context.Context) { // defaults sets the default values of the builder before save. func (buo *BouncerUpdateOne) defaults() { - if _, ok := buo.mutation.CreatedAt(); !ok && !buo.mutation.CreatedAtCleared() { - v := bouncer.UpdateDefaultCreatedAt() - buo.mutation.SetCreatedAt(v) - } - if _, ok := buo.mutation.UpdatedAt(); !ok && !buo.mutation.UpdatedAtCleared() { + if _, ok := buo.mutation.UpdatedAt(); !ok { v := bouncer.UpdateDefaultUpdatedAt() buo.mutation.SetUpdatedAt(v) } @@ -588,15 +566,9 @@ func (buo *BouncerUpdateOne) sqlSave(ctx context.Context) (_node *Bouncer, err e if value, ok := buo.mutation.CreatedAt(); ok { _spec.SetField(bouncer.FieldCreatedAt, field.TypeTime, value) } - if buo.mutation.CreatedAtCleared() { - _spec.ClearField(bouncer.FieldCreatedAt, field.TypeTime) - } if value, ok := buo.mutation.UpdatedAt(); ok { _spec.SetField(bouncer.FieldUpdatedAt, field.TypeTime, value) } - if buo.mutation.UpdatedAtCleared() { - _spec.ClearField(bouncer.FieldUpdatedAt, field.TypeTime) - } if value, ok := buo.mutation.Name(); ok { _spec.SetField(bouncer.FieldName, field.TypeString, value) } diff --git a/pkg/database/ent/configitem.go 
b/pkg/database/ent/configitem.go index 467e54386f6..bdf23ef4948 100644 --- a/pkg/database/ent/configitem.go +++ b/pkg/database/ent/configitem.go @@ -18,9 +18,9 @@ type ConfigItem struct { // ID of the ent. ID int `json:"id,omitempty"` // CreatedAt holds the value of the "created_at" field. - CreatedAt *time.Time `json:"created_at"` + CreatedAt time.Time `json:"created_at"` // UpdatedAt holds the value of the "updated_at" field. - UpdatedAt *time.Time `json:"updated_at"` + UpdatedAt time.Time `json:"updated_at"` // Name holds the value of the "name" field. Name string `json:"name"` // Value holds the value of the "value" field. @@ -64,15 +64,13 @@ func (ci *ConfigItem) assignValues(columns []string, values []any) error { if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field created_at", values[i]) } else if value.Valid { - ci.CreatedAt = new(time.Time) - *ci.CreatedAt = value.Time + ci.CreatedAt = value.Time } case configitem.FieldUpdatedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field updated_at", values[i]) } else if value.Valid { - ci.UpdatedAt = new(time.Time) - *ci.UpdatedAt = value.Time + ci.UpdatedAt = value.Time } case configitem.FieldName: if value, ok := values[i].(*sql.NullString); !ok { @@ -122,15 +120,11 @@ func (ci *ConfigItem) String() string { var builder strings.Builder builder.WriteString("ConfigItem(") builder.WriteString(fmt.Sprintf("id=%v, ", ci.ID)) - if v := ci.CreatedAt; v != nil { - builder.WriteString("created_at=") - builder.WriteString(v.Format(time.ANSIC)) - } + builder.WriteString("created_at=") + builder.WriteString(ci.CreatedAt.Format(time.ANSIC)) builder.WriteString(", ") - if v := ci.UpdatedAt; v != nil { - builder.WriteString("updated_at=") - builder.WriteString(v.Format(time.ANSIC)) - } + builder.WriteString("updated_at=") + builder.WriteString(ci.UpdatedAt.Format(time.ANSIC)) builder.WriteString(", ") builder.WriteString("name=") 
builder.WriteString(ci.Name) diff --git a/pkg/database/ent/configitem/configitem.go b/pkg/database/ent/configitem/configitem.go index a6ff6c32d57..611d81a3960 100644 --- a/pkg/database/ent/configitem/configitem.go +++ b/pkg/database/ent/configitem/configitem.go @@ -47,8 +47,6 @@ func ValidColumn(column string) bool { var ( // DefaultCreatedAt holds the default value on creation for the "created_at" field. DefaultCreatedAt func() time.Time - // UpdateDefaultCreatedAt holds the default value on update for the "created_at" field. - UpdateDefaultCreatedAt func() time.Time // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. DefaultUpdatedAt func() time.Time // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. diff --git a/pkg/database/ent/configitem/where.go b/pkg/database/ent/configitem/where.go index 767f0b420f1..48ae792fd72 100644 --- a/pkg/database/ent/configitem/where.go +++ b/pkg/database/ent/configitem/where.go @@ -114,16 +114,6 @@ func CreatedAtLTE(v time.Time) predicate.ConfigItem { return predicate.ConfigItem(sql.FieldLTE(FieldCreatedAt, v)) } -// CreatedAtIsNil applies the IsNil predicate on the "created_at" field. -func CreatedAtIsNil() predicate.ConfigItem { - return predicate.ConfigItem(sql.FieldIsNull(FieldCreatedAt)) -} - -// CreatedAtNotNil applies the NotNil predicate on the "created_at" field. -func CreatedAtNotNil() predicate.ConfigItem { - return predicate.ConfigItem(sql.FieldNotNull(FieldCreatedAt)) -} - // UpdatedAtEQ applies the EQ predicate on the "updated_at" field. func UpdatedAtEQ(v time.Time) predicate.ConfigItem { return predicate.ConfigItem(sql.FieldEQ(FieldUpdatedAt, v)) @@ -164,16 +154,6 @@ func UpdatedAtLTE(v time.Time) predicate.ConfigItem { return predicate.ConfigItem(sql.FieldLTE(FieldUpdatedAt, v)) } -// UpdatedAtIsNil applies the IsNil predicate on the "updated_at" field. 
-func UpdatedAtIsNil() predicate.ConfigItem { - return predicate.ConfigItem(sql.FieldIsNull(FieldUpdatedAt)) -} - -// UpdatedAtNotNil applies the NotNil predicate on the "updated_at" field. -func UpdatedAtNotNil() predicate.ConfigItem { - return predicate.ConfigItem(sql.FieldNotNull(FieldUpdatedAt)) -} - // NameEQ applies the EQ predicate on the "name" field. func NameEQ(v string) predicate.ConfigItem { return predicate.ConfigItem(sql.FieldEQ(FieldName, v)) diff --git a/pkg/database/ent/configitem_create.go b/pkg/database/ent/configitem_create.go index 19e73dea41c..a2679927aee 100644 --- a/pkg/database/ent/configitem_create.go +++ b/pkg/database/ent/configitem_create.go @@ -107,6 +107,12 @@ func (cic *ConfigItemCreate) defaults() { // check runs all checks and user-defined validators on the builder. func (cic *ConfigItemCreate) check() error { + if _, ok := cic.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "ConfigItem.created_at"`)} + } + if _, ok := cic.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "ConfigItem.updated_at"`)} + } if _, ok := cic.mutation.Name(); !ok { return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "ConfigItem.name"`)} } @@ -141,11 +147,11 @@ func (cic *ConfigItemCreate) createSpec() (*ConfigItem, *sqlgraph.CreateSpec) { ) if value, ok := cic.mutation.CreatedAt(); ok { _spec.SetField(configitem.FieldCreatedAt, field.TypeTime, value) - _node.CreatedAt = &value + _node.CreatedAt = value } if value, ok := cic.mutation.UpdatedAt(); ok { _spec.SetField(configitem.FieldUpdatedAt, field.TypeTime, value) - _node.UpdatedAt = &value + _node.UpdatedAt = value } if value, ok := cic.mutation.Name(); ok { _spec.SetField(configitem.FieldName, field.TypeString, value) diff --git a/pkg/database/ent/configitem_update.go b/pkg/database/ent/configitem_update.go index 
11fb0755191..d4f1f15d23a 100644 --- a/pkg/database/ent/configitem_update.go +++ b/pkg/database/ent/configitem_update.go @@ -28,30 +28,12 @@ func (ciu *ConfigItemUpdate) Where(ps ...predicate.ConfigItem) *ConfigItemUpdate return ciu } -// SetCreatedAt sets the "created_at" field. -func (ciu *ConfigItemUpdate) SetCreatedAt(t time.Time) *ConfigItemUpdate { - ciu.mutation.SetCreatedAt(t) - return ciu -} - -// ClearCreatedAt clears the value of the "created_at" field. -func (ciu *ConfigItemUpdate) ClearCreatedAt() *ConfigItemUpdate { - ciu.mutation.ClearCreatedAt() - return ciu -} - // SetUpdatedAt sets the "updated_at" field. func (ciu *ConfigItemUpdate) SetUpdatedAt(t time.Time) *ConfigItemUpdate { ciu.mutation.SetUpdatedAt(t) return ciu } -// ClearUpdatedAt clears the value of the "updated_at" field. -func (ciu *ConfigItemUpdate) ClearUpdatedAt() *ConfigItemUpdate { - ciu.mutation.ClearUpdatedAt() - return ciu -} - // SetName sets the "name" field. func (ciu *ConfigItemUpdate) SetName(s string) *ConfigItemUpdate { ciu.mutation.SetName(s) @@ -115,11 +97,7 @@ func (ciu *ConfigItemUpdate) ExecX(ctx context.Context) { // defaults sets the default values of the builder before save. 
func (ciu *ConfigItemUpdate) defaults() { - if _, ok := ciu.mutation.CreatedAt(); !ok && !ciu.mutation.CreatedAtCleared() { - v := configitem.UpdateDefaultCreatedAt() - ciu.mutation.SetCreatedAt(v) - } - if _, ok := ciu.mutation.UpdatedAt(); !ok && !ciu.mutation.UpdatedAtCleared() { + if _, ok := ciu.mutation.UpdatedAt(); !ok { v := configitem.UpdateDefaultUpdatedAt() ciu.mutation.SetUpdatedAt(v) } @@ -134,18 +112,9 @@ func (ciu *ConfigItemUpdate) sqlSave(ctx context.Context) (n int, err error) { } } } - if value, ok := ciu.mutation.CreatedAt(); ok { - _spec.SetField(configitem.FieldCreatedAt, field.TypeTime, value) - } - if ciu.mutation.CreatedAtCleared() { - _spec.ClearField(configitem.FieldCreatedAt, field.TypeTime) - } if value, ok := ciu.mutation.UpdatedAt(); ok { _spec.SetField(configitem.FieldUpdatedAt, field.TypeTime, value) } - if ciu.mutation.UpdatedAtCleared() { - _spec.ClearField(configitem.FieldUpdatedAt, field.TypeTime) - } if value, ok := ciu.mutation.Name(); ok { _spec.SetField(configitem.FieldName, field.TypeString, value) } @@ -172,30 +141,12 @@ type ConfigItemUpdateOne struct { mutation *ConfigItemMutation } -// SetCreatedAt sets the "created_at" field. -func (ciuo *ConfigItemUpdateOne) SetCreatedAt(t time.Time) *ConfigItemUpdateOne { - ciuo.mutation.SetCreatedAt(t) - return ciuo -} - -// ClearCreatedAt clears the value of the "created_at" field. -func (ciuo *ConfigItemUpdateOne) ClearCreatedAt() *ConfigItemUpdateOne { - ciuo.mutation.ClearCreatedAt() - return ciuo -} - // SetUpdatedAt sets the "updated_at" field. func (ciuo *ConfigItemUpdateOne) SetUpdatedAt(t time.Time) *ConfigItemUpdateOne { ciuo.mutation.SetUpdatedAt(t) return ciuo } -// ClearUpdatedAt clears the value of the "updated_at" field. -func (ciuo *ConfigItemUpdateOne) ClearUpdatedAt() *ConfigItemUpdateOne { - ciuo.mutation.ClearUpdatedAt() - return ciuo -} - // SetName sets the "name" field. 
func (ciuo *ConfigItemUpdateOne) SetName(s string) *ConfigItemUpdateOne { ciuo.mutation.SetName(s) @@ -272,11 +223,7 @@ func (ciuo *ConfigItemUpdateOne) ExecX(ctx context.Context) { // defaults sets the default values of the builder before save. func (ciuo *ConfigItemUpdateOne) defaults() { - if _, ok := ciuo.mutation.CreatedAt(); !ok && !ciuo.mutation.CreatedAtCleared() { - v := configitem.UpdateDefaultCreatedAt() - ciuo.mutation.SetCreatedAt(v) - } - if _, ok := ciuo.mutation.UpdatedAt(); !ok && !ciuo.mutation.UpdatedAtCleared() { + if _, ok := ciuo.mutation.UpdatedAt(); !ok { v := configitem.UpdateDefaultUpdatedAt() ciuo.mutation.SetUpdatedAt(v) } @@ -308,18 +255,9 @@ func (ciuo *ConfigItemUpdateOne) sqlSave(ctx context.Context) (_node *ConfigItem } } } - if value, ok := ciuo.mutation.CreatedAt(); ok { - _spec.SetField(configitem.FieldCreatedAt, field.TypeTime, value) - } - if ciuo.mutation.CreatedAtCleared() { - _spec.ClearField(configitem.FieldCreatedAt, field.TypeTime) - } if value, ok := ciuo.mutation.UpdatedAt(); ok { _spec.SetField(configitem.FieldUpdatedAt, field.TypeTime, value) } - if ciuo.mutation.UpdatedAtCleared() { - _spec.ClearField(configitem.FieldUpdatedAt, field.TypeTime) - } if value, ok := ciuo.mutation.Name(); ok { _spec.SetField(configitem.FieldName, field.TypeString, value) } diff --git a/pkg/database/ent/decision.go b/pkg/database/ent/decision.go index 8a08bc1dfd4..1cc0df4c784 100644 --- a/pkg/database/ent/decision.go +++ b/pkg/database/ent/decision.go @@ -19,9 +19,9 @@ type Decision struct { // ID of the ent. ID int `json:"id,omitempty"` // CreatedAt holds the value of the "created_at" field. - CreatedAt *time.Time `json:"created_at,omitempty"` + CreatedAt time.Time `json:"created_at,omitempty"` // UpdatedAt holds the value of the "updated_at" field. - UpdatedAt *time.Time `json:"updated_at,omitempty"` + UpdatedAt time.Time `json:"updated_at,omitempty"` // Until holds the value of the "until" field. 
Until *time.Time `json:"until,omitempty"` // Scenario holds the value of the "scenario" field. @@ -116,15 +116,13 @@ func (d *Decision) assignValues(columns []string, values []any) error { if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field created_at", values[i]) } else if value.Valid { - d.CreatedAt = new(time.Time) - *d.CreatedAt = value.Time + d.CreatedAt = value.Time } case decision.FieldUpdatedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field updated_at", values[i]) } else if value.Valid { - d.UpdatedAt = new(time.Time) - *d.UpdatedAt = value.Time + d.UpdatedAt = value.Time } case decision.FieldUntil: if value, ok := values[i].(*sql.NullTime); !ok { @@ -252,15 +250,11 @@ func (d *Decision) String() string { var builder strings.Builder builder.WriteString("Decision(") builder.WriteString(fmt.Sprintf("id=%v, ", d.ID)) - if v := d.CreatedAt; v != nil { - builder.WriteString("created_at=") - builder.WriteString(v.Format(time.ANSIC)) - } + builder.WriteString("created_at=") + builder.WriteString(d.CreatedAt.Format(time.ANSIC)) builder.WriteString(", ") - if v := d.UpdatedAt; v != nil { - builder.WriteString("updated_at=") - builder.WriteString(v.Format(time.ANSIC)) - } + builder.WriteString("updated_at=") + builder.WriteString(d.UpdatedAt.Format(time.ANSIC)) builder.WriteString(", ") if v := d.Until; v != nil { builder.WriteString("until=") diff --git a/pkg/database/ent/decision/decision.go b/pkg/database/ent/decision/decision.go index d9f67623bd8..38c9721db48 100644 --- a/pkg/database/ent/decision/decision.go +++ b/pkg/database/ent/decision/decision.go @@ -93,8 +93,6 @@ func ValidColumn(column string) bool { var ( // DefaultCreatedAt holds the default value on creation for the "created_at" field. DefaultCreatedAt func() time.Time - // UpdateDefaultCreatedAt holds the default value on update for the "created_at" field. 
- UpdateDefaultCreatedAt func() time.Time // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. DefaultUpdatedAt func() time.Time // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. diff --git a/pkg/database/ent/decision/where.go b/pkg/database/ent/decision/where.go index 36374f5714d..99a1889e63e 100644 --- a/pkg/database/ent/decision/where.go +++ b/pkg/database/ent/decision/where.go @@ -175,16 +175,6 @@ func CreatedAtLTE(v time.Time) predicate.Decision { return predicate.Decision(sql.FieldLTE(FieldCreatedAt, v)) } -// CreatedAtIsNil applies the IsNil predicate on the "created_at" field. -func CreatedAtIsNil() predicate.Decision { - return predicate.Decision(sql.FieldIsNull(FieldCreatedAt)) -} - -// CreatedAtNotNil applies the NotNil predicate on the "created_at" field. -func CreatedAtNotNil() predicate.Decision { - return predicate.Decision(sql.FieldNotNull(FieldCreatedAt)) -} - // UpdatedAtEQ applies the EQ predicate on the "updated_at" field. func UpdatedAtEQ(v time.Time) predicate.Decision { return predicate.Decision(sql.FieldEQ(FieldUpdatedAt, v)) @@ -225,16 +215,6 @@ func UpdatedAtLTE(v time.Time) predicate.Decision { return predicate.Decision(sql.FieldLTE(FieldUpdatedAt, v)) } -// UpdatedAtIsNil applies the IsNil predicate on the "updated_at" field. -func UpdatedAtIsNil() predicate.Decision { - return predicate.Decision(sql.FieldIsNull(FieldUpdatedAt)) -} - -// UpdatedAtNotNil applies the NotNil predicate on the "updated_at" field. -func UpdatedAtNotNil() predicate.Decision { - return predicate.Decision(sql.FieldNotNull(FieldUpdatedAt)) -} - // UntilEQ applies the EQ predicate on the "until" field. 
func UntilEQ(v time.Time) predicate.Decision { return predicate.Decision(sql.FieldEQ(FieldUntil, v)) diff --git a/pkg/database/ent/decision_create.go b/pkg/database/ent/decision_create.go index 43a28c53114..f30d5452120 100644 --- a/pkg/database/ent/decision_create.go +++ b/pkg/database/ent/decision_create.go @@ -275,6 +275,12 @@ func (dc *DecisionCreate) defaults() { // check runs all checks and user-defined validators on the builder. func (dc *DecisionCreate) check() error { + if _, ok := dc.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "Decision.created_at"`)} + } + if _, ok := dc.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "Decision.updated_at"`)} + } if _, ok := dc.mutation.Scenario(); !ok { return &ValidationError{Name: "scenario", err: errors.New(`ent: missing required field "Decision.scenario"`)} } @@ -321,11 +327,11 @@ func (dc *DecisionCreate) createSpec() (*Decision, *sqlgraph.CreateSpec) { ) if value, ok := dc.mutation.CreatedAt(); ok { _spec.SetField(decision.FieldCreatedAt, field.TypeTime, value) - _node.CreatedAt = &value + _node.CreatedAt = value } if value, ok := dc.mutation.UpdatedAt(); ok { _spec.SetField(decision.FieldUpdatedAt, field.TypeTime, value) - _node.UpdatedAt = &value + _node.UpdatedAt = value } if value, ok := dc.mutation.Until(); ok { _spec.SetField(decision.FieldUntil, field.TypeTime, value) diff --git a/pkg/database/ent/decision_update.go b/pkg/database/ent/decision_update.go index 182457e9f63..1bcb42f8c1f 100644 --- a/pkg/database/ent/decision_update.go +++ b/pkg/database/ent/decision_update.go @@ -29,30 +29,12 @@ func (du *DecisionUpdate) Where(ps ...predicate.Decision) *DecisionUpdate { return du } -// SetCreatedAt sets the "created_at" field. 
-func (du *DecisionUpdate) SetCreatedAt(t time.Time) *DecisionUpdate { - du.mutation.SetCreatedAt(t) - return du -} - -// ClearCreatedAt clears the value of the "created_at" field. -func (du *DecisionUpdate) ClearCreatedAt() *DecisionUpdate { - du.mutation.ClearCreatedAt() - return du -} - // SetUpdatedAt sets the "updated_at" field. func (du *DecisionUpdate) SetUpdatedAt(t time.Time) *DecisionUpdate { du.mutation.SetUpdatedAt(t) return du } -// ClearUpdatedAt clears the value of the "updated_at" field. -func (du *DecisionUpdate) ClearUpdatedAt() *DecisionUpdate { - du.mutation.ClearUpdatedAt() - return du -} - // SetUntil sets the "until" field. func (du *DecisionUpdate) SetUntil(t time.Time) *DecisionUpdate { du.mutation.SetUntil(t) @@ -392,11 +374,7 @@ func (du *DecisionUpdate) ExecX(ctx context.Context) { // defaults sets the default values of the builder before save. func (du *DecisionUpdate) defaults() { - if _, ok := du.mutation.CreatedAt(); !ok && !du.mutation.CreatedAtCleared() { - v := decision.UpdateDefaultCreatedAt() - du.mutation.SetCreatedAt(v) - } - if _, ok := du.mutation.UpdatedAt(); !ok && !du.mutation.UpdatedAtCleared() { + if _, ok := du.mutation.UpdatedAt(); !ok { v := decision.UpdateDefaultUpdatedAt() du.mutation.SetUpdatedAt(v) } @@ -411,18 +389,9 @@ func (du *DecisionUpdate) sqlSave(ctx context.Context) (n int, err error) { } } } - if value, ok := du.mutation.CreatedAt(); ok { - _spec.SetField(decision.FieldCreatedAt, field.TypeTime, value) - } - if du.mutation.CreatedAtCleared() { - _spec.ClearField(decision.FieldCreatedAt, field.TypeTime) - } if value, ok := du.mutation.UpdatedAt(); ok { _spec.SetField(decision.FieldUpdatedAt, field.TypeTime, value) } - if du.mutation.UpdatedAtCleared() { - _spec.ClearField(decision.FieldUpdatedAt, field.TypeTime) - } if value, ok := du.mutation.Until(); ok { _spec.SetField(decision.FieldUntil, field.TypeTime, value) } @@ -547,30 +516,12 @@ type DecisionUpdateOne struct { mutation *DecisionMutation } -// 
SetCreatedAt sets the "created_at" field. -func (duo *DecisionUpdateOne) SetCreatedAt(t time.Time) *DecisionUpdateOne { - duo.mutation.SetCreatedAt(t) - return duo -} - -// ClearCreatedAt clears the value of the "created_at" field. -func (duo *DecisionUpdateOne) ClearCreatedAt() *DecisionUpdateOne { - duo.mutation.ClearCreatedAt() - return duo -} - // SetUpdatedAt sets the "updated_at" field. func (duo *DecisionUpdateOne) SetUpdatedAt(t time.Time) *DecisionUpdateOne { duo.mutation.SetUpdatedAt(t) return duo } -// ClearUpdatedAt clears the value of the "updated_at" field. -func (duo *DecisionUpdateOne) ClearUpdatedAt() *DecisionUpdateOne { - duo.mutation.ClearUpdatedAt() - return duo -} - // SetUntil sets the "until" field. func (duo *DecisionUpdateOne) SetUntil(t time.Time) *DecisionUpdateOne { duo.mutation.SetUntil(t) @@ -923,11 +874,7 @@ func (duo *DecisionUpdateOne) ExecX(ctx context.Context) { // defaults sets the default values of the builder before save. func (duo *DecisionUpdateOne) defaults() { - if _, ok := duo.mutation.CreatedAt(); !ok && !duo.mutation.CreatedAtCleared() { - v := decision.UpdateDefaultCreatedAt() - duo.mutation.SetCreatedAt(v) - } - if _, ok := duo.mutation.UpdatedAt(); !ok && !duo.mutation.UpdatedAtCleared() { + if _, ok := duo.mutation.UpdatedAt(); !ok { v := decision.UpdateDefaultUpdatedAt() duo.mutation.SetUpdatedAt(v) } @@ -959,18 +906,9 @@ func (duo *DecisionUpdateOne) sqlSave(ctx context.Context) (_node *Decision, err } } } - if value, ok := duo.mutation.CreatedAt(); ok { - _spec.SetField(decision.FieldCreatedAt, field.TypeTime, value) - } - if duo.mutation.CreatedAtCleared() { - _spec.ClearField(decision.FieldCreatedAt, field.TypeTime) - } if value, ok := duo.mutation.UpdatedAt(); ok { _spec.SetField(decision.FieldUpdatedAt, field.TypeTime, value) } - if duo.mutation.UpdatedAtCleared() { - _spec.ClearField(decision.FieldUpdatedAt, field.TypeTime) - } if value, ok := duo.mutation.Until(); ok { _spec.SetField(decision.FieldUntil, 
field.TypeTime, value) } diff --git a/pkg/database/ent/event.go b/pkg/database/ent/event.go index df4a2d10c8b..10e6d01c9d5 100644 --- a/pkg/database/ent/event.go +++ b/pkg/database/ent/event.go @@ -19,9 +19,9 @@ type Event struct { // ID of the ent. ID int `json:"id,omitempty"` // CreatedAt holds the value of the "created_at" field. - CreatedAt *time.Time `json:"created_at,omitempty"` + CreatedAt time.Time `json:"created_at,omitempty"` // UpdatedAt holds the value of the "updated_at" field. - UpdatedAt *time.Time `json:"updated_at,omitempty"` + UpdatedAt time.Time `json:"updated_at,omitempty"` // Time holds the value of the "time" field. Time time.Time `json:"time,omitempty"` // Serialized holds the value of the "serialized" field. @@ -92,15 +92,13 @@ func (e *Event) assignValues(columns []string, values []any) error { if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field created_at", values[i]) } else if value.Valid { - e.CreatedAt = new(time.Time) - *e.CreatedAt = value.Time + e.CreatedAt = value.Time } case event.FieldUpdatedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field updated_at", values[i]) } else if value.Valid { - e.UpdatedAt = new(time.Time) - *e.UpdatedAt = value.Time + e.UpdatedAt = value.Time } case event.FieldTime: if value, ok := values[i].(*sql.NullTime); !ok { @@ -161,15 +159,11 @@ func (e *Event) String() string { var builder strings.Builder builder.WriteString("Event(") builder.WriteString(fmt.Sprintf("id=%v, ", e.ID)) - if v := e.CreatedAt; v != nil { - builder.WriteString("created_at=") - builder.WriteString(v.Format(time.ANSIC)) - } + builder.WriteString("created_at=") + builder.WriteString(e.CreatedAt.Format(time.ANSIC)) builder.WriteString(", ") - if v := e.UpdatedAt; v != nil { - builder.WriteString("updated_at=") - builder.WriteString(v.Format(time.ANSIC)) - } + builder.WriteString("updated_at=") + 
builder.WriteString(e.UpdatedAt.Format(time.ANSIC)) builder.WriteString(", ") builder.WriteString("time=") builder.WriteString(e.Time.Format(time.ANSIC)) diff --git a/pkg/database/ent/event/event.go b/pkg/database/ent/event/event.go index 48f5a355824..c975a612669 100644 --- a/pkg/database/ent/event/event.go +++ b/pkg/database/ent/event/event.go @@ -60,8 +60,6 @@ func ValidColumn(column string) bool { var ( // DefaultCreatedAt holds the default value on creation for the "created_at" field. DefaultCreatedAt func() time.Time - // UpdateDefaultCreatedAt holds the default value on update for the "created_at" field. - UpdateDefaultCreatedAt func() time.Time // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. DefaultUpdatedAt func() time.Time // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. diff --git a/pkg/database/ent/event/where.go b/pkg/database/ent/event/where.go index 238bea988bd..d420b125026 100644 --- a/pkg/database/ent/event/where.go +++ b/pkg/database/ent/event/where.go @@ -120,16 +120,6 @@ func CreatedAtLTE(v time.Time) predicate.Event { return predicate.Event(sql.FieldLTE(FieldCreatedAt, v)) } -// CreatedAtIsNil applies the IsNil predicate on the "created_at" field. -func CreatedAtIsNil() predicate.Event { - return predicate.Event(sql.FieldIsNull(FieldCreatedAt)) -} - -// CreatedAtNotNil applies the NotNil predicate on the "created_at" field. -func CreatedAtNotNil() predicate.Event { - return predicate.Event(sql.FieldNotNull(FieldCreatedAt)) -} - // UpdatedAtEQ applies the EQ predicate on the "updated_at" field. func UpdatedAtEQ(v time.Time) predicate.Event { return predicate.Event(sql.FieldEQ(FieldUpdatedAt, v)) @@ -170,16 +160,6 @@ func UpdatedAtLTE(v time.Time) predicate.Event { return predicate.Event(sql.FieldLTE(FieldUpdatedAt, v)) } -// UpdatedAtIsNil applies the IsNil predicate on the "updated_at" field. 
-func UpdatedAtIsNil() predicate.Event { - return predicate.Event(sql.FieldIsNull(FieldUpdatedAt)) -} - -// UpdatedAtNotNil applies the NotNil predicate on the "updated_at" field. -func UpdatedAtNotNil() predicate.Event { - return predicate.Event(sql.FieldNotNull(FieldUpdatedAt)) -} - // TimeEQ applies the EQ predicate on the "time" field. func TimeEQ(v time.Time) predicate.Event { return predicate.Event(sql.FieldEQ(FieldTime, v)) diff --git a/pkg/database/ent/event_create.go b/pkg/database/ent/event_create.go index 98194f2fd33..36747babe47 100644 --- a/pkg/database/ent/event_create.go +++ b/pkg/database/ent/event_create.go @@ -141,6 +141,12 @@ func (ec *EventCreate) defaults() { // check runs all checks and user-defined validators on the builder. func (ec *EventCreate) check() error { + if _, ok := ec.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "Event.created_at"`)} + } + if _, ok := ec.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "Event.updated_at"`)} + } if _, ok := ec.mutation.Time(); !ok { return &ValidationError{Name: "time", err: errors.New(`ent: missing required field "Event.time"`)} } @@ -180,11 +186,11 @@ func (ec *EventCreate) createSpec() (*Event, *sqlgraph.CreateSpec) { ) if value, ok := ec.mutation.CreatedAt(); ok { _spec.SetField(event.FieldCreatedAt, field.TypeTime, value) - _node.CreatedAt = &value + _node.CreatedAt = value } if value, ok := ec.mutation.UpdatedAt(); ok { _spec.SetField(event.FieldUpdatedAt, field.TypeTime, value) - _node.UpdatedAt = &value + _node.UpdatedAt = value } if value, ok := ec.mutation.Time(); ok { _spec.SetField(event.FieldTime, field.TypeTime, value) diff --git a/pkg/database/ent/event_update.go b/pkg/database/ent/event_update.go index a06178f79af..0bc8a7f9243 100644 --- a/pkg/database/ent/event_update.go +++ b/pkg/database/ent/event_update.go @@ -29,30 +29,12 @@ func (eu 
*EventUpdate) Where(ps ...predicate.Event) *EventUpdate { return eu } -// SetCreatedAt sets the "created_at" field. -func (eu *EventUpdate) SetCreatedAt(t time.Time) *EventUpdate { - eu.mutation.SetCreatedAt(t) - return eu -} - -// ClearCreatedAt clears the value of the "created_at" field. -func (eu *EventUpdate) ClearCreatedAt() *EventUpdate { - eu.mutation.ClearCreatedAt() - return eu -} - // SetUpdatedAt sets the "updated_at" field. func (eu *EventUpdate) SetUpdatedAt(t time.Time) *EventUpdate { eu.mutation.SetUpdatedAt(t) return eu } -// ClearUpdatedAt clears the value of the "updated_at" field. -func (eu *EventUpdate) ClearUpdatedAt() *EventUpdate { - eu.mutation.ClearUpdatedAt() - return eu -} - // SetTime sets the "time" field. func (eu *EventUpdate) SetTime(t time.Time) *EventUpdate { eu.mutation.SetTime(t) @@ -161,11 +143,7 @@ func (eu *EventUpdate) ExecX(ctx context.Context) { // defaults sets the default values of the builder before save. func (eu *EventUpdate) defaults() { - if _, ok := eu.mutation.CreatedAt(); !ok && !eu.mutation.CreatedAtCleared() { - v := event.UpdateDefaultCreatedAt() - eu.mutation.SetCreatedAt(v) - } - if _, ok := eu.mutation.UpdatedAt(); !ok && !eu.mutation.UpdatedAtCleared() { + if _, ok := eu.mutation.UpdatedAt(); !ok { v := event.UpdateDefaultUpdatedAt() eu.mutation.SetUpdatedAt(v) } @@ -193,18 +171,9 @@ func (eu *EventUpdate) sqlSave(ctx context.Context) (n int, err error) { } } } - if value, ok := eu.mutation.CreatedAt(); ok { - _spec.SetField(event.FieldCreatedAt, field.TypeTime, value) - } - if eu.mutation.CreatedAtCleared() { - _spec.ClearField(event.FieldCreatedAt, field.TypeTime) - } if value, ok := eu.mutation.UpdatedAt(); ok { _spec.SetField(event.FieldUpdatedAt, field.TypeTime, value) } - if eu.mutation.UpdatedAtCleared() { - _spec.ClearField(event.FieldUpdatedAt, field.TypeTime) - } if value, ok := eu.mutation.Time(); ok { _spec.SetField(event.FieldTime, field.TypeTime, value) } @@ -260,30 +229,12 @@ type 
EventUpdateOne struct { mutation *EventMutation } -// SetCreatedAt sets the "created_at" field. -func (euo *EventUpdateOne) SetCreatedAt(t time.Time) *EventUpdateOne { - euo.mutation.SetCreatedAt(t) - return euo -} - -// ClearCreatedAt clears the value of the "created_at" field. -func (euo *EventUpdateOne) ClearCreatedAt() *EventUpdateOne { - euo.mutation.ClearCreatedAt() - return euo -} - // SetUpdatedAt sets the "updated_at" field. func (euo *EventUpdateOne) SetUpdatedAt(t time.Time) *EventUpdateOne { euo.mutation.SetUpdatedAt(t) return euo } -// ClearUpdatedAt clears the value of the "updated_at" field. -func (euo *EventUpdateOne) ClearUpdatedAt() *EventUpdateOne { - euo.mutation.ClearUpdatedAt() - return euo -} - // SetTime sets the "time" field. func (euo *EventUpdateOne) SetTime(t time.Time) *EventUpdateOne { euo.mutation.SetTime(t) @@ -405,11 +356,7 @@ func (euo *EventUpdateOne) ExecX(ctx context.Context) { // defaults sets the default values of the builder before save. func (euo *EventUpdateOne) defaults() { - if _, ok := euo.mutation.CreatedAt(); !ok && !euo.mutation.CreatedAtCleared() { - v := event.UpdateDefaultCreatedAt() - euo.mutation.SetCreatedAt(v) - } - if _, ok := euo.mutation.UpdatedAt(); !ok && !euo.mutation.UpdatedAtCleared() { + if _, ok := euo.mutation.UpdatedAt(); !ok { v := event.UpdateDefaultUpdatedAt() euo.mutation.SetUpdatedAt(v) } @@ -454,18 +401,9 @@ func (euo *EventUpdateOne) sqlSave(ctx context.Context) (_node *Event, err error } } } - if value, ok := euo.mutation.CreatedAt(); ok { - _spec.SetField(event.FieldCreatedAt, field.TypeTime, value) - } - if euo.mutation.CreatedAtCleared() { - _spec.ClearField(event.FieldCreatedAt, field.TypeTime) - } if value, ok := euo.mutation.UpdatedAt(); ok { _spec.SetField(event.FieldUpdatedAt, field.TypeTime, value) } - if euo.mutation.UpdatedAtCleared() { - _spec.ClearField(event.FieldUpdatedAt, field.TypeTime) - } if value, ok := euo.mutation.Time(); ok { _spec.SetField(event.FieldTime, 
field.TypeTime, value) } diff --git a/pkg/database/ent/lock_update.go b/pkg/database/ent/lock_update.go index dc61dfdfde1..988363abd17 100644 --- a/pkg/database/ent/lock_update.go +++ b/pkg/database/ent/lock_update.go @@ -28,20 +28,6 @@ func (lu *LockUpdate) Where(ps ...predicate.Lock) *LockUpdate { return lu } -// SetName sets the "name" field. -func (lu *LockUpdate) SetName(s string) *LockUpdate { - lu.mutation.SetName(s) - return lu -} - -// SetNillableName sets the "name" field if the given value is not nil. -func (lu *LockUpdate) SetNillableName(s *string) *LockUpdate { - if s != nil { - lu.SetName(*s) - } - return lu -} - // SetCreatedAt sets the "created_at" field. func (lu *LockUpdate) SetCreatedAt(t time.Time) *LockUpdate { lu.mutation.SetCreatedAt(t) @@ -97,9 +83,6 @@ func (lu *LockUpdate) sqlSave(ctx context.Context) (n int, err error) { } } } - if value, ok := lu.mutation.Name(); ok { - _spec.SetField(lock.FieldName, field.TypeString, value) - } if value, ok := lu.mutation.CreatedAt(); ok { _spec.SetField(lock.FieldCreatedAt, field.TypeTime, value) } @@ -123,20 +106,6 @@ type LockUpdateOne struct { mutation *LockMutation } -// SetName sets the "name" field. -func (luo *LockUpdateOne) SetName(s string) *LockUpdateOne { - luo.mutation.SetName(s) - return luo -} - -// SetNillableName sets the "name" field if the given value is not nil. -func (luo *LockUpdateOne) SetNillableName(s *string) *LockUpdateOne { - if s != nil { - luo.SetName(*s) - } - return luo -} - // SetCreatedAt sets the "created_at" field. 
func (luo *LockUpdateOne) SetCreatedAt(t time.Time) *LockUpdateOne { luo.mutation.SetCreatedAt(t) @@ -222,9 +191,6 @@ func (luo *LockUpdateOne) sqlSave(ctx context.Context) (_node *Lock, err error) } } } - if value, ok := luo.mutation.Name(); ok { - _spec.SetField(lock.FieldName, field.TypeString, value) - } if value, ok := luo.mutation.CreatedAt(); ok { _spec.SetField(lock.FieldCreatedAt, field.TypeTime, value) } diff --git a/pkg/database/ent/machine.go b/pkg/database/ent/machine.go index 346a8d084ba..475eab72ecd 100644 --- a/pkg/database/ent/machine.go +++ b/pkg/database/ent/machine.go @@ -18,9 +18,9 @@ type Machine struct { // ID of the ent. ID int `json:"id,omitempty"` // CreatedAt holds the value of the "created_at" field. - CreatedAt *time.Time `json:"created_at,omitempty"` + CreatedAt time.Time `json:"created_at,omitempty"` // UpdatedAt holds the value of the "updated_at" field. - UpdatedAt *time.Time `json:"updated_at,omitempty"` + UpdatedAt time.Time `json:"updated_at,omitempty"` // LastPush holds the value of the "last_push" field. LastPush *time.Time `json:"last_push,omitempty"` // LastHeartbeat holds the value of the "last_heartbeat" field. 
@@ -103,15 +103,13 @@ func (m *Machine) assignValues(columns []string, values []any) error { if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field created_at", values[i]) } else if value.Valid { - m.CreatedAt = new(time.Time) - *m.CreatedAt = value.Time + m.CreatedAt = value.Time } case machine.FieldUpdatedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field updated_at", values[i]) } else if value.Valid { - m.UpdatedAt = new(time.Time) - *m.UpdatedAt = value.Time + m.UpdatedAt = value.Time } case machine.FieldLastPush: if value, ok := values[i].(*sql.NullTime); !ok { @@ -216,15 +214,11 @@ func (m *Machine) String() string { var builder strings.Builder builder.WriteString("Machine(") builder.WriteString(fmt.Sprintf("id=%v, ", m.ID)) - if v := m.CreatedAt; v != nil { - builder.WriteString("created_at=") - builder.WriteString(v.Format(time.ANSIC)) - } + builder.WriteString("created_at=") + builder.WriteString(m.CreatedAt.Format(time.ANSIC)) builder.WriteString(", ") - if v := m.UpdatedAt; v != nil { - builder.WriteString("updated_at=") - builder.WriteString(v.Format(time.ANSIC)) - } + builder.WriteString("updated_at=") + builder.WriteString(m.UpdatedAt.Format(time.ANSIC)) builder.WriteString(", ") if v := m.LastPush; v != nil { builder.WriteString("last_push=") diff --git a/pkg/database/ent/machine/machine.go b/pkg/database/ent/machine/machine.go index 5456935e04c..46ea6deb03d 100644 --- a/pkg/database/ent/machine/machine.go +++ b/pkg/database/ent/machine/machine.go @@ -81,20 +81,14 @@ func ValidColumn(column string) bool { var ( // DefaultCreatedAt holds the default value on creation for the "created_at" field. DefaultCreatedAt func() time.Time - // UpdateDefaultCreatedAt holds the default value on update for the "created_at" field. - UpdateDefaultCreatedAt func() time.Time // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. 
DefaultUpdatedAt func() time.Time // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. UpdateDefaultUpdatedAt func() time.Time // DefaultLastPush holds the default value on creation for the "last_push" field. DefaultLastPush func() time.Time - // UpdateDefaultLastPush holds the default value on update for the "last_push" field. - UpdateDefaultLastPush func() time.Time // DefaultLastHeartbeat holds the default value on creation for the "last_heartbeat" field. DefaultLastHeartbeat func() time.Time - // UpdateDefaultLastHeartbeat holds the default value on update for the "last_heartbeat" field. - UpdateDefaultLastHeartbeat func() time.Time // ScenariosValidator is a validator for the "scenarios" field. It is called by the builders before save. ScenariosValidator func(string) error // DefaultIsValidated holds the default value on creation for the "isValidated" field. diff --git a/pkg/database/ent/machine/where.go b/pkg/database/ent/machine/where.go index e9d00e7e01e..24c9ab154c8 100644 --- a/pkg/database/ent/machine/where.go +++ b/pkg/database/ent/machine/where.go @@ -155,16 +155,6 @@ func CreatedAtLTE(v time.Time) predicate.Machine { return predicate.Machine(sql.FieldLTE(FieldCreatedAt, v)) } -// CreatedAtIsNil applies the IsNil predicate on the "created_at" field. -func CreatedAtIsNil() predicate.Machine { - return predicate.Machine(sql.FieldIsNull(FieldCreatedAt)) -} - -// CreatedAtNotNil applies the NotNil predicate on the "created_at" field. -func CreatedAtNotNil() predicate.Machine { - return predicate.Machine(sql.FieldNotNull(FieldCreatedAt)) -} - // UpdatedAtEQ applies the EQ predicate on the "updated_at" field. func UpdatedAtEQ(v time.Time) predicate.Machine { return predicate.Machine(sql.FieldEQ(FieldUpdatedAt, v)) @@ -205,16 +195,6 @@ func UpdatedAtLTE(v time.Time) predicate.Machine { return predicate.Machine(sql.FieldLTE(FieldUpdatedAt, v)) } -// UpdatedAtIsNil applies the IsNil predicate on the "updated_at" field. 
-func UpdatedAtIsNil() predicate.Machine { - return predicate.Machine(sql.FieldIsNull(FieldUpdatedAt)) -} - -// UpdatedAtNotNil applies the NotNil predicate on the "updated_at" field. -func UpdatedAtNotNil() predicate.Machine { - return predicate.Machine(sql.FieldNotNull(FieldUpdatedAt)) -} - // LastPushEQ applies the EQ predicate on the "last_push" field. func LastPushEQ(v time.Time) predicate.Machine { return predicate.Machine(sql.FieldEQ(FieldLastPush, v)) diff --git a/pkg/database/ent/machine_create.go b/pkg/database/ent/machine_create.go index ff704e6ab74..8d4bfb74b2a 100644 --- a/pkg/database/ent/machine_create.go +++ b/pkg/database/ent/machine_create.go @@ -243,6 +243,12 @@ func (mc *MachineCreate) defaults() { // check runs all checks and user-defined validators on the builder. func (mc *MachineCreate) check() error { + if _, ok := mc.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "Machine.created_at"`)} + } + if _, ok := mc.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "Machine.updated_at"`)} + } if _, ok := mc.mutation.MachineId(); !ok { return &ValidationError{Name: "machineId", err: errors.New(`ent: missing required field "Machine.machineId"`)} } @@ -291,11 +297,11 @@ func (mc *MachineCreate) createSpec() (*Machine, *sqlgraph.CreateSpec) { ) if value, ok := mc.mutation.CreatedAt(); ok { _spec.SetField(machine.FieldCreatedAt, field.TypeTime, value) - _node.CreatedAt = &value + _node.CreatedAt = value } if value, ok := mc.mutation.UpdatedAt(); ok { _spec.SetField(machine.FieldUpdatedAt, field.TypeTime, value) - _node.UpdatedAt = &value + _node.UpdatedAt = value } if value, ok := mc.mutation.LastPush(); ok { _spec.SetField(machine.FieldLastPush, field.TypeTime, value) diff --git a/pkg/database/ent/machine_update.go b/pkg/database/ent/machine_update.go index 1f87ac04d6f..5fbd15220f9 100644 --- 
a/pkg/database/ent/machine_update.go +++ b/pkg/database/ent/machine_update.go @@ -29,36 +29,26 @@ func (mu *MachineUpdate) Where(ps ...predicate.Machine) *MachineUpdate { return mu } -// SetCreatedAt sets the "created_at" field. -func (mu *MachineUpdate) SetCreatedAt(t time.Time) *MachineUpdate { - mu.mutation.SetCreatedAt(t) - return mu -} - -// ClearCreatedAt clears the value of the "created_at" field. -func (mu *MachineUpdate) ClearCreatedAt() *MachineUpdate { - mu.mutation.ClearCreatedAt() - return mu -} - // SetUpdatedAt sets the "updated_at" field. func (mu *MachineUpdate) SetUpdatedAt(t time.Time) *MachineUpdate { mu.mutation.SetUpdatedAt(t) return mu } -// ClearUpdatedAt clears the value of the "updated_at" field. -func (mu *MachineUpdate) ClearUpdatedAt() *MachineUpdate { - mu.mutation.ClearUpdatedAt() - return mu -} - // SetLastPush sets the "last_push" field. func (mu *MachineUpdate) SetLastPush(t time.Time) *MachineUpdate { mu.mutation.SetLastPush(t) return mu } +// SetNillableLastPush sets the "last_push" field if the given value is not nil. +func (mu *MachineUpdate) SetNillableLastPush(t *time.Time) *MachineUpdate { + if t != nil { + mu.SetLastPush(*t) + } + return mu +} + // ClearLastPush clears the value of the "last_push" field. func (mu *MachineUpdate) ClearLastPush() *MachineUpdate { mu.mutation.ClearLastPush() @@ -71,23 +61,17 @@ func (mu *MachineUpdate) SetLastHeartbeat(t time.Time) *MachineUpdate { return mu } -// ClearLastHeartbeat clears the value of the "last_heartbeat" field. -func (mu *MachineUpdate) ClearLastHeartbeat() *MachineUpdate { - mu.mutation.ClearLastHeartbeat() - return mu -} - -// SetMachineId sets the "machineId" field. -func (mu *MachineUpdate) SetMachineId(s string) *MachineUpdate { - mu.mutation.SetMachineId(s) +// SetNillableLastHeartbeat sets the "last_heartbeat" field if the given value is not nil. 
+func (mu *MachineUpdate) SetNillableLastHeartbeat(t *time.Time) *MachineUpdate { + if t != nil { + mu.SetLastHeartbeat(*t) + } return mu } -// SetNillableMachineId sets the "machineId" field if the given value is not nil. -func (mu *MachineUpdate) SetNillableMachineId(s *string) *MachineUpdate { - if s != nil { - mu.SetMachineId(*s) - } +// ClearLastHeartbeat clears the value of the "last_heartbeat" field. +func (mu *MachineUpdate) ClearLastHeartbeat() *MachineUpdate { + mu.mutation.ClearLastHeartbeat() return mu } @@ -278,22 +262,10 @@ func (mu *MachineUpdate) ExecX(ctx context.Context) { // defaults sets the default values of the builder before save. func (mu *MachineUpdate) defaults() { - if _, ok := mu.mutation.CreatedAt(); !ok && !mu.mutation.CreatedAtCleared() { - v := machine.UpdateDefaultCreatedAt() - mu.mutation.SetCreatedAt(v) - } - if _, ok := mu.mutation.UpdatedAt(); !ok && !mu.mutation.UpdatedAtCleared() { + if _, ok := mu.mutation.UpdatedAt(); !ok { v := machine.UpdateDefaultUpdatedAt() mu.mutation.SetUpdatedAt(v) } - if _, ok := mu.mutation.LastPush(); !ok && !mu.mutation.LastPushCleared() { - v := machine.UpdateDefaultLastPush() - mu.mutation.SetLastPush(v) - } - if _, ok := mu.mutation.LastHeartbeat(); !ok && !mu.mutation.LastHeartbeatCleared() { - v := machine.UpdateDefaultLastHeartbeat() - mu.mutation.SetLastHeartbeat(v) - } } // check runs all checks and user-defined validators on the builder. 
@@ -318,18 +290,9 @@ func (mu *MachineUpdate) sqlSave(ctx context.Context) (n int, err error) { } } } - if value, ok := mu.mutation.CreatedAt(); ok { - _spec.SetField(machine.FieldCreatedAt, field.TypeTime, value) - } - if mu.mutation.CreatedAtCleared() { - _spec.ClearField(machine.FieldCreatedAt, field.TypeTime) - } if value, ok := mu.mutation.UpdatedAt(); ok { _spec.SetField(machine.FieldUpdatedAt, field.TypeTime, value) } - if mu.mutation.UpdatedAtCleared() { - _spec.ClearField(machine.FieldUpdatedAt, field.TypeTime) - } if value, ok := mu.mutation.LastPush(); ok { _spec.SetField(machine.FieldLastPush, field.TypeTime, value) } @@ -342,9 +305,6 @@ func (mu *MachineUpdate) sqlSave(ctx context.Context) (n int, err error) { if mu.mutation.LastHeartbeatCleared() { _spec.ClearField(machine.FieldLastHeartbeat, field.TypeTime) } - if value, ok := mu.mutation.MachineId(); ok { - _spec.SetField(machine.FieldMachineId, field.TypeString, value) - } if value, ok := mu.mutation.Password(); ok { _spec.SetField(machine.FieldPassword, field.TypeString, value) } @@ -440,36 +400,26 @@ type MachineUpdateOne struct { mutation *MachineMutation } -// SetCreatedAt sets the "created_at" field. -func (muo *MachineUpdateOne) SetCreatedAt(t time.Time) *MachineUpdateOne { - muo.mutation.SetCreatedAt(t) - return muo -} - -// ClearCreatedAt clears the value of the "created_at" field. -func (muo *MachineUpdateOne) ClearCreatedAt() *MachineUpdateOne { - muo.mutation.ClearCreatedAt() - return muo -} - // SetUpdatedAt sets the "updated_at" field. func (muo *MachineUpdateOne) SetUpdatedAt(t time.Time) *MachineUpdateOne { muo.mutation.SetUpdatedAt(t) return muo } -// ClearUpdatedAt clears the value of the "updated_at" field. -func (muo *MachineUpdateOne) ClearUpdatedAt() *MachineUpdateOne { - muo.mutation.ClearUpdatedAt() - return muo -} - // SetLastPush sets the "last_push" field. 
func (muo *MachineUpdateOne) SetLastPush(t time.Time) *MachineUpdateOne { muo.mutation.SetLastPush(t) return muo } +// SetNillableLastPush sets the "last_push" field if the given value is not nil. +func (muo *MachineUpdateOne) SetNillableLastPush(t *time.Time) *MachineUpdateOne { + if t != nil { + muo.SetLastPush(*t) + } + return muo +} + // ClearLastPush clears the value of the "last_push" field. func (muo *MachineUpdateOne) ClearLastPush() *MachineUpdateOne { muo.mutation.ClearLastPush() @@ -482,23 +432,17 @@ func (muo *MachineUpdateOne) SetLastHeartbeat(t time.Time) *MachineUpdateOne { return muo } -// ClearLastHeartbeat clears the value of the "last_heartbeat" field. -func (muo *MachineUpdateOne) ClearLastHeartbeat() *MachineUpdateOne { - muo.mutation.ClearLastHeartbeat() - return muo -} - -// SetMachineId sets the "machineId" field. -func (muo *MachineUpdateOne) SetMachineId(s string) *MachineUpdateOne { - muo.mutation.SetMachineId(s) +// SetNillableLastHeartbeat sets the "last_heartbeat" field if the given value is not nil. +func (muo *MachineUpdateOne) SetNillableLastHeartbeat(t *time.Time) *MachineUpdateOne { + if t != nil { + muo.SetLastHeartbeat(*t) + } return muo } -// SetNillableMachineId sets the "machineId" field if the given value is not nil. -func (muo *MachineUpdateOne) SetNillableMachineId(s *string) *MachineUpdateOne { - if s != nil { - muo.SetMachineId(*s) - } +// ClearLastHeartbeat clears the value of the "last_heartbeat" field. +func (muo *MachineUpdateOne) ClearLastHeartbeat() *MachineUpdateOne { + muo.mutation.ClearLastHeartbeat() return muo } @@ -702,22 +646,10 @@ func (muo *MachineUpdateOne) ExecX(ctx context.Context) { // defaults sets the default values of the builder before save. 
func (muo *MachineUpdateOne) defaults() { - if _, ok := muo.mutation.CreatedAt(); !ok && !muo.mutation.CreatedAtCleared() { - v := machine.UpdateDefaultCreatedAt() - muo.mutation.SetCreatedAt(v) - } - if _, ok := muo.mutation.UpdatedAt(); !ok && !muo.mutation.UpdatedAtCleared() { + if _, ok := muo.mutation.UpdatedAt(); !ok { v := machine.UpdateDefaultUpdatedAt() muo.mutation.SetUpdatedAt(v) } - if _, ok := muo.mutation.LastPush(); !ok && !muo.mutation.LastPushCleared() { - v := machine.UpdateDefaultLastPush() - muo.mutation.SetLastPush(v) - } - if _, ok := muo.mutation.LastHeartbeat(); !ok && !muo.mutation.LastHeartbeatCleared() { - v := machine.UpdateDefaultLastHeartbeat() - muo.mutation.SetLastHeartbeat(v) - } } // check runs all checks and user-defined validators on the builder. @@ -759,18 +691,9 @@ func (muo *MachineUpdateOne) sqlSave(ctx context.Context) (_node *Machine, err e } } } - if value, ok := muo.mutation.CreatedAt(); ok { - _spec.SetField(machine.FieldCreatedAt, field.TypeTime, value) - } - if muo.mutation.CreatedAtCleared() { - _spec.ClearField(machine.FieldCreatedAt, field.TypeTime) - } if value, ok := muo.mutation.UpdatedAt(); ok { _spec.SetField(machine.FieldUpdatedAt, field.TypeTime, value) } - if muo.mutation.UpdatedAtCleared() { - _spec.ClearField(machine.FieldUpdatedAt, field.TypeTime) - } if value, ok := muo.mutation.LastPush(); ok { _spec.SetField(machine.FieldLastPush, field.TypeTime, value) } @@ -783,9 +706,6 @@ func (muo *MachineUpdateOne) sqlSave(ctx context.Context) (_node *Machine, err e if muo.mutation.LastHeartbeatCleared() { _spec.ClearField(machine.FieldLastHeartbeat, field.TypeTime) } - if value, ok := muo.mutation.MachineId(); ok { - _spec.SetField(machine.FieldMachineId, field.TypeString, value) - } if value, ok := muo.mutation.Password(); ok { _spec.SetField(machine.FieldPassword, field.TypeString, value) } diff --git a/pkg/database/ent/meta.go b/pkg/database/ent/meta.go index cadc210937e..768358ca2bf 100644 --- 
a/pkg/database/ent/meta.go +++ b/pkg/database/ent/meta.go @@ -19,9 +19,9 @@ type Meta struct { // ID of the ent. ID int `json:"id,omitempty"` // CreatedAt holds the value of the "created_at" field. - CreatedAt *time.Time `json:"created_at,omitempty"` + CreatedAt time.Time `json:"created_at,omitempty"` // UpdatedAt holds the value of the "updated_at" field. - UpdatedAt *time.Time `json:"updated_at,omitempty"` + UpdatedAt time.Time `json:"updated_at,omitempty"` // Key holds the value of the "key" field. Key string `json:"key,omitempty"` // Value holds the value of the "value" field. @@ -92,15 +92,13 @@ func (m *Meta) assignValues(columns []string, values []any) error { if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field created_at", values[i]) } else if value.Valid { - m.CreatedAt = new(time.Time) - *m.CreatedAt = value.Time + m.CreatedAt = value.Time } case meta.FieldUpdatedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field updated_at", values[i]) } else if value.Valid { - m.UpdatedAt = new(time.Time) - *m.UpdatedAt = value.Time + m.UpdatedAt = value.Time } case meta.FieldKey: if value, ok := values[i].(*sql.NullString); !ok { @@ -161,15 +159,11 @@ func (m *Meta) String() string { var builder strings.Builder builder.WriteString("Meta(") builder.WriteString(fmt.Sprintf("id=%v, ", m.ID)) - if v := m.CreatedAt; v != nil { - builder.WriteString("created_at=") - builder.WriteString(v.Format(time.ANSIC)) - } + builder.WriteString("created_at=") + builder.WriteString(m.CreatedAt.Format(time.ANSIC)) builder.WriteString(", ") - if v := m.UpdatedAt; v != nil { - builder.WriteString("updated_at=") - builder.WriteString(v.Format(time.ANSIC)) - } + builder.WriteString("updated_at=") + builder.WriteString(m.UpdatedAt.Format(time.ANSIC)) builder.WriteString(", ") builder.WriteString("key=") builder.WriteString(m.Key) diff --git a/pkg/database/ent/meta/meta.go 
b/pkg/database/ent/meta/meta.go index 583496fb710..ff41361616a 100644 --- a/pkg/database/ent/meta/meta.go +++ b/pkg/database/ent/meta/meta.go @@ -60,8 +60,6 @@ func ValidColumn(column string) bool { var ( // DefaultCreatedAt holds the default value on creation for the "created_at" field. DefaultCreatedAt func() time.Time - // UpdateDefaultCreatedAt holds the default value on update for the "created_at" field. - UpdateDefaultCreatedAt func() time.Time // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. DefaultUpdatedAt func() time.Time // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. diff --git a/pkg/database/ent/meta/where.go b/pkg/database/ent/meta/where.go index 7fc99136972..6d5d54c0482 100644 --- a/pkg/database/ent/meta/where.go +++ b/pkg/database/ent/meta/where.go @@ -120,16 +120,6 @@ func CreatedAtLTE(v time.Time) predicate.Meta { return predicate.Meta(sql.FieldLTE(FieldCreatedAt, v)) } -// CreatedAtIsNil applies the IsNil predicate on the "created_at" field. -func CreatedAtIsNil() predicate.Meta { - return predicate.Meta(sql.FieldIsNull(FieldCreatedAt)) -} - -// CreatedAtNotNil applies the NotNil predicate on the "created_at" field. -func CreatedAtNotNil() predicate.Meta { - return predicate.Meta(sql.FieldNotNull(FieldCreatedAt)) -} - // UpdatedAtEQ applies the EQ predicate on the "updated_at" field. func UpdatedAtEQ(v time.Time) predicate.Meta { return predicate.Meta(sql.FieldEQ(FieldUpdatedAt, v)) @@ -170,16 +160,6 @@ func UpdatedAtLTE(v time.Time) predicate.Meta { return predicate.Meta(sql.FieldLTE(FieldUpdatedAt, v)) } -// UpdatedAtIsNil applies the IsNil predicate on the "updated_at" field. -func UpdatedAtIsNil() predicate.Meta { - return predicate.Meta(sql.FieldIsNull(FieldUpdatedAt)) -} - -// UpdatedAtNotNil applies the NotNil predicate on the "updated_at" field. 
-func UpdatedAtNotNil() predicate.Meta { - return predicate.Meta(sql.FieldNotNull(FieldUpdatedAt)) -} - // KeyEQ applies the EQ predicate on the "key" field. func KeyEQ(v string) predicate.Meta { return predicate.Meta(sql.FieldEQ(FieldKey, v)) diff --git a/pkg/database/ent/meta_create.go b/pkg/database/ent/meta_create.go index 3bf30f0def9..321c4bd7ab4 100644 --- a/pkg/database/ent/meta_create.go +++ b/pkg/database/ent/meta_create.go @@ -141,6 +141,12 @@ func (mc *MetaCreate) defaults() { // check runs all checks and user-defined validators on the builder. func (mc *MetaCreate) check() error { + if _, ok := mc.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "Meta.created_at"`)} + } + if _, ok := mc.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "Meta.updated_at"`)} + } if _, ok := mc.mutation.Key(); !ok { return &ValidationError{Name: "key", err: errors.New(`ent: missing required field "Meta.key"`)} } @@ -180,11 +186,11 @@ func (mc *MetaCreate) createSpec() (*Meta, *sqlgraph.CreateSpec) { ) if value, ok := mc.mutation.CreatedAt(); ok { _spec.SetField(meta.FieldCreatedAt, field.TypeTime, value) - _node.CreatedAt = &value + _node.CreatedAt = value } if value, ok := mc.mutation.UpdatedAt(); ok { _spec.SetField(meta.FieldUpdatedAt, field.TypeTime, value) - _node.UpdatedAt = &value + _node.UpdatedAt = value } if value, ok := mc.mutation.Key(); ok { _spec.SetField(meta.FieldKey, field.TypeString, value) diff --git a/pkg/database/ent/meta_update.go b/pkg/database/ent/meta_update.go index a1379faa130..76567c5eff7 100644 --- a/pkg/database/ent/meta_update.go +++ b/pkg/database/ent/meta_update.go @@ -35,9 +35,11 @@ func (mu *MetaUpdate) SetCreatedAt(t time.Time) *MetaUpdate { return mu } -// ClearCreatedAt clears the value of the "created_at" field. 
-func (mu *MetaUpdate) ClearCreatedAt() *MetaUpdate { - mu.mutation.ClearCreatedAt() +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (mu *MetaUpdate) SetNillableCreatedAt(t *time.Time) *MetaUpdate { + if t != nil { + mu.SetCreatedAt(*t) + } return mu } @@ -47,12 +49,6 @@ func (mu *MetaUpdate) SetUpdatedAt(t time.Time) *MetaUpdate { return mu } -// ClearUpdatedAt clears the value of the "updated_at" field. -func (mu *MetaUpdate) ClearUpdatedAt() *MetaUpdate { - mu.mutation.ClearUpdatedAt() - return mu -} - // SetKey sets the "key" field. func (mu *MetaUpdate) SetKey(s string) *MetaUpdate { mu.mutation.SetKey(s) @@ -161,11 +157,7 @@ func (mu *MetaUpdate) ExecX(ctx context.Context) { // defaults sets the default values of the builder before save. func (mu *MetaUpdate) defaults() { - if _, ok := mu.mutation.CreatedAt(); !ok && !mu.mutation.CreatedAtCleared() { - v := meta.UpdateDefaultCreatedAt() - mu.mutation.SetCreatedAt(v) - } - if _, ok := mu.mutation.UpdatedAt(); !ok && !mu.mutation.UpdatedAtCleared() { + if _, ok := mu.mutation.UpdatedAt(); !ok { v := meta.UpdateDefaultUpdatedAt() mu.mutation.SetUpdatedAt(v) } @@ -196,15 +188,9 @@ func (mu *MetaUpdate) sqlSave(ctx context.Context) (n int, err error) { if value, ok := mu.mutation.CreatedAt(); ok { _spec.SetField(meta.FieldCreatedAt, field.TypeTime, value) } - if mu.mutation.CreatedAtCleared() { - _spec.ClearField(meta.FieldCreatedAt, field.TypeTime) - } if value, ok := mu.mutation.UpdatedAt(); ok { _spec.SetField(meta.FieldUpdatedAt, field.TypeTime, value) } - if mu.mutation.UpdatedAtCleared() { - _spec.ClearField(meta.FieldUpdatedAt, field.TypeTime) - } if value, ok := mu.mutation.Key(); ok { _spec.SetField(meta.FieldKey, field.TypeString, value) } @@ -266,9 +252,11 @@ func (muo *MetaUpdateOne) SetCreatedAt(t time.Time) *MetaUpdateOne { return muo } -// ClearCreatedAt clears the value of the "created_at" field. 
-func (muo *MetaUpdateOne) ClearCreatedAt() *MetaUpdateOne { - muo.mutation.ClearCreatedAt() +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (muo *MetaUpdateOne) SetNillableCreatedAt(t *time.Time) *MetaUpdateOne { + if t != nil { + muo.SetCreatedAt(*t) + } return muo } @@ -278,12 +266,6 @@ func (muo *MetaUpdateOne) SetUpdatedAt(t time.Time) *MetaUpdateOne { return muo } -// ClearUpdatedAt clears the value of the "updated_at" field. -func (muo *MetaUpdateOne) ClearUpdatedAt() *MetaUpdateOne { - muo.mutation.ClearUpdatedAt() - return muo -} - // SetKey sets the "key" field. func (muo *MetaUpdateOne) SetKey(s string) *MetaUpdateOne { muo.mutation.SetKey(s) @@ -405,11 +387,7 @@ func (muo *MetaUpdateOne) ExecX(ctx context.Context) { // defaults sets the default values of the builder before save. func (muo *MetaUpdateOne) defaults() { - if _, ok := muo.mutation.CreatedAt(); !ok && !muo.mutation.CreatedAtCleared() { - v := meta.UpdateDefaultCreatedAt() - muo.mutation.SetCreatedAt(v) - } - if _, ok := muo.mutation.UpdatedAt(); !ok && !muo.mutation.UpdatedAtCleared() { + if _, ok := muo.mutation.UpdatedAt(); !ok { v := meta.UpdateDefaultUpdatedAt() muo.mutation.SetUpdatedAt(v) } @@ -457,15 +435,9 @@ func (muo *MetaUpdateOne) sqlSave(ctx context.Context) (_node *Meta, err error) if value, ok := muo.mutation.CreatedAt(); ok { _spec.SetField(meta.FieldCreatedAt, field.TypeTime, value) } - if muo.mutation.CreatedAtCleared() { - _spec.ClearField(meta.FieldCreatedAt, field.TypeTime) - } if value, ok := muo.mutation.UpdatedAt(); ok { _spec.SetField(meta.FieldUpdatedAt, field.TypeTime, value) } - if muo.mutation.UpdatedAtCleared() { - _spec.ClearField(meta.FieldUpdatedAt, field.TypeTime) - } if value, ok := muo.mutation.Key(); ok { _spec.SetField(meta.FieldKey, field.TypeString, value) } diff --git a/pkg/database/ent/migrate/schema.go b/pkg/database/ent/migrate/schema.go index c3ffed42239..e5d43d42314 100644 --- 
a/pkg/database/ent/migrate/schema.go +++ b/pkg/database/ent/migrate/schema.go @@ -11,8 +11,8 @@ var ( // AlertsColumns holds the columns for the "alerts" table. AlertsColumns = []*schema.Column{ {Name: "id", Type: field.TypeInt, Increment: true}, - {Name: "created_at", Type: field.TypeTime, Nullable: true}, - {Name: "updated_at", Type: field.TypeTime, Nullable: true}, + {Name: "created_at", Type: field.TypeTime}, + {Name: "updated_at", Type: field.TypeTime}, {Name: "scenario", Type: field.TypeString}, {Name: "bucket_id", Type: field.TypeString, Nullable: true, Default: ""}, {Name: "message", Type: field.TypeString, Nullable: true, Default: ""}, @@ -60,8 +60,8 @@ var ( // BouncersColumns holds the columns for the "bouncers" table. BouncersColumns = []*schema.Column{ {Name: "id", Type: field.TypeInt, Increment: true}, - {Name: "created_at", Type: field.TypeTime, Nullable: true}, - {Name: "updated_at", Type: field.TypeTime, Nullable: true}, + {Name: "created_at", Type: field.TypeTime}, + {Name: "updated_at", Type: field.TypeTime}, {Name: "name", Type: field.TypeString, Unique: true}, {Name: "api_key", Type: field.TypeString}, {Name: "revoked", Type: field.TypeBool}, @@ -81,8 +81,8 @@ var ( // ConfigItemsColumns holds the columns for the "config_items" table. ConfigItemsColumns = []*schema.Column{ {Name: "id", Type: field.TypeInt, Increment: true}, - {Name: "created_at", Type: field.TypeTime, Nullable: true}, - {Name: "updated_at", Type: field.TypeTime, Nullable: true}, + {Name: "created_at", Type: field.TypeTime}, + {Name: "updated_at", Type: field.TypeTime}, {Name: "name", Type: field.TypeString, Unique: true}, {Name: "value", Type: field.TypeString}, } @@ -95,8 +95,8 @@ var ( // DecisionsColumns holds the columns for the "decisions" table. 
DecisionsColumns = []*schema.Column{ {Name: "id", Type: field.TypeInt, Increment: true}, - {Name: "created_at", Type: field.TypeTime, Nullable: true}, - {Name: "updated_at", Type: field.TypeTime, Nullable: true}, + {Name: "created_at", Type: field.TypeTime}, + {Name: "updated_at", Type: field.TypeTime}, {Name: "until", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"mysql": "datetime"}}, {Name: "scenario", Type: field.TypeString}, {Name: "type", Type: field.TypeString}, @@ -151,8 +151,8 @@ var ( // EventsColumns holds the columns for the "events" table. EventsColumns = []*schema.Column{ {Name: "id", Type: field.TypeInt, Increment: true}, - {Name: "created_at", Type: field.TypeTime, Nullable: true}, - {Name: "updated_at", Type: field.TypeTime, Nullable: true}, + {Name: "created_at", Type: field.TypeTime}, + {Name: "updated_at", Type: field.TypeTime}, {Name: "time", Type: field.TypeTime}, {Name: "serialized", Type: field.TypeString, Size: 8191}, {Name: "alert_events", Type: field.TypeInt, Nullable: true}, @@ -193,8 +193,8 @@ var ( // MachinesColumns holds the columns for the "machines" table. MachinesColumns = []*schema.Column{ {Name: "id", Type: field.TypeInt, Increment: true}, - {Name: "created_at", Type: field.TypeTime, Nullable: true}, - {Name: "updated_at", Type: field.TypeTime, Nullable: true}, + {Name: "created_at", Type: field.TypeTime}, + {Name: "updated_at", Type: field.TypeTime}, {Name: "last_push", Type: field.TypeTime, Nullable: true}, {Name: "last_heartbeat", Type: field.TypeTime, Nullable: true}, {Name: "machine_id", Type: field.TypeString, Unique: true}, @@ -215,8 +215,8 @@ var ( // MetaColumns holds the columns for the "meta" table. 
MetaColumns = []*schema.Column{ {Name: "id", Type: field.TypeInt, Increment: true}, - {Name: "created_at", Type: field.TypeTime, Nullable: true}, - {Name: "updated_at", Type: field.TypeTime, Nullable: true}, + {Name: "created_at", Type: field.TypeTime}, + {Name: "updated_at", Type: field.TypeTime}, {Name: "key", Type: field.TypeString}, {Name: "value", Type: field.TypeString, Size: 4095}, {Name: "alert_metas", Type: field.TypeInt, Nullable: true}, diff --git a/pkg/database/ent/mutation.go b/pkg/database/ent/mutation.go index 365824de739..aed004fb7a6 100644 --- a/pkg/database/ent/mutation.go +++ b/pkg/database/ent/mutation.go @@ -206,7 +206,7 @@ func (m *AlertMutation) CreatedAt() (r time.Time, exists bool) { // OldCreatedAt returns the old "created_at" field's value of the Alert entity. // If the Alert object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *AlertMutation) OldCreatedAt(ctx context.Context) (v *time.Time, err error) { +func (m *AlertMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") } @@ -220,22 +220,9 @@ func (m *AlertMutation) OldCreatedAt(ctx context.Context) (v *time.Time, err err return oldValue.CreatedAt, nil } -// ClearCreatedAt clears the value of the "created_at" field. -func (m *AlertMutation) ClearCreatedAt() { - m.created_at = nil - m.clearedFields[alert.FieldCreatedAt] = struct{}{} -} - -// CreatedAtCleared returns if the "created_at" field was cleared in this mutation. -func (m *AlertMutation) CreatedAtCleared() bool { - _, ok := m.clearedFields[alert.FieldCreatedAt] - return ok -} - // ResetCreatedAt resets all changes to the "created_at" field. 
func (m *AlertMutation) ResetCreatedAt() { m.created_at = nil - delete(m.clearedFields, alert.FieldCreatedAt) } // SetUpdatedAt sets the "updated_at" field. @@ -255,7 +242,7 @@ func (m *AlertMutation) UpdatedAt() (r time.Time, exists bool) { // OldUpdatedAt returns the old "updated_at" field's value of the Alert entity. // If the Alert object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *AlertMutation) OldUpdatedAt(ctx context.Context) (v *time.Time, err error) { +func (m *AlertMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") } @@ -269,22 +256,9 @@ func (m *AlertMutation) OldUpdatedAt(ctx context.Context) (v *time.Time, err err return oldValue.UpdatedAt, nil } -// ClearUpdatedAt clears the value of the "updated_at" field. -func (m *AlertMutation) ClearUpdatedAt() { - m.updated_at = nil - m.clearedFields[alert.FieldUpdatedAt] = struct{}{} -} - -// UpdatedAtCleared returns if the "updated_at" field was cleared in this mutation. -func (m *AlertMutation) UpdatedAtCleared() bool { - _, ok := m.clearedFields[alert.FieldUpdatedAt] - return ok -} - // ResetUpdatedAt resets all changes to the "updated_at" field. func (m *AlertMutation) ResetUpdatedAt() { m.updated_at = nil - delete(m.clearedFields, alert.FieldUpdatedAt) } // SetScenario sets the "scenario" field. @@ -2039,12 +2013,6 @@ func (m *AlertMutation) AddField(name string, value ent.Value) error { // mutation. 
func (m *AlertMutation) ClearedFields() []string { var fields []string - if m.FieldCleared(alert.FieldCreatedAt) { - fields = append(fields, alert.FieldCreatedAt) - } - if m.FieldCleared(alert.FieldUpdatedAt) { - fields = append(fields, alert.FieldUpdatedAt) - } if m.FieldCleared(alert.FieldBucketId) { fields = append(fields, alert.FieldBucketId) } @@ -2116,12 +2084,6 @@ func (m *AlertMutation) FieldCleared(name string) bool { // error if the field is not defined in the schema. func (m *AlertMutation) ClearField(name string) error { switch name { - case alert.FieldCreatedAt: - m.ClearCreatedAt() - return nil - case alert.FieldUpdatedAt: - m.ClearUpdatedAt() - return nil case alert.FieldBucketId: m.ClearBucketId() return nil @@ -2552,7 +2514,7 @@ func (m *BouncerMutation) CreatedAt() (r time.Time, exists bool) { // OldCreatedAt returns the old "created_at" field's value of the Bouncer entity. // If the Bouncer object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *BouncerMutation) OldCreatedAt(ctx context.Context) (v *time.Time, err error) { +func (m *BouncerMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") } @@ -2566,22 +2528,9 @@ func (m *BouncerMutation) OldCreatedAt(ctx context.Context) (v *time.Time, err e return oldValue.CreatedAt, nil } -// ClearCreatedAt clears the value of the "created_at" field. -func (m *BouncerMutation) ClearCreatedAt() { - m.created_at = nil - m.clearedFields[bouncer.FieldCreatedAt] = struct{}{} -} - -// CreatedAtCleared returns if the "created_at" field was cleared in this mutation. -func (m *BouncerMutation) CreatedAtCleared() bool { - _, ok := m.clearedFields[bouncer.FieldCreatedAt] - return ok -} - // ResetCreatedAt resets all changes to the "created_at" field. 
func (m *BouncerMutation) ResetCreatedAt() { m.created_at = nil - delete(m.clearedFields, bouncer.FieldCreatedAt) } // SetUpdatedAt sets the "updated_at" field. @@ -2601,7 +2550,7 @@ func (m *BouncerMutation) UpdatedAt() (r time.Time, exists bool) { // OldUpdatedAt returns the old "updated_at" field's value of the Bouncer entity. // If the Bouncer object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *BouncerMutation) OldUpdatedAt(ctx context.Context) (v *time.Time, err error) { +func (m *BouncerMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") } @@ -2615,22 +2564,9 @@ func (m *BouncerMutation) OldUpdatedAt(ctx context.Context) (v *time.Time, err e return oldValue.UpdatedAt, nil } -// ClearUpdatedAt clears the value of the "updated_at" field. -func (m *BouncerMutation) ClearUpdatedAt() { - m.updated_at = nil - m.clearedFields[bouncer.FieldUpdatedAt] = struct{}{} -} - -// UpdatedAtCleared returns if the "updated_at" field was cleared in this mutation. -func (m *BouncerMutation) UpdatedAtCleared() bool { - _, ok := m.clearedFields[bouncer.FieldUpdatedAt] - return ok -} - // ResetUpdatedAt resets all changes to the "updated_at" field. func (m *BouncerMutation) ResetUpdatedAt() { m.updated_at = nil - delete(m.clearedFields, bouncer.FieldUpdatedAt) } // SetName sets the "name" field. @@ -3254,12 +3190,6 @@ func (m *BouncerMutation) AddField(name string, value ent.Value) error { // mutation. 
func (m *BouncerMutation) ClearedFields() []string { var fields []string - if m.FieldCleared(bouncer.FieldCreatedAt) { - fields = append(fields, bouncer.FieldCreatedAt) - } - if m.FieldCleared(bouncer.FieldUpdatedAt) { - fields = append(fields, bouncer.FieldUpdatedAt) - } if m.FieldCleared(bouncer.FieldIPAddress) { fields = append(fields, bouncer.FieldIPAddress) } @@ -3286,12 +3216,6 @@ func (m *BouncerMutation) FieldCleared(name string) bool { // error if the field is not defined in the schema. func (m *BouncerMutation) ClearField(name string) error { switch name { - case bouncer.FieldCreatedAt: - m.ClearCreatedAt() - return nil - case bouncer.FieldUpdatedAt: - m.ClearUpdatedAt() - return nil case bouncer.FieldIPAddress: m.ClearIPAddress() return nil @@ -3528,7 +3452,7 @@ func (m *ConfigItemMutation) CreatedAt() (r time.Time, exists bool) { // OldCreatedAt returns the old "created_at" field's value of the ConfigItem entity. // If the ConfigItem object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *ConfigItemMutation) OldCreatedAt(ctx context.Context) (v *time.Time, err error) { +func (m *ConfigItemMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") } @@ -3542,22 +3466,9 @@ func (m *ConfigItemMutation) OldCreatedAt(ctx context.Context) (v *time.Time, er return oldValue.CreatedAt, nil } -// ClearCreatedAt clears the value of the "created_at" field. -func (m *ConfigItemMutation) ClearCreatedAt() { - m.created_at = nil - m.clearedFields[configitem.FieldCreatedAt] = struct{}{} -} - -// CreatedAtCleared returns if the "created_at" field was cleared in this mutation. 
-func (m *ConfigItemMutation) CreatedAtCleared() bool { - _, ok := m.clearedFields[configitem.FieldCreatedAt] - return ok -} - // ResetCreatedAt resets all changes to the "created_at" field. func (m *ConfigItemMutation) ResetCreatedAt() { m.created_at = nil - delete(m.clearedFields, configitem.FieldCreatedAt) } // SetUpdatedAt sets the "updated_at" field. @@ -3577,7 +3488,7 @@ func (m *ConfigItemMutation) UpdatedAt() (r time.Time, exists bool) { // OldUpdatedAt returns the old "updated_at" field's value of the ConfigItem entity. // If the ConfigItem object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *ConfigItemMutation) OldUpdatedAt(ctx context.Context) (v *time.Time, err error) { +func (m *ConfigItemMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") } @@ -3591,22 +3502,9 @@ func (m *ConfigItemMutation) OldUpdatedAt(ctx context.Context) (v *time.Time, er return oldValue.UpdatedAt, nil } -// ClearUpdatedAt clears the value of the "updated_at" field. -func (m *ConfigItemMutation) ClearUpdatedAt() { - m.updated_at = nil - m.clearedFields[configitem.FieldUpdatedAt] = struct{}{} -} - -// UpdatedAtCleared returns if the "updated_at" field was cleared in this mutation. -func (m *ConfigItemMutation) UpdatedAtCleared() bool { - _, ok := m.clearedFields[configitem.FieldUpdatedAt] - return ok -} - // ResetUpdatedAt resets all changes to the "updated_at" field. func (m *ConfigItemMutation) ResetUpdatedAt() { m.updated_at = nil - delete(m.clearedFields, configitem.FieldUpdatedAt) } // SetName sets the "name" field. @@ -3827,14 +3725,7 @@ func (m *ConfigItemMutation) AddField(name string, value ent.Value) error { // ClearedFields returns all nullable fields that were cleared during this // mutation. 
func (m *ConfigItemMutation) ClearedFields() []string { - var fields []string - if m.FieldCleared(configitem.FieldCreatedAt) { - fields = append(fields, configitem.FieldCreatedAt) - } - if m.FieldCleared(configitem.FieldUpdatedAt) { - fields = append(fields, configitem.FieldUpdatedAt) - } - return fields + return nil } // FieldCleared returns a boolean indicating if a field with the given name was @@ -3847,14 +3738,6 @@ func (m *ConfigItemMutation) FieldCleared(name string) bool { // ClearField clears the value of the field with the given name. It returns an // error if the field is not defined in the schema. func (m *ConfigItemMutation) ClearField(name string) error { - switch name { - case configitem.FieldCreatedAt: - m.ClearCreatedAt() - return nil - case configitem.FieldUpdatedAt: - m.ClearUpdatedAt() - return nil - } return fmt.Errorf("unknown ConfigItem nullable field %s", name) } @@ -4075,7 +3958,7 @@ func (m *DecisionMutation) CreatedAt() (r time.Time, exists bool) { // OldCreatedAt returns the old "created_at" field's value of the Decision entity. // If the Decision object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *DecisionMutation) OldCreatedAt(ctx context.Context) (v *time.Time, err error) { +func (m *DecisionMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") } @@ -4089,22 +3972,9 @@ func (m *DecisionMutation) OldCreatedAt(ctx context.Context) (v *time.Time, err return oldValue.CreatedAt, nil } -// ClearCreatedAt clears the value of the "created_at" field. -func (m *DecisionMutation) ClearCreatedAt() { - m.created_at = nil - m.clearedFields[decision.FieldCreatedAt] = struct{}{} -} - -// CreatedAtCleared returns if the "created_at" field was cleared in this mutation. 
-func (m *DecisionMutation) CreatedAtCleared() bool { - _, ok := m.clearedFields[decision.FieldCreatedAt] - return ok -} - // ResetCreatedAt resets all changes to the "created_at" field. func (m *DecisionMutation) ResetCreatedAt() { m.created_at = nil - delete(m.clearedFields, decision.FieldCreatedAt) } // SetUpdatedAt sets the "updated_at" field. @@ -4124,7 +3994,7 @@ func (m *DecisionMutation) UpdatedAt() (r time.Time, exists bool) { // OldUpdatedAt returns the old "updated_at" field's value of the Decision entity. // If the Decision object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *DecisionMutation) OldUpdatedAt(ctx context.Context) (v *time.Time, err error) { +func (m *DecisionMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") } @@ -4138,22 +4008,9 @@ func (m *DecisionMutation) OldUpdatedAt(ctx context.Context) (v *time.Time, err return oldValue.UpdatedAt, nil } -// ClearUpdatedAt clears the value of the "updated_at" field. -func (m *DecisionMutation) ClearUpdatedAt() { - m.updated_at = nil - m.clearedFields[decision.FieldUpdatedAt] = struct{}{} -} - -// UpdatedAtCleared returns if the "updated_at" field was cleared in this mutation. -func (m *DecisionMutation) UpdatedAtCleared() bool { - _, ok := m.clearedFields[decision.FieldUpdatedAt] - return ok -} - // ResetUpdatedAt resets all changes to the "updated_at" field. func (m *DecisionMutation) ResetUpdatedAt() { m.updated_at = nil - delete(m.clearedFields, decision.FieldUpdatedAt) } // SetUntil sets the "until" field. @@ -5287,12 +5144,6 @@ func (m *DecisionMutation) AddField(name string, value ent.Value) error { // mutation. 
func (m *DecisionMutation) ClearedFields() []string { var fields []string - if m.FieldCleared(decision.FieldCreatedAt) { - fields = append(fields, decision.FieldCreatedAt) - } - if m.FieldCleared(decision.FieldUpdatedAt) { - fields = append(fields, decision.FieldUpdatedAt) - } if m.FieldCleared(decision.FieldUntil) { fields = append(fields, decision.FieldUntil) } @@ -5331,12 +5182,6 @@ func (m *DecisionMutation) FieldCleared(name string) bool { // error if the field is not defined in the schema. func (m *DecisionMutation) ClearField(name string) error { switch name { - case decision.FieldCreatedAt: - m.ClearCreatedAt() - return nil - case decision.FieldUpdatedAt: - m.ClearUpdatedAt() - return nil case decision.FieldUntil: m.ClearUntil() return nil @@ -5628,7 +5473,7 @@ func (m *EventMutation) CreatedAt() (r time.Time, exists bool) { // OldCreatedAt returns the old "created_at" field's value of the Event entity. // If the Event object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *EventMutation) OldCreatedAt(ctx context.Context) (v *time.Time, err error) { +func (m *EventMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") } @@ -5642,22 +5487,9 @@ func (m *EventMutation) OldCreatedAt(ctx context.Context) (v *time.Time, err err return oldValue.CreatedAt, nil } -// ClearCreatedAt clears the value of the "created_at" field. -func (m *EventMutation) ClearCreatedAt() { - m.created_at = nil - m.clearedFields[event.FieldCreatedAt] = struct{}{} -} - -// CreatedAtCleared returns if the "created_at" field was cleared in this mutation. -func (m *EventMutation) CreatedAtCleared() bool { - _, ok := m.clearedFields[event.FieldCreatedAt] - return ok -} - // ResetCreatedAt resets all changes to the "created_at" field. 
func (m *EventMutation) ResetCreatedAt() { m.created_at = nil - delete(m.clearedFields, event.FieldCreatedAt) } // SetUpdatedAt sets the "updated_at" field. @@ -5677,7 +5509,7 @@ func (m *EventMutation) UpdatedAt() (r time.Time, exists bool) { // OldUpdatedAt returns the old "updated_at" field's value of the Event entity. // If the Event object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *EventMutation) OldUpdatedAt(ctx context.Context) (v *time.Time, err error) { +func (m *EventMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") } @@ -5691,22 +5523,9 @@ func (m *EventMutation) OldUpdatedAt(ctx context.Context) (v *time.Time, err err return oldValue.UpdatedAt, nil } -// ClearUpdatedAt clears the value of the "updated_at" field. -func (m *EventMutation) ClearUpdatedAt() { - m.updated_at = nil - m.clearedFields[event.FieldUpdatedAt] = struct{}{} -} - -// UpdatedAtCleared returns if the "updated_at" field was cleared in this mutation. -func (m *EventMutation) UpdatedAtCleared() bool { - _, ok := m.clearedFields[event.FieldUpdatedAt] - return ok -} - // ResetUpdatedAt resets all changes to the "updated_at" field. func (m *EventMutation) ResetUpdatedAt() { m.updated_at = nil - delete(m.clearedFields, event.FieldUpdatedAt) } // SetTime sets the "time" field. @@ -6034,12 +5853,6 @@ func (m *EventMutation) AddField(name string, value ent.Value) error { // mutation. 
func (m *EventMutation) ClearedFields() []string { var fields []string - if m.FieldCleared(event.FieldCreatedAt) { - fields = append(fields, event.FieldCreatedAt) - } - if m.FieldCleared(event.FieldUpdatedAt) { - fields = append(fields, event.FieldUpdatedAt) - } if m.FieldCleared(event.FieldAlertEvents) { fields = append(fields, event.FieldAlertEvents) } @@ -6057,12 +5870,6 @@ func (m *EventMutation) FieldCleared(name string) bool { // error if the field is not defined in the schema. func (m *EventMutation) ClearField(name string) error { switch name { - case event.FieldCreatedAt: - m.ClearCreatedAt() - return nil - case event.FieldUpdatedAt: - m.ClearUpdatedAt() - return nil case event.FieldAlertEvents: m.ClearAlertEvents() return nil @@ -6689,7 +6496,7 @@ func (m *MachineMutation) CreatedAt() (r time.Time, exists bool) { // OldCreatedAt returns the old "created_at" field's value of the Machine entity. // If the Machine object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *MachineMutation) OldCreatedAt(ctx context.Context) (v *time.Time, err error) { +func (m *MachineMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") } @@ -6703,22 +6510,9 @@ func (m *MachineMutation) OldCreatedAt(ctx context.Context) (v *time.Time, err e return oldValue.CreatedAt, nil } -// ClearCreatedAt clears the value of the "created_at" field. -func (m *MachineMutation) ClearCreatedAt() { - m.created_at = nil - m.clearedFields[machine.FieldCreatedAt] = struct{}{} -} - -// CreatedAtCleared returns if the "created_at" field was cleared in this mutation. -func (m *MachineMutation) CreatedAtCleared() bool { - _, ok := m.clearedFields[machine.FieldCreatedAt] - return ok -} - // ResetCreatedAt resets all changes to the "created_at" field. 
func (m *MachineMutation) ResetCreatedAt() { m.created_at = nil - delete(m.clearedFields, machine.FieldCreatedAt) } // SetUpdatedAt sets the "updated_at" field. @@ -6738,7 +6532,7 @@ func (m *MachineMutation) UpdatedAt() (r time.Time, exists bool) { // OldUpdatedAt returns the old "updated_at" field's value of the Machine entity. // If the Machine object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *MachineMutation) OldUpdatedAt(ctx context.Context) (v *time.Time, err error) { +func (m *MachineMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") } @@ -6752,22 +6546,9 @@ func (m *MachineMutation) OldUpdatedAt(ctx context.Context) (v *time.Time, err e return oldValue.UpdatedAt, nil } -// ClearUpdatedAt clears the value of the "updated_at" field. -func (m *MachineMutation) ClearUpdatedAt() { - m.updated_at = nil - m.clearedFields[machine.FieldUpdatedAt] = struct{}{} -} - -// UpdatedAtCleared returns if the "updated_at" field was cleared in this mutation. -func (m *MachineMutation) UpdatedAtCleared() bool { - _, ok := m.clearedFields[machine.FieldUpdatedAt] - return ok -} - // ResetUpdatedAt resets all changes to the "updated_at" field. func (m *MachineMutation) ResetUpdatedAt() { m.updated_at = nil - delete(m.clearedFields, machine.FieldUpdatedAt) } // SetLastPush sets the "last_push" field. @@ -7508,12 +7289,6 @@ func (m *MachineMutation) AddField(name string, value ent.Value) error { // mutation. 
func (m *MachineMutation) ClearedFields() []string { var fields []string - if m.FieldCleared(machine.FieldCreatedAt) { - fields = append(fields, machine.FieldCreatedAt) - } - if m.FieldCleared(machine.FieldUpdatedAt) { - fields = append(fields, machine.FieldUpdatedAt) - } if m.FieldCleared(machine.FieldLastPush) { fields = append(fields, machine.FieldLastPush) } @@ -7543,12 +7318,6 @@ func (m *MachineMutation) FieldCleared(name string) bool { // error if the field is not defined in the schema. func (m *MachineMutation) ClearField(name string) error { switch name { - case machine.FieldCreatedAt: - m.ClearCreatedAt() - return nil - case machine.FieldUpdatedAt: - m.ClearUpdatedAt() - return nil case machine.FieldLastPush: m.ClearLastPush() return nil @@ -7829,7 +7598,7 @@ func (m *MetaMutation) CreatedAt() (r time.Time, exists bool) { // OldCreatedAt returns the old "created_at" field's value of the Meta entity. // If the Meta object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *MetaMutation) OldCreatedAt(ctx context.Context) (v *time.Time, err error) { +func (m *MetaMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") } @@ -7843,22 +7612,9 @@ func (m *MetaMutation) OldCreatedAt(ctx context.Context) (v *time.Time, err erro return oldValue.CreatedAt, nil } -// ClearCreatedAt clears the value of the "created_at" field. -func (m *MetaMutation) ClearCreatedAt() { - m.created_at = nil - m.clearedFields[meta.FieldCreatedAt] = struct{}{} -} - -// CreatedAtCleared returns if the "created_at" field was cleared in this mutation. -func (m *MetaMutation) CreatedAtCleared() bool { - _, ok := m.clearedFields[meta.FieldCreatedAt] - return ok -} - // ResetCreatedAt resets all changes to the "created_at" field. 
func (m *MetaMutation) ResetCreatedAt() { m.created_at = nil - delete(m.clearedFields, meta.FieldCreatedAt) } // SetUpdatedAt sets the "updated_at" field. @@ -7878,7 +7634,7 @@ func (m *MetaMutation) UpdatedAt() (r time.Time, exists bool) { // OldUpdatedAt returns the old "updated_at" field's value of the Meta entity. // If the Meta object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *MetaMutation) OldUpdatedAt(ctx context.Context) (v *time.Time, err error) { +func (m *MetaMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") } @@ -7892,22 +7648,9 @@ func (m *MetaMutation) OldUpdatedAt(ctx context.Context) (v *time.Time, err erro return oldValue.UpdatedAt, nil } -// ClearUpdatedAt clears the value of the "updated_at" field. -func (m *MetaMutation) ClearUpdatedAt() { - m.updated_at = nil - m.clearedFields[meta.FieldUpdatedAt] = struct{}{} -} - -// UpdatedAtCleared returns if the "updated_at" field was cleared in this mutation. -func (m *MetaMutation) UpdatedAtCleared() bool { - _, ok := m.clearedFields[meta.FieldUpdatedAt] - return ok -} - // ResetUpdatedAt resets all changes to the "updated_at" field. func (m *MetaMutation) ResetUpdatedAt() { m.updated_at = nil - delete(m.clearedFields, meta.FieldUpdatedAt) } // SetKey sets the "key" field. @@ -8235,12 +7978,6 @@ func (m *MetaMutation) AddField(name string, value ent.Value) error { // mutation. 
func (m *MetaMutation) ClearedFields() []string { var fields []string - if m.FieldCleared(meta.FieldCreatedAt) { - fields = append(fields, meta.FieldCreatedAt) - } - if m.FieldCleared(meta.FieldUpdatedAt) { - fields = append(fields, meta.FieldUpdatedAt) - } if m.FieldCleared(meta.FieldAlertMetas) { fields = append(fields, meta.FieldAlertMetas) } @@ -8258,12 +7995,6 @@ func (m *MetaMutation) FieldCleared(name string) bool { // error if the field is not defined in the schema. func (m *MetaMutation) ClearField(name string) error { switch name { - case meta.FieldCreatedAt: - m.ClearCreatedAt() - return nil - case meta.FieldUpdatedAt: - m.ClearUpdatedAt() - return nil case meta.FieldAlertMetas: m.ClearAlertMetas() return nil diff --git a/pkg/database/ent/runtime.go b/pkg/database/ent/runtime.go index 87073074563..1c5b3460cce 100644 --- a/pkg/database/ent/runtime.go +++ b/pkg/database/ent/runtime.go @@ -26,8 +26,6 @@ func init() { alertDescCreatedAt := alertFields[0].Descriptor() // alert.DefaultCreatedAt holds the default value on creation for the created_at field. alert.DefaultCreatedAt = alertDescCreatedAt.Default.(func() time.Time) - // alert.UpdateDefaultCreatedAt holds the default value on update for the created_at field. - alert.UpdateDefaultCreatedAt = alertDescCreatedAt.UpdateDefault.(func() time.Time) // alertDescUpdatedAt is the schema descriptor for updated_at field. alertDescUpdatedAt := alertFields[1].Descriptor() // alert.DefaultUpdatedAt holds the default value on creation for the updated_at field. @@ -64,8 +62,6 @@ func init() { bouncerDescCreatedAt := bouncerFields[0].Descriptor() // bouncer.DefaultCreatedAt holds the default value on creation for the created_at field. bouncer.DefaultCreatedAt = bouncerDescCreatedAt.Default.(func() time.Time) - // bouncer.UpdateDefaultCreatedAt holds the default value on update for the created_at field. 
- bouncer.UpdateDefaultCreatedAt = bouncerDescCreatedAt.UpdateDefault.(func() time.Time) // bouncerDescUpdatedAt is the schema descriptor for updated_at field. bouncerDescUpdatedAt := bouncerFields[1].Descriptor() // bouncer.DefaultUpdatedAt holds the default value on creation for the updated_at field. @@ -94,8 +90,6 @@ func init() { configitemDescCreatedAt := configitemFields[0].Descriptor() // configitem.DefaultCreatedAt holds the default value on creation for the created_at field. configitem.DefaultCreatedAt = configitemDescCreatedAt.Default.(func() time.Time) - // configitem.UpdateDefaultCreatedAt holds the default value on update for the created_at field. - configitem.UpdateDefaultCreatedAt = configitemDescCreatedAt.UpdateDefault.(func() time.Time) // configitemDescUpdatedAt is the schema descriptor for updated_at field. configitemDescUpdatedAt := configitemFields[1].Descriptor() // configitem.DefaultUpdatedAt holds the default value on creation for the updated_at field. @@ -108,8 +102,6 @@ func init() { decisionDescCreatedAt := decisionFields[0].Descriptor() // decision.DefaultCreatedAt holds the default value on creation for the created_at field. decision.DefaultCreatedAt = decisionDescCreatedAt.Default.(func() time.Time) - // decision.UpdateDefaultCreatedAt holds the default value on update for the created_at field. - decision.UpdateDefaultCreatedAt = decisionDescCreatedAt.UpdateDefault.(func() time.Time) // decisionDescUpdatedAt is the schema descriptor for updated_at field. decisionDescUpdatedAt := decisionFields[1].Descriptor() // decision.DefaultUpdatedAt holds the default value on creation for the updated_at field. @@ -126,8 +118,6 @@ func init() { eventDescCreatedAt := eventFields[0].Descriptor() // event.DefaultCreatedAt holds the default value on creation for the created_at field. event.DefaultCreatedAt = eventDescCreatedAt.Default.(func() time.Time) - // event.UpdateDefaultCreatedAt holds the default value on update for the created_at field. 
- event.UpdateDefaultCreatedAt = eventDescCreatedAt.UpdateDefault.(func() time.Time) // eventDescUpdatedAt is the schema descriptor for updated_at field. eventDescUpdatedAt := eventFields[1].Descriptor() // event.DefaultUpdatedAt holds the default value on creation for the updated_at field. @@ -150,8 +140,6 @@ func init() { machineDescCreatedAt := machineFields[0].Descriptor() // machine.DefaultCreatedAt holds the default value on creation for the created_at field. machine.DefaultCreatedAt = machineDescCreatedAt.Default.(func() time.Time) - // machine.UpdateDefaultCreatedAt holds the default value on update for the created_at field. - machine.UpdateDefaultCreatedAt = machineDescCreatedAt.UpdateDefault.(func() time.Time) // machineDescUpdatedAt is the schema descriptor for updated_at field. machineDescUpdatedAt := machineFields[1].Descriptor() // machine.DefaultUpdatedAt holds the default value on creation for the updated_at field. @@ -162,14 +150,10 @@ func init() { machineDescLastPush := machineFields[2].Descriptor() // machine.DefaultLastPush holds the default value on creation for the last_push field. machine.DefaultLastPush = machineDescLastPush.Default.(func() time.Time) - // machine.UpdateDefaultLastPush holds the default value on update for the last_push field. - machine.UpdateDefaultLastPush = machineDescLastPush.UpdateDefault.(func() time.Time) // machineDescLastHeartbeat is the schema descriptor for last_heartbeat field. machineDescLastHeartbeat := machineFields[3].Descriptor() // machine.DefaultLastHeartbeat holds the default value on creation for the last_heartbeat field. machine.DefaultLastHeartbeat = machineDescLastHeartbeat.Default.(func() time.Time) - // machine.UpdateDefaultLastHeartbeat holds the default value on update for the last_heartbeat field. - machine.UpdateDefaultLastHeartbeat = machineDescLastHeartbeat.UpdateDefault.(func() time.Time) // machineDescScenarios is the schema descriptor for scenarios field. 
machineDescScenarios := machineFields[7].Descriptor() // machine.ScenariosValidator is a validator for the "scenarios" field. It is called by the builders before save. @@ -188,8 +172,6 @@ func init() { metaDescCreatedAt := metaFields[0].Descriptor() // meta.DefaultCreatedAt holds the default value on creation for the created_at field. meta.DefaultCreatedAt = metaDescCreatedAt.Default.(func() time.Time) - // meta.UpdateDefaultCreatedAt holds the default value on update for the created_at field. - meta.UpdateDefaultCreatedAt = metaDescCreatedAt.UpdateDefault.(func() time.Time) // metaDescUpdatedAt is the schema descriptor for updated_at field. metaDescUpdatedAt := metaFields[1].Descriptor() // meta.DefaultUpdatedAt holds the default value on creation for the updated_at field. diff --git a/pkg/database/ent/schema/alert.go b/pkg/database/ent/schema/alert.go index f2df9d7f09c..bda7cc7d0b9 100644 --- a/pkg/database/ent/schema/alert.go +++ b/pkg/database/ent/schema/alert.go @@ -19,10 +19,10 @@ func (Alert) Fields() []ent.Field { return []ent.Field{ field.Time("created_at"). Default(types.UtcNow). - UpdateDefault(types.UtcNow).Nillable().Optional(), + Immutable(), field.Time("updated_at"). Default(types.UtcNow). - UpdateDefault(types.UtcNow).Nillable().Optional(), + UpdateDefault(types.UtcNow), field.String("scenario"), field.String("bucketId").Default("").Optional(), field.String("message").Default("").Optional(), diff --git a/pkg/database/ent/schema/bouncer.go b/pkg/database/ent/schema/bouncer.go index 986a1bf3ba8..18efecb3f03 100644 --- a/pkg/database/ent/schema/bouncer.go +++ b/pkg/database/ent/schema/bouncer.go @@ -16,10 +16,10 @@ func (Bouncer) Fields() []ent.Field { return []ent.Field{ field.Time("created_at"). Default(types.UtcNow). - UpdateDefault(types.UtcNow).Nillable().Optional().StructTag(`json:"created_at"`), + StructTag(`json:"created_at"`), field.Time("updated_at"). Default(types.UtcNow). 
- UpdateDefault(types.UtcNow).Nillable().Optional().StructTag(`json:"updated_at"`), + UpdateDefault(types.UtcNow).StructTag(`json:"updated_at"`), field.String("name").Unique().StructTag(`json:"name"`), field.String("api_key").Sensitive(), // hash of api_key field.Bool("revoked").StructTag(`json:"revoked"`), diff --git a/pkg/database/ent/schema/config.go b/pkg/database/ent/schema/config.go index f3320a9cce6..036c55908ba 100644 --- a/pkg/database/ent/schema/config.go +++ b/pkg/database/ent/schema/config.go @@ -11,21 +11,20 @@ type ConfigItem struct { ent.Schema } -// Fields of the Bouncer. func (ConfigItem) Fields() []ent.Field { return []ent.Field{ field.Time("created_at"). Default(types.UtcNow). - UpdateDefault(types.UtcNow).Nillable().Optional().StructTag(`json:"created_at"`), + Immutable(). + StructTag(`json:"created_at"`), field.Time("updated_at"). Default(types.UtcNow). - UpdateDefault(types.UtcNow).Nillable().Optional().StructTag(`json:"updated_at"`), + UpdateDefault(types.UtcNow).StructTag(`json:"updated_at"`), field.String("name").Unique().StructTag(`json:"name"`), field.String("value").StructTag(`json:"value"`), // a json object } } -// Edges of the Bouncer. func (ConfigItem) Edges() []ent.Edge { return nil } diff --git a/pkg/database/ent/schema/decision.go b/pkg/database/ent/schema/decision.go index b7a99fb7a70..d5193910146 100644 --- a/pkg/database/ent/schema/decision.go +++ b/pkg/database/ent/schema/decision.go @@ -19,10 +19,10 @@ func (Decision) Fields() []ent.Field { return []ent.Field{ field.Time("created_at"). Default(types.UtcNow). - UpdateDefault(types.UtcNow).Nillable().Optional(), + Immutable(), field.Time("updated_at"). Default(types.UtcNow). 
- UpdateDefault(types.UtcNow).Nillable().Optional(), + UpdateDefault(types.UtcNow), field.Time("until").Nillable().Optional().SchemaType(map[string]string{ dialect.MySQL: "datetime", }), diff --git a/pkg/database/ent/schema/event.go b/pkg/database/ent/schema/event.go index 6b6d2733ff7..f982ebe9653 100644 --- a/pkg/database/ent/schema/event.go +++ b/pkg/database/ent/schema/event.go @@ -18,10 +18,10 @@ func (Event) Fields() []ent.Field { return []ent.Field{ field.Time("created_at"). Default(types.UtcNow). - UpdateDefault(types.UtcNow).Nillable().Optional(), + Immutable(), field.Time("updated_at"). Default(types.UtcNow). - UpdateDefault(types.UtcNow).Nillable().Optional(), + UpdateDefault(types.UtcNow), field.Time("time"), field.String("serialized").MaxLen(8191), field.Int("alert_events").Optional(), diff --git a/pkg/database/ent/schema/lock.go b/pkg/database/ent/schema/lock.go index de87efff3f7..0d49bac1bf6 100644 --- a/pkg/database/ent/schema/lock.go +++ b/pkg/database/ent/schema/lock.go @@ -12,7 +12,7 @@ type Lock struct { func (Lock) Fields() []ent.Field { return []ent.Field{ - field.String("name").Unique().StructTag(`json:"name"`), + field.String("name").Unique().Immutable().StructTag(`json:"name"`), field.Time("created_at").Default(types.UtcNow).StructTag(`json:"created_at"`), } } diff --git a/pkg/database/ent/schema/machine.go b/pkg/database/ent/schema/machine.go index e155c936071..997a2041453 100644 --- a/pkg/database/ent/schema/machine.go +++ b/pkg/database/ent/schema/machine.go @@ -17,17 +17,19 @@ func (Machine) Fields() []ent.Field { return []ent.Field{ field.Time("created_at"). Default(types.UtcNow). - UpdateDefault(types.UtcNow).Nillable().Optional(), + Immutable(), field.Time("updated_at"). Default(types.UtcNow). - UpdateDefault(types.UtcNow).Nillable().Optional(), + UpdateDefault(types.UtcNow), field.Time("last_push"). Default(types.UtcNow). - UpdateDefault(types.UtcNow).Nillable().Optional(), + Nillable().Optional(), field.Time("last_heartbeat"). 
Default(types.UtcNow). - UpdateDefault(types.UtcNow).Nillable().Optional(), - field.String("machineId").Unique(), + Nillable().Optional(), + field.String("machineId"). + Unique(). + Immutable(), field.String("password").Sensitive(), field.String("ipAddress"), field.String("scenarios").MaxLen(100000).Optional(), diff --git a/pkg/database/ent/schema/meta.go b/pkg/database/ent/schema/meta.go index 1a84bb1b667..877fffa8a2e 100644 --- a/pkg/database/ent/schema/meta.go +++ b/pkg/database/ent/schema/meta.go @@ -17,11 +17,10 @@ type Meta struct { func (Meta) Fields() []ent.Field { return []ent.Field{ field.Time("created_at"). - Default(types.UtcNow). - UpdateDefault(types.UtcNow).Nillable().Optional(), + Default(types.UtcNow), field.Time("updated_at"). Default(types.UtcNow). - UpdateDefault(types.UtcNow).Nillable().Optional(), + UpdateDefault(types.UtcNow), field.String("key"), field.String("value").MaxLen(4095), field.Int("alert_metas").Optional(), diff --git a/pkg/database/machines.go b/pkg/database/machines.go index b9834e57e09..7a64c1d4d6e 100644 --- a/pkg/database/machines.go +++ b/pkg/database/machines.go @@ -134,14 +134,6 @@ func (c *Client) BulkDeleteWatchers(machines []*ent.Machine) (int, error) { return nbDeleted, nil } -func (c *Client) UpdateMachineLastPush(machineID string) error { - _, err := c.Ent.Machine.Update().Where(machine.MachineIdEQ(machineID)).SetLastPush(time.Now().UTC()).Save(c.CTX) - if err != nil { - return errors.Wrapf(UpdateFail, "updating machine last_push: %s", err) - } - return nil -} - func (c *Client) UpdateMachineLastHeartBeat(machineID string) error { _, err := c.Ent.Machine.Update().Where(machine.MachineIdEQ(machineID)).SetLastHeartbeat(time.Now().UTC()).Save(c.CTX) if err != nil { From 529d3b20796c5df53a231bca20c35f80b44f9f08 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 2 May 2024 13:42:47 +0200 Subject: [PATCH 112/318] minor lint/readability (#2976) * simplify a couple loops * if/else -> 
switch * drop redundant else * comment + drop var declaration + explicit zero return * lint (whitespace/fmt.Errorf) --- pkg/alertcontext/alertcontext.go | 36 ++++-- pkg/apiserver/apic.go | 58 +++++---- pkg/database/utils.go | 29 +++-- pkg/leakybucket/manager_load.go | 196 +++++++++++++++++++------------ pkg/types/ip.go | 21 +++- 5 files changed, 212 insertions(+), 128 deletions(-) diff --git a/pkg/alertcontext/alertcontext.go b/pkg/alertcontext/alertcontext.go index 7586e7cb4af..8b0648ca0eb 100644 --- a/pkg/alertcontext/alertcontext.go +++ b/pkg/alertcontext/alertcontext.go @@ -19,9 +19,7 @@ const ( maxContextValueLen = 4000 ) -var ( - alertContext = Context{} -) +var alertContext = Context{} type Context struct { ContextToSend map[string][]string @@ -37,19 +35,21 @@ func ValidateContextExpr(key string, expressions []string) error { return fmt.Errorf("compilation of '%s' failed: %v", expression, err) } } + return nil } func NewAlertContext(contextToSend map[string][]string, valueLength int) error { - var clog = log.New() + clog := log.New() if err := types.ConfigureLogger(clog); err != nil { - return fmt.Errorf("couldn't create logger for alert context: %s", err) + return fmt.Errorf("couldn't create logger for alert context: %w", err) } if valueLength == 0 { clog.Debugf("No console context value length provided, using default: %d", maxContextValueLen) valueLength = maxContextValueLen } + if valueLength > maxContextValueLen { clog.Debugf("Provided console context value length (%d) is higher than the maximum, using default: %d", valueLength, maxContextValueLen) valueLength = maxContextValueLen @@ -76,6 +76,7 @@ func NewAlertContext(contextToSend map[string][]string, valueLength int) error { if err != nil { return fmt.Errorf("compilation of '%s' context value failed: %v", value, err) } + alertContext.ContextToSendCompiled[key] = append(alertContext.ContextToSendCompiled[key], valueCompiled) alertContext.ContextToSend[key] = append(alertContext.ContextToSend[key], 
value) } @@ -85,16 +86,13 @@ func NewAlertContext(contextToSend map[string][]string, valueLength int) error { } func truncate(values []string, contextValueLen int) (string, error) { - var ret string valueByte, err := json.Marshal(values) if err != nil { - return "", fmt.Errorf("unable to dump metas: %s", err) + return "", fmt.Errorf("unable to dump metas: %w", err) } - ret = string(valueByte) - for { - if len(ret) <= contextValueLen { - break - } + + ret := string(valueByte) + for len(ret) > contextValueLen { // if there is only 1 value left and that the size is too big, truncate it if len(values) == 1 { valueToTruncate := values[0] @@ -106,12 +104,15 @@ func truncate(values []string, contextValueLen int) (string, error) { // if there is multiple value inside, just remove the last one values = values[:len(values)-1] } + valueByte, err = json.Marshal(values) if err != nil { - return "", fmt.Errorf("unable to dump metas: %s", err) + return "", fmt.Errorf("unable to dump metas: %w", err) } + ret = string(valueByte) } + return ret, nil } @@ -120,18 +121,22 @@ func EventToContext(events []types.Event) (models.Meta, []error) { metas := make([]*models.MetaItems0, 0) tmpContext := make(map[string][]string) + for _, evt := range events { for key, values := range alertContext.ContextToSendCompiled { if _, ok := tmpContext[key]; !ok { tmpContext[key] = make([]string, 0) } + for _, value := range values { var val string + output, err := expr.Run(value, map[string]interface{}{"evt": evt}) if err != nil { errors = append(errors, fmt.Errorf("failed to get value for %s : %v", key, err)) continue } + switch out := output.(type) { case string: val = out @@ -141,20 +146,24 @@ func EventToContext(events []types.Event) (models.Meta, []error) { errors = append(errors, fmt.Errorf("unexpected return type for %s : %T", key, output)) continue } + if val != "" && !slices.Contains(tmpContext[key], val) { tmpContext[key] = append(tmpContext[key], val) } } } } + for key, values := range 
tmpContext { if len(values) == 0 { continue } + valueStr, err := truncate(values, alertContext.ContextValueLen) if err != nil { log.Warningf(err.Error()) } + meta := models.MetaItems0{ Key: key, Value: valueStr, @@ -163,5 +172,6 @@ func EventToContext(events []types.Event) (models.Meta, []error) { } ret := models.Meta(metas) + return ret, errors } diff --git a/pkg/apiserver/apic.go b/pkg/apiserver/apic.go index 2136edc8b8e..3f646071b0e 100644 --- a/pkg/apiserver/apic.go +++ b/pkg/apiserver/apic.go @@ -81,12 +81,12 @@ func randomDuration(d time.Duration, delta time.Duration) time.Duration { func (a *apic) FetchScenariosListFromDB() ([]string, error) { scenarios := make([]string, 0) - machines, err := a.dbClient.ListMachines() + machines, err := a.dbClient.ListMachines() if err != nil { return nil, fmt.Errorf("while listing machines: %w", err) } - //merge all scenarios together + // merge all scenarios together for _, v := range machines { machineScenarios := strings.Split(v.Scenarios, ",") log.Debugf("%d scenarios for machine %d", len(machineScenarios), v.ID) @@ -113,7 +113,7 @@ func decisionsToApiDecisions(decisions []*models.Decision) models.AddSignalsRequ Origin: ptr.Of(*decision.Origin), Scenario: ptr.Of(*decision.Scenario), Scope: ptr.Of(*decision.Scope), - //Simulated: *decision.Simulated, + // Simulated: *decision.Simulated, Type: ptr.Of(*decision.Type), Until: decision.Until, Value: ptr.Of(*decision.Value), @@ -196,8 +196,8 @@ func NewAPIC(config *csconfig.OnlineApiClientCfg, dbClient *database.Client, con } password := strfmt.Password(config.Credentials.Password) - apiURL, err := url.Parse(config.Credentials.URL) + apiURL, err := url.Parse(config.Credentials.URL) if err != nil { return nil, fmt.Errorf("while parsing '%s': %w", config.Credentials.URL, err) } @@ -376,7 +376,6 @@ func (a *apic) Send(cacheOrig *models.AddSignalsRequest) { defer cancel() _, _, err := a.apiClient.Signal.Add(ctx, &send) - if err != nil { log.Errorf("sending signal to central API: 
%s", err) return @@ -391,9 +390,8 @@ func (a *apic) Send(cacheOrig *models.AddSignalsRequest) { defer cancel() _, _, err := a.apiClient.Signal.Add(ctx, &send) - if err != nil { - //we log it here as well, because the return value of func might be discarded + // we log it here as well, because the return value of func might be discarded log.Errorf("sending signal to central API: %s", err) } @@ -407,8 +405,8 @@ func (a *apic) CAPIPullIsOld() (bool, error) { alerts := a.dbClient.Ent.Alert.Query() alerts = alerts.Where(alert.HasDecisionsWith(decision.OriginEQ(database.CapiMachineID))) alerts = alerts.Where(alert.CreatedAtGTE(time.Now().UTC().Add(-time.Duration(1*time.Hour + 30*time.Minute)))) //nolint:unconvert - count, err := alerts.Count(a.dbClient.CTX) + count, err := alerts.Count(a.dbClient.CTX) if err != nil { return false, fmt.Errorf("while looking for CAPI alert: %w", err) } @@ -506,6 +504,7 @@ func createAlertsForDecisions(decisions []*models.Decision) []*models.Alert { if sub.Scenario == nil { log.Warningf("nil scenario in %+v", sub) } + if *sub.Scenario == *decision.Scenario { found = true break @@ -567,7 +566,7 @@ func createAlertForDecision(decision *models.Decision) *models.Alert { // This function takes in list of parent alerts and decisions and then pairs them up. 
func fillAlertsWithDecisions(alerts []*models.Alert, decisions []*models.Decision, addCounters map[string]map[string]int) []*models.Alert { for _, decision := range decisions { - //count and create separate alerts for each list + // count and create separate alerts for each list updateCounterForDecision(addCounters, decision.Origin, decision.Scenario, 1) /*CAPI might send lower case scopes, unify it.*/ @@ -579,7 +578,7 @@ func fillAlertsWithDecisions(alerts []*models.Alert, decisions []*models.Decisio } found := false - //add the individual decisions to the right list + // add the individual decisions to the right list for idx, alert := range alerts { if *decision.Origin == types.CAPIOrigin { if *alert.Source.Scope == types.CAPIOrigin { @@ -592,6 +591,7 @@ func fillAlertsWithDecisions(alerts []*models.Alert, decisions []*models.Decisio if *alert.Source.Scope == types.ListOrigin && *alert.Scenario == *decision.Scenario { alerts[idx].Decisions = append(alerts[idx].Decisions, decision) found = true + break } } else { @@ -613,8 +613,8 @@ func fillAlertsWithDecisions(alerts []*models.Alert, decisions []*models.Decisio func (a *apic) PullTop(forcePull bool) error { var err error - //A mutex with TryLock would be a bit simpler - //But go does not guarantee that TryLock will be able to acquire the lock even if it is available + // A mutex with TryLock would be a bit simpler + // But go does not guarantee that TryLock will be able to acquire the lock even if it is available select { case a.isPulling <- true: defer func() { @@ -633,6 +633,7 @@ func (a *apic) PullTop(forcePull bool) error { } log.Debug("Acquiring lock for pullCAPI") + err = a.dbClient.AcquirePullCAPILock() if a.dbClient.IsLocked(err) { log.Info("PullCAPI is already running, skipping") @@ -642,6 +643,7 @@ func (a *apic) PullTop(forcePull bool) error { /*defer lock release*/ defer func() { log.Debug("Releasing lock for pullCAPI") + if err := a.dbClient.ReleasePullCAPILock(); err != nil { log.Errorf("while 
releasing lock: %v", err) } @@ -681,7 +683,7 @@ func (a *apic) PullTop(forcePull bool) error { // create one alert for community blocklist using the first decision decisions := a.apiClient.Decisions.GetDecisionsFromGroups(data.New) - //apply APIC specific whitelists + // apply APIC specific whitelists decisions = a.ApplyApicWhitelists(decisions) alert := createAlertForDecision(decisions[0]) @@ -740,7 +742,7 @@ func (a *apic) ApplyApicWhitelists(decisions []*models.Decision) []*models.Decis if a.whitelists == nil || len(a.whitelists.Cidrs) == 0 && len(a.whitelists.Ips) == 0 { return decisions } - //deal with CAPI whitelists for fire. We want to avoid having a second list, so we shrink in place + // deal with CAPI whitelists for fire. We want to avoid having a second list, so we shrink in place outIdx := 0 for _, decision := range decisions { @@ -753,7 +755,7 @@ func (a *apic) ApplyApicWhitelists(decisions []*models.Decision) []*models.Decis decisions[outIdx] = decision outIdx++ } - //shrink the list, those are deleted items + // shrink the list, those are deleted items return decisions[:outIdx] } @@ -782,8 +784,8 @@ func (a *apic) ShouldForcePullBlocklist(blocklist *modelscapi.BlocklistLink) (bo alertQuery := a.dbClient.Ent.Alert.Query() alertQuery.Where(alert.SourceScopeEQ(fmt.Sprintf("%s:%s", types.ListOrigin, *blocklist.Name))) alertQuery.Order(ent.Desc(alert.FieldCreatedAt)) - alertInstance, err := alertQuery.First(context.Background()) + alertInstance, err := alertQuery.First(context.Background()) if err != nil { if ent.IsNotFound(err) { log.Debugf("no alert found for %s, force refresh", *blocklist.Name) @@ -795,8 +797,8 @@ func (a *apic) ShouldForcePullBlocklist(blocklist *modelscapi.BlocklistLink) (bo decisionQuery := a.dbClient.Ent.Decision.Query() decisionQuery.Where(decision.HasOwnerWith(alert.IDEQ(alertInstance.ID))) - firstDecision, err := decisionQuery.First(context.Background()) + firstDecision, err := decisionQuery.First(context.Background()) if err 
!= nil { if ent.IsNotFound(err) { log.Debugf("no decision found for %s, force refresh", *blocklist.Name) @@ -872,7 +874,7 @@ func (a *apic) updateBlocklist(client *apiclient.ApiClient, blocklist *modelscap log.Infof("blocklist %s has no decisions", *blocklist.Name) return nil } - //apply APIC specific whitelists + // apply APIC specific whitelists decisions = a.ApplyApicWhitelists(decisions) alert := createAlertForDecision(decisions[0]) alertsFromCapi := []*models.Alert{alert} @@ -911,12 +913,17 @@ func (a *apic) UpdateBlocklists(links *modelscapi.GetDecisionsStreamResponseLink } func setAlertScenario(alert *models.Alert, addCounters map[string]map[string]int, deleteCounters map[string]map[string]int) { - if *alert.Source.Scope == types.CAPIOrigin { + switch *alert.Source.Scope { + case types.CAPIOrigin: *alert.Source.Scope = types.CommunityBlocklistPullSourceScope - alert.Scenario = ptr.Of(fmt.Sprintf("update : +%d/-%d IPs", addCounters[types.CAPIOrigin]["all"], deleteCounters[types.CAPIOrigin]["all"])) - } else if *alert.Source.Scope == types.ListOrigin { + alert.Scenario = ptr.Of(fmt.Sprintf("update : +%d/-%d IPs", + addCounters[types.CAPIOrigin]["all"], + deleteCounters[types.CAPIOrigin]["all"])) + case types.ListOrigin: *alert.Source.Scope = fmt.Sprintf("%s:%s", types.ListOrigin, *alert.Scenario) - alert.Scenario = ptr.Of(fmt.Sprintf("update : +%d/-%d IPs", addCounters[types.ListOrigin][*alert.Scenario], deleteCounters[types.ListOrigin][*alert.Scenario])) + alert.Scenario = ptr.Of(fmt.Sprintf("update : +%d/-%d IPs", + addCounters[types.ListOrigin][*alert.Scenario], + deleteCounters[types.ListOrigin][*alert.Scenario])) } } @@ -988,11 +995,12 @@ func makeAddAndDeleteCounters() (map[string]map[string]int, map[string]map[strin } func updateCounterForDecision(counter map[string]map[string]int, origin *string, scenario *string, totalDecisions int) { - if *origin == types.CAPIOrigin { + switch *origin { + case types.CAPIOrigin: counter[*origin]["all"] += 
totalDecisions - } else if *origin == types.ListOrigin { + case types.ListOrigin: counter[*origin][*scenario] += totalDecisions - } else { + default: log.Warningf("Unknown origin %s", *origin) } } diff --git a/pkg/database/utils.go b/pkg/database/utils.go index 2414e702786..f1c06565635 100644 --- a/pkg/database/utils.go +++ b/pkg/database/utils.go @@ -13,12 +13,14 @@ func IP2Int(ip net.IP) uint32 { if len(ip) == 16 { return binary.BigEndian.Uint32(ip[12:16]) } + return binary.BigEndian.Uint32(ip) } func Int2ip(nn uint32) net.IP { ip := make(net.IP, 4) binary.BigEndian.PutUint32(ip, nn) + return ip } @@ -26,13 +28,14 @@ func IsIpv4(host string) bool { return net.ParseIP(host) != nil } -//Stolen from : https://github.com/llimllib/ipaddress/ +// Stolen from : https://github.com/llimllib/ipaddress/ // Return the final address of a net range. Convert to IPv4 if possible, // otherwise return an ipv6 func LastAddress(n *net.IPNet) net.IP { ip := n.IP.To4() if ip == nil { ip = n.IP + return net.IP{ ip[0] | ^n.Mask[0], ip[1] | ^n.Mask[1], ip[2] | ^n.Mask[2], ip[3] | ^n.Mask[3], ip[4] | ^n.Mask[4], ip[5] | ^n.Mask[5], @@ -49,40 +52,44 @@ func LastAddress(n *net.IPNet) net.IP { ip[3]|^n.Mask[3]) } +// GetIpsFromIpRange takes a CIDR range and returns the start and end IP func GetIpsFromIpRange(host string) (int64, int64, error) { - var ipStart int64 - var ipEnd int64 - var err error - var parsedRange *net.IPNet - - if _, parsedRange, err = net.ParseCIDR(host); err != nil { - return ipStart, ipEnd, fmt.Errorf("'%s' is not a valid CIDR", host) + _, parsedRange, err := net.ParseCIDR(host) + if err != nil { + return 0, 0, fmt.Errorf("'%s' is not a valid CIDR", host) } + if parsedRange == nil { - return ipStart, ipEnd, fmt.Errorf("unable to parse network : %s", err) + return 0, 0, fmt.Errorf("unable to parse network: %w", err) } - ipStart = int64(IP2Int(parsedRange.IP)) - ipEnd = int64(IP2Int(LastAddress(parsedRange))) + + ipStart := int64(IP2Int(parsedRange.IP)) + ipEnd := 
int64(IP2Int(LastAddress(parsedRange))) return ipStart, ipEnd, nil } func ParseDuration(d string) (time.Duration, error) { durationStr := d + if strings.HasSuffix(d, "d") { days := strings.Split(d, "d")[0] if len(days) == 0 { return 0, fmt.Errorf("'%s' can't be parsed as duration", d) } + daysInt, err := strconv.Atoi(days) if err != nil { return 0, err } + durationStr = strconv.Itoa(daysInt*24) + "h" } + duration, err := time.ParseDuration(durationStr) if err != nil { return 0, err } + return duration, nil } diff --git a/pkg/leakybucket/manager_load.go b/pkg/leakybucket/manager_load.go index 85eee89d933..bc259c18319 100644 --- a/pkg/leakybucket/manager_load.go +++ b/pkg/leakybucket/manager_load.go @@ -34,42 +34,42 @@ type BucketFactory struct { Author string `yaml:"author"` Description string `yaml:"description"` References []string `yaml:"references"` - Type string `yaml:"type"` //Type can be : leaky, counter, trigger. It determines the main bucket characteristics - Name string `yaml:"name"` //Name of the bucket, used later in log and user-messages. Should be unique - Capacity int `yaml:"capacity"` //Capacity is applicable to leaky buckets and determines the "burst" capacity - LeakSpeed string `yaml:"leakspeed"` //Leakspeed is a float representing how many events per second leak out of the bucket - Duration string `yaml:"duration"` //Duration allows 'counter' buckets to have a fixed life-time - Filter string `yaml:"filter"` //Filter is an expr that determines if an event is elligible for said bucket. Filter is evaluated against the Event struct - GroupBy string `yaml:"groupby,omitempty"` //groupy is an expr that allows to determine the partitions of the bucket. 
A common example is the source_ip - Distinct string `yaml:"distinct"` //Distinct, when present, adds a `Pour()` processor that will only pour uniq items (based on distinct expr result) - Debug bool `yaml:"debug"` //Debug, when set to true, will enable debugging for _this_ scenario specifically - Labels map[string]interface{} `yaml:"labels"` //Labels is K:V list aiming at providing context the overflow - Blackhole string `yaml:"blackhole,omitempty"` //Blackhole is a duration that, if present, will prevent same bucket partition to overflow more often than $duration - logger *log.Entry `yaml:"-"` //logger is bucket-specific logger (used by Debug as well) - Reprocess bool `yaml:"reprocess"` //Reprocess, if true, will for the bucket to be re-injected into processing chain - CacheSize int `yaml:"cache_size"` //CacheSize, if > 0, limits the size of in-memory cache of the bucket - Profiling bool `yaml:"profiling"` //Profiling, if true, will make the bucket record pours/overflows/etc. - OverflowFilter string `yaml:"overflow_filter"` //OverflowFilter if present, is a filter that must return true for the overflow to go through - ConditionalOverflow string `yaml:"condition"` //condition if present, is an expression that must return true for the bucket to overflow + Type string `yaml:"type"` // Type can be : leaky, counter, trigger. It determines the main bucket characteristics + Name string `yaml:"name"` // Name of the bucket, used later in log and user-messages. Should be unique + Capacity int `yaml:"capacity"` // Capacity is applicable to leaky buckets and determines the "burst" capacity + LeakSpeed string `yaml:"leakspeed"` // Leakspeed is a float representing how many events per second leak out of the bucket + Duration string `yaml:"duration"` // Duration allows 'counter' buckets to have a fixed life-time + Filter string `yaml:"filter"` // Filter is an expr that determines if an event is elligible for said bucket. 
Filter is evaluated against the Event struct + GroupBy string `yaml:"groupby,omitempty"` // groupy is an expr that allows to determine the partitions of the bucket. A common example is the source_ip + Distinct string `yaml:"distinct"` // Distinct, when present, adds a `Pour()` processor that will only pour uniq items (based on distinct expr result) + Debug bool `yaml:"debug"` // Debug, when set to true, will enable debugging for _this_ scenario specifically + Labels map[string]interface{} `yaml:"labels"` // Labels is K:V list aiming at providing context the overflow + Blackhole string `yaml:"blackhole,omitempty"` // Blackhole is a duration that, if present, will prevent same bucket partition to overflow more often than $duration + logger *log.Entry `yaml:"-"` // logger is bucket-specific logger (used by Debug as well) + Reprocess bool `yaml:"reprocess"` // Reprocess, if true, will for the bucket to be re-injected into processing chain + CacheSize int `yaml:"cache_size"` // CacheSize, if > 0, limits the size of in-memory cache of the bucket + Profiling bool `yaml:"profiling"` // Profiling, if true, will make the bucket record pours/overflows/etc. + OverflowFilter string `yaml:"overflow_filter"` // OverflowFilter if present, is a filter that must return true for the overflow to go through + ConditionalOverflow string `yaml:"condition"` // condition if present, is an expression that must return true for the bucket to overflow BayesianPrior float32 `yaml:"bayesian_prior"` BayesianThreshold float32 `yaml:"bayesian_threshold"` - BayesianConditions []RawBayesianCondition `yaml:"bayesian_conditions"` //conditions for the bayesian bucket - ScopeType types.ScopeType `yaml:"scope,omitempty"` //to enforce a different remediation than blocking an IP. 
Will default this to IP + BayesianConditions []RawBayesianCondition `yaml:"bayesian_conditions"` // conditions for the bayesian bucket + ScopeType types.ScopeType `yaml:"scope,omitempty"` // to enforce a different remediation than blocking an IP. Will default this to IP BucketName string `yaml:"-"` Filename string `yaml:"-"` RunTimeFilter *vm.Program `json:"-"` RunTimeGroupBy *vm.Program `json:"-"` Data []*types.DataSource `yaml:"data,omitempty"` DataDir string `yaml:"-"` - CancelOnFilter string `yaml:"cancel_on,omitempty"` //a filter that, if matched, kills the bucket - leakspeed time.Duration //internal representation of `Leakspeed` - duration time.Duration //internal representation of `Duration` - ret chan types.Event //the bucket-specific output chan for overflows - processors []Processor //processors is the list of hooks for pour/overflow/create (cf. uniq, blackhole etc.) - output bool //?? + CancelOnFilter string `yaml:"cancel_on,omitempty"` // a filter that, if matched, kills the bucket + leakspeed time.Duration // internal representation of `Leakspeed` + duration time.Duration // internal representation of `Duration` + ret chan types.Event // the bucket-specific output chan for overflows + processors []Processor // processors is the list of hooks for pour/overflow/create (cf. uniq, blackhole etc.) + output bool // ?? 
ScenarioVersion string `yaml:"version,omitempty"` hash string `yaml:"-"` - Simulated bool `yaml:"simulated"` //Set to true if the scenario instantiating the bucket was in the exclusion list + Simulated bool `yaml:"simulated"` // Set to true if the scenario instantiating the bucket was in the exclusion list tomb *tomb.Tomb `yaml:"-"` wgPour *sync.WaitGroup `yaml:"-"` wgDumpState *sync.WaitGroup `yaml:"-"` @@ -81,66 +81,80 @@ var seed namegenerator.Generator = namegenerator.NewNameGenerator(time.Now().UTC func ValidateFactory(bucketFactory *BucketFactory) error { if bucketFactory.Name == "" { - return fmt.Errorf("bucket must have name") + return errors.New("bucket must have name") } + if bucketFactory.Description == "" { - return fmt.Errorf("description is mandatory") + return errors.New("description is mandatory") } + if bucketFactory.Type == "leaky" { - if bucketFactory.Capacity <= 0 { //capacity must be a positive int + if bucketFactory.Capacity <= 0 { // capacity must be a positive int return fmt.Errorf("bad capacity for leaky '%d'", bucketFactory.Capacity) } + if bucketFactory.LeakSpeed == "" { - return fmt.Errorf("leakspeed can't be empty for leaky") + return errors.New("leakspeed can't be empty for leaky") } + if bucketFactory.leakspeed == 0 { return fmt.Errorf("bad leakspeed for leaky '%s'", bucketFactory.LeakSpeed) } } else if bucketFactory.Type == "counter" { if bucketFactory.Duration == "" { - return fmt.Errorf("duration can't be empty for counter") + return errors.New("duration can't be empty for counter") } + if bucketFactory.duration == 0 { return fmt.Errorf("bad duration for counter bucket '%d'", bucketFactory.duration) } + if bucketFactory.Capacity != -1 { - return fmt.Errorf("counter bucket must have -1 capacity") + return errors.New("counter bucket must have -1 capacity") } } else if bucketFactory.Type == "trigger" { if bucketFactory.Capacity != 0 { - return fmt.Errorf("trigger bucket must have 0 capacity") + return errors.New("trigger bucket must 
have 0 capacity") } } else if bucketFactory.Type == "conditional" { if bucketFactory.ConditionalOverflow == "" { - return fmt.Errorf("conditional bucket must have a condition") + return errors.New("conditional bucket must have a condition") } + if bucketFactory.Capacity != -1 { bucketFactory.logger.Warnf("Using a value different than -1 as capacity for conditional bucket, this may lead to unexpected overflows") } + if bucketFactory.LeakSpeed == "" { - return fmt.Errorf("leakspeed can't be empty for conditional bucket") + return errors.New("leakspeed can't be empty for conditional bucket") } + if bucketFactory.leakspeed == 0 { return fmt.Errorf("bad leakspeed for conditional bucket '%s'", bucketFactory.LeakSpeed) } } else if bucketFactory.Type == "bayesian" { if bucketFactory.BayesianConditions == nil { - return fmt.Errorf("bayesian bucket must have bayesian conditions") + return errors.New("bayesian bucket must have bayesian conditions") } + if bucketFactory.BayesianPrior == 0 { - return fmt.Errorf("bayesian bucket must have a valid, non-zero prior") + return errors.New("bayesian bucket must have a valid, non-zero prior") } + if bucketFactory.BayesianThreshold == 0 { - return fmt.Errorf("bayesian bucket must have a valid, non-zero threshold") + return errors.New("bayesian bucket must have a valid, non-zero threshold") } + if bucketFactory.BayesianPrior > 1 { - return fmt.Errorf("bayesian bucket must have a valid, non-zero prior") + return errors.New("bayesian bucket must have a valid, non-zero prior") } + if bucketFactory.BayesianThreshold > 1 { - return fmt.Errorf("bayesian bucket must have a valid, non-zero threshold") + return errors.New("bayesian bucket must have a valid, non-zero threshold") } + if bucketFactory.Capacity != -1 { - return fmt.Errorf("bayesian bucket must have capacity -1") + return errors.New("bayesian bucket must have capacity -1") } } else { return fmt.Errorf("unknown bucket type '%s'", bucketFactory.Type) @@ -155,26 +169,31 @@ func 
ValidateFactory(bucketFactory *BucketFactory) error { runTimeFilter *vm.Program err error ) + if bucketFactory.ScopeType.Filter != "" { if runTimeFilter, err = expr.Compile(bucketFactory.ScopeType.Filter, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...); err != nil { - return fmt.Errorf("Error compiling the scope filter: %s", err) + return fmt.Errorf("error compiling the scope filter: %w", err) } + bucketFactory.ScopeType.RunTimeFilter = runTimeFilter } default: - //Compile the scope filter + // Compile the scope filter var ( runTimeFilter *vm.Program err error ) + if bucketFactory.ScopeType.Filter != "" { if runTimeFilter, err = expr.Compile(bucketFactory.ScopeType.Filter, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...); err != nil { - return fmt.Errorf("Error compiling the scope filter: %s", err) + return fmt.Errorf("error compiling the scope filter: %w", err) } + bucketFactory.ScopeType.RunTimeFilter = runTimeFilter } } + return nil } @@ -185,48 +204,58 @@ func LoadBuckets(cscfg *csconfig.CrowdsecServiceCfg, hub *cwhub.Hub, files []str ) response = make(chan types.Event, 1) + for _, f := range files { log.Debugf("Loading '%s'", f) + if !strings.HasSuffix(f, ".yaml") && !strings.HasSuffix(f, ".yml") { log.Debugf("Skipping %s : not a yaml file", f) continue } - //process the yaml + // process the yaml bucketConfigurationFile, err := os.Open(f) if err != nil { log.Errorf("Can't access leaky configuration file %s", f) return nil, nil, err } + defer bucketConfigurationFile.Close() dec := yaml.NewDecoder(bucketConfigurationFile) dec.SetStrict(true) + for { bucketFactory := BucketFactory{} + err = dec.Decode(&bucketFactory) if err != nil { if !errors.Is(err, io.EOF) { log.Errorf("Bad yaml in %s : %v", f, err) return nil, nil, fmt.Errorf("bad yaml in %s : %v", f, err) } + log.Tracef("End of yaml file") + break } + bucketFactory.DataDir = hub.GetDataDir() - //check empty + // check empty if bucketFactory.Name == 
"" { log.Errorf("Won't load nameless bucket") - return nil, nil, fmt.Errorf("nameless bucket") + return nil, nil, errors.New("nameless bucket") } - //check compat + // check compat if bucketFactory.FormatVersion == "" { log.Tracef("no version in %s : %s, assuming '1.0'", bucketFactory.Name, f) bucketFactory.FormatVersion = "1.0" } + ok, err := cwversion.Satisfies(bucketFactory.FormatVersion, cwversion.Constraint_scenario) if err != nil { return nil, nil, fmt.Errorf("failed to check version : %s", err) } + if !ok { log.Errorf("can't load %s : %s doesn't satisfy scenario format %s, skip", bucketFactory.Name, bucketFactory.FormatVersion, cwversion.Constraint_scenario) continue @@ -235,6 +264,7 @@ func LoadBuckets(cscfg *csconfig.CrowdsecServiceCfg, hub *cwhub.Hub, files []str bucketFactory.Filename = filepath.Clean(f) bucketFactory.BucketName = seed.Generate() bucketFactory.ret = response + hubItem, err := hub.GetItemByPath(cwhub.SCENARIOS, bucketFactory.Filename) if err != nil { log.Errorf("scenario %s (%s) couldn't be find in hub (ignore if in unit tests)", bucketFactory.Name, bucketFactory.Filename) @@ -242,6 +272,7 @@ func LoadBuckets(cscfg *csconfig.CrowdsecServiceCfg, hub *cwhub.Hub, files []str if cscfg.SimulationConfig != nil { bucketFactory.Simulated = cscfg.SimulationConfig.IsSimulated(hubItem.Name) } + if hubItem != nil { bucketFactory.ScenarioVersion = hubItem.State.LocalVersion bucketFactory.hash = hubItem.State.LocalHash @@ -252,6 +283,7 @@ func LoadBuckets(cscfg *csconfig.CrowdsecServiceCfg, hub *cwhub.Hub, files []str bucketFactory.wgDumpState = buckets.wgDumpState bucketFactory.wgPour = buckets.wgPour + err = LoadBucket(&bucketFactory, tomb) if err != nil { log.Errorf("Failed to load bucket %s : %v", bucketFactory.Name, err) @@ -265,21 +297,24 @@ func LoadBuckets(cscfg *csconfig.CrowdsecServiceCfg, hub *cwhub.Hub, files []str } if err := alertcontext.NewAlertContext(cscfg.ContextToSend, cscfg.ConsoleContextValueLength); err != nil { - return nil, nil, 
fmt.Errorf("unable to load alert context: %s", err) + return nil, nil, fmt.Errorf("unable to load alert context: %w", err) } log.Infof("Loaded %d scenarios", len(ret)) + return ret, response, nil } /* Init recursively process yaml files from a directory and loads them as BucketFactory */ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error { var err error + if bucketFactory.Debug { - var clog = log.New() + clog := log.New() if err := types.ConfigureLogger(clog); err != nil { log.Fatalf("While creating bucket-specific logger : %s", err) } + clog.SetLevel(log.DebugLevel) bucketFactory.logger = clog.WithFields(log.Fields{ "cfg": bucketFactory.BucketName, @@ -300,6 +335,7 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error { } else { bucketFactory.leakspeed = time.Duration(0) } + if bucketFactory.Duration != "" { if bucketFactory.duration, err = time.ParseDuration(bucketFactory.Duration); err != nil { return fmt.Errorf("invalid Duration '%s' in %s : %v", bucketFactory.Duration, bucketFactory.Filename, err) @@ -308,8 +344,9 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error { if bucketFactory.Filter == "" { bucketFactory.logger.Warning("Bucket without filter, abort.") - return fmt.Errorf("bucket without filter directive") + return errors.New("bucket without filter directive") } + bucketFactory.RunTimeFilter, err = expr.Compile(bucketFactory.Filter, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...) 
if err != nil { return fmt.Errorf("invalid filter '%s' in %s : %v", bucketFactory.Filter, bucketFactory.Filename, err) @@ -323,7 +360,7 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error { } bucketFactory.logger.Infof("Adding %s bucket", bucketFactory.Type) - //return the Holder corresponding to the type of bucket + // return the Holder corresponding to the type of bucket bucketFactory.processors = []Processor{} switch bucketFactory.Type { case "leaky": @@ -352,21 +389,25 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error { if bucketFactory.OverflowFilter != "" { bucketFactory.logger.Tracef("Adding an overflow filter") + filovflw, err := NewOverflowFilter(bucketFactory) if err != nil { bucketFactory.logger.Errorf("Error creating overflow_filter : %s", err) - return fmt.Errorf("error creating overflow_filter : %s", err) + return fmt.Errorf("error creating overflow_filter: %w", err) } + bucketFactory.processors = append(bucketFactory.processors, filovflw) } if bucketFactory.Blackhole != "" { bucketFactory.logger.Tracef("Adding blackhole.") + blackhole, err := NewBlackhole(bucketFactory) if err != nil { bucketFactory.logger.Errorf("Error creating blackhole : %s", err) - return fmt.Errorf("error creating blackhole : %s", err) + return fmt.Errorf("error creating blackhole : %w", err) } + bucketFactory.processors = append(bucketFactory.processors, blackhole) } @@ -380,19 +421,19 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error { bucketFactory.processors = append(bucketFactory.processors, &BayesianBucket{}) } - if len(bucketFactory.Data) > 0 { - for _, data := range bucketFactory.Data { - if data.DestPath == "" { - bucketFactory.logger.Errorf("no dest_file provided for '%s'", bucketFactory.Name) - continue - } - err = exprhelpers.FileInit(bucketFactory.DataDir, data.DestPath, data.Type) - if err != nil { - bucketFactory.logger.Errorf("unable to init data for file '%s': %s", data.DestPath, err) - } - if 
data.Type == "regexp" { //cache only makes sense for regexp - exprhelpers.RegexpCacheInit(data.DestPath, *data) - } + for _, data := range bucketFactory.Data { + if data.DestPath == "" { + bucketFactory.logger.Errorf("no dest_file provided for '%s'", bucketFactory.Name) + continue + } + + err = exprhelpers.FileInit(bucketFactory.DataDir, data.DestPath, data.Type) + if err != nil { + bucketFactory.logger.Errorf("unable to init data for file '%s': %s", data.DestPath, err) + } + + if data.Type == "regexp" { // cache only makes sense for regexp + exprhelpers.RegexpCacheInit(data.DestPath, *data) } } @@ -400,34 +441,40 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error { if err := ValidateFactory(bucketFactory); err != nil { return fmt.Errorf("invalid bucket from %s : %v", bucketFactory.Filename, err) } + bucketFactory.tomb = tomb return nil - } func LoadBucketsState(file string, buckets *Buckets, bucketFactories []BucketFactory) error { var state map[string]Leaky + body, err := os.ReadFile(file) if err != nil { - return fmt.Errorf("can't state file %s : %s", file, err) + return fmt.Errorf("can't read state file %s: %w", file, err) } + if err := json.Unmarshal(body, &state); err != nil { - return fmt.Errorf("can't unmarshal state file %s : %s", file, err) + return fmt.Errorf("can't unmarshal state file %s: %w", file, err) } + for k, v := range state { var tbucket *Leaky + log.Debugf("Reloading bucket %s", k) + val, ok := buckets.Bucket_map.Load(k) if ok { log.Fatalf("key %s already exists : %+v", k, val) } - //find back our holder + // find back our holder found := false + for _, h := range bucketFactories { if h.Name == v.Name { log.Debugf("found factory %s/%s -> %s", h.Author, h.Name, h.Description) - //check in which mode the bucket was + // check in which mode the bucket was if v.Mode == types.TIMEMACHINE { tbucket = NewTimeMachine(h) } else if v.Mode == types.LIVE { @@ -451,16 +498,19 @@ func LoadBucketsState(file string, buckets *Buckets, 
bucketFactories []BucketFac return LeakRoutine(tbucket) }) <-tbucket.Signal + found = true + break } } + if !found { log.Fatalf("Unable to find holder for bucket %s : %s", k, spew.Sdump(v)) } } log.Infof("Restored %d buckets from dump", len(state)) - return nil + return nil } diff --git a/pkg/types/ip.go b/pkg/types/ip.go index 5e4d7734f2d..9d08afd8809 100644 --- a/pkg/types/ip.go +++ b/pkg/types/ip.go @@ -2,6 +2,7 @@ package types import ( "encoding/binary" + "errors" "fmt" "math" "net" @@ -15,6 +16,7 @@ func LastAddress(n net.IPNet) net.IP { if ip == nil { // IPv6 ip = n.IP + return net.IP{ ip[0] | ^n.Mask[0], ip[1] | ^n.Mask[1], ip[2] | ^n.Mask[2], ip[3] | ^n.Mask[3], ip[4] | ^n.Mask[4], ip[5] | ^n.Mask[5], @@ -38,12 +40,13 @@ func Addr2Ints(anyIP string) (int, int64, int64, int64, int64, error) { if err != nil { return -1, 0, 0, 0, 0, fmt.Errorf("while parsing range %s: %w", anyIP, err) } + return Range2Ints(*net) } ip := net.ParseIP(anyIP) if ip == nil { - return -1, 0, 0, 0, 0, fmt.Errorf("invalid address") + return -1, 0, 0, 0, 0, errors.New("invalid address") } sz, start, end, err := IP2Ints(ip) @@ -56,19 +59,22 @@ func Addr2Ints(anyIP string) (int, int64, int64, int64, int64, error) { /*size (16|4), nw_start, suffix_start, nw_end, suffix_end, error*/ func Range2Ints(network net.IPNet) (int, int64, int64, int64, int64, error) { - szStart, nwStart, sfxStart, err := IP2Ints(network.IP) if err != nil { return -1, 0, 0, 0, 0, fmt.Errorf("converting first ip in range: %w", err) } + lastAddr := LastAddress(network) + szEnd, nwEnd, sfxEnd, err := IP2Ints(lastAddr) if err != nil { return -1, 0, 0, 0, 0, fmt.Errorf("transforming last address of range: %w", err) } + if szEnd != szStart { return -1, 0, 0, 0, 0, fmt.Errorf("inconsistent size for range first(%d) and last(%d) ip", szStart, szEnd) } + return szStart, nwStart, sfxStart, nwEnd, sfxEnd, nil } @@ -85,6 +91,7 @@ func uint2int(u uint64) int64 { ret = int64(u) ret -= math.MaxInt64 } + return ret } @@ -97,13 
+104,15 @@ func IP2Ints(pip net.IP) (int, int64, int64, error) { if pip4 != nil { ip_nw32 := binary.BigEndian.Uint32(pip4) - return 4, uint2int(uint64(ip_nw32)), uint2int(ip_sfx), nil - } else if pip16 != nil { + } + + if pip16 != nil { ip_nw = binary.BigEndian.Uint64(pip16[0:8]) ip_sfx = binary.BigEndian.Uint64(pip16[8:16]) + return 16, uint2int(ip_nw), uint2int(ip_sfx), nil - } else { - return -1, 0, 0, fmt.Errorf("unexpected len %d for %s", len(pip), pip) } + + return -1, 0, 0, fmt.Errorf("unexpected len %d for %s", len(pip), pip) } From 3392e5e00f93fdd0e88d2e22a7c9acded777a811 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 2 May 2024 14:22:02 +0200 Subject: [PATCH 113/318] enable linter "revive" (#2978) * enable linter "revive" * enable some revive checks * pointer receiver for crzLogger * fix "range-loop variables always have the same address" * lint (whitespace) --- .golangci.yml | 135 +++++++++++++++++++++++++++++++++++- cmd/crowdsec/output.go | 5 +- pkg/appsec/coraza_logger.go | 56 ++++++++++----- 3 files changed, 177 insertions(+), 19 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 8e60a3ca8f8..c044903d0f2 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -111,6 +111,118 @@ linters-settings: - pkg: "gopkg.in/yaml.v2" desc: "yaml.v2 is deprecated for new code in favor of yaml.v3" + revive: + ignore-generated-header: true + severity: error + enable-all-rules: true + rules: + - name: add-constant + disabled: true + - name: argument-limit + disabled: true + - name: bare-return + disabled: true + - name: blank-imports + disabled: true + - name: bool-literal-in-expr + disabled: true + - name: cognitive-complexity + disabled: true + - name: comment-spacings + disabled: true + - name: confusing-results + disabled: true + - name: context-as-argument + disabled: true + - name: cyclomatic + disabled: true + - name: deep-exit + disabled: true + - name: defer + disabled: true + - name: duplicated-imports + 
disabled: true + - name: early-return + disabled: true + - name: empty-block + disabled: true + - name: empty-lines + disabled: true + - name: error-naming + disabled: true + - name: error-strings + disabled: true + - name: flag-parameter + disabled: true + - name: function-result-limit + disabled: true + - name: function-length + disabled: true + - name: get-return + disabled: true + - name: if-return + disabled: true + - name: increment-decrement + disabled: true + - name: indent-error-flow + disabled: true + - name: import-alias-naming + disabled: true + - name: import-shadowing + disabled: true + - name: line-length-limit + disabled: true + - name: max-control-nesting + disabled: true + - name: max-public-structs + disabled: true + - name: modifies-parameter + disabled: true + - name: optimize-operands-order + disabled: true + - name: nested-structs + disabled: true + - name: package-comments + disabled: true + - name: redundant-import-alias + disabled: true + - name: struct-tag + disabled: true + - name: superfluous-else + disabled: true + - name: time-equal + disabled: true + - name: var-naming + disabled: true + - name: var-declaration + disabled: true + - name: unchecked-type-assertion + disabled: true + - name: exported + disabled: true + - name: unexported-naming + disabled: true + - name: unexported-return + disabled: true + - name: unhandled-error + disabled: true + arguments: + - "fmt.Print" + - "fmt.Printf" + - "fmt.Println" + - name: unnecessary-stmt + disabled: true + - name: unreachable-code + disabled: true + - name: unused-parameter + disabled: true + - name: unused-receiver + disabled: true + - name: use-any + disabled: true + - name: useless-break + disabled: true + wsl: # Allow blocks to end with comments allow-trailing-comment: true @@ -191,6 +303,7 @@ linters: # - perfsprint # Checks that fmt.Sprintf can be replaced with a faster alternative. 
# - predeclared # find code that shadows one of Go's predeclared identifiers # - reassign # Checks that package variables are not reassigned + # - revive # Fast, configurable, extensible, flexible, and beautiful linter for Go. Drop-in replacement of golint. # - rowserrcheck # checks whether Rows.Err of rows is checked successfully # - sloglint # ensure consistent code style when using log/slog # - spancheck # Checks for mistakes with OpenTelemetry/Census spans. @@ -222,7 +335,6 @@ linters: - musttag # enforce field tags in (un)marshaled structs - promlinter # Check Prometheus metrics naming via promlint - protogetter # Reports direct reads from proto message fields when getters should be used - - revive # Fast, configurable, extensible, flexible, and beautiful linter for Go. Drop-in replacement of golint. - tagalign # check that struct tags are well aligned - thelper # thelper detects tests helpers which is not start with t.Helper() method. - wrapcheck # Checks that errors returned from external packages are wrapped @@ -373,3 +485,24 @@ issues: - linters: - nonamedreturns text: "named return .* with type .* found" + + - linters: + - revive + path: pkg/leakybucket/manager_load.go + text: "confusing-naming: Field '.*' differs only by capitalization to other field in the struct type BucketFactory" + + - linters: + - revive + path: pkg/exprhelpers/helpers.go + text: "confusing-naming: Method 'flatten' differs only by capitalization to function 'Flatten' in the same source file" + + - linters: + - revive + path: pkg/appsec/query_utils.go + text: "confusing-naming: Method 'parseQuery' differs only by capitalization to function 'ParseQuery' in the same source file" + + - linters: + - revive + path: pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go + text: "confusing-naming: Method 'QueryRange' differs only by capitalization to method 'queryRange' in the same source file" + diff --git a/cmd/crowdsec/output.go b/cmd/crowdsec/output.go index 
ac05b502e52..6f507fdcd6f 100644 --- a/cmd/crowdsec/output.go +++ b/cmd/crowdsec/output.go @@ -26,11 +26,12 @@ func dedupAlerts(alerts []types.RuntimeAlert) ([]*models.Alert, error) { continue } - for k, src := range alert.Sources { + for k := range alert.Sources { refsrc := *alert.Alert // copy log.Tracef("source[%s]", k) + src := alert.Sources[k] refsrc.Source = &src dedupCache = append(dedupCache, &refsrc) } @@ -45,8 +46,8 @@ func dedupAlerts(alerts []types.RuntimeAlert) ([]*models.Alert, error) { func PushAlerts(alerts []types.RuntimeAlert, client *apiclient.ApiClient) error { ctx := context.Background() - alertsToPush, err := dedupAlerts(alerts) + alertsToPush, err := dedupAlerts(alerts) if err != nil { return fmt.Errorf("failed to transform alerts for api: %w", err) } diff --git a/pkg/appsec/coraza_logger.go b/pkg/appsec/coraza_logger.go index 372a0098ecc..7229f038b92 100644 --- a/pkg/appsec/coraza_logger.go +++ b/pkg/appsec/coraza_logger.go @@ -4,8 +4,9 @@ import ( "fmt" "io" - dbg "github.com/crowdsecurity/coraza/v3/debuglog" log "github.com/sirupsen/logrus" + + dbg "github.com/crowdsecurity/coraza/v3/debuglog" ) var DebugRules map[int]bool = map[int]bool{} @@ -18,6 +19,7 @@ func GetRuleDebug(id int) bool { if val, ok := DebugRules[id]; ok { return val } + return false } @@ -60,7 +62,9 @@ func (e *crzLogEvent) Str(key, val string) dbg.Event { if e.muted { return e } + e.fields[key] = val + return e } @@ -68,7 +72,9 @@ func (e *crzLogEvent) Err(err error) dbg.Event { if e.muted { return e } + e.fields["error"] = err + return e } @@ -76,13 +82,15 @@ func (e *crzLogEvent) Bool(key string, b bool) dbg.Event { if e.muted { return e } + e.fields[key] = b + return e } func (e *crzLogEvent) Int(key string, i int) dbg.Event { if e.muted { - //this allows us to have per-rule debug logging + // this allows us to have per-rule debug logging if key == "rule_id" && GetRuleDebug(i) { e.muted = false e.fields = map[string]interface{}{} @@ -91,7 +99,9 @@ func (e 
*crzLogEvent) Int(key string, i int) dbg.Event { return e } } + e.fields[key] = i + return e } @@ -99,7 +109,9 @@ func (e *crzLogEvent) Uint(key string, i uint) dbg.Event { if e.muted { return e } + e.fields[key] = i + return e } @@ -107,7 +119,9 @@ func (e *crzLogEvent) Stringer(key string, val fmt.Stringer) dbg.Event { if e.muted { return e } + e.fields[key] = val + return e } @@ -121,74 +135,84 @@ type crzLogger struct { logLevel log.Level } -func NewCrzLogger(logger *log.Entry) crzLogger { - return crzLogger{logger: logger, logLevel: logger.Logger.GetLevel()} +func NewCrzLogger(logger *log.Entry) *crzLogger { + return &crzLogger{logger: logger, logLevel: logger.Logger.GetLevel()} } -func (c crzLogger) NewMutedEvt(lvl log.Level) dbg.Event { +func (c *crzLogger) NewMutedEvt(lvl log.Level) dbg.Event { return &crzLogEvent{muted: true, logger: c.logger, level: lvl} } -func (c crzLogger) NewEvt(lvl log.Level) dbg.Event { + +func (c *crzLogger) NewEvt(lvl log.Level) dbg.Event { evt := &crzLogEvent{fields: map[string]interface{}{}, logger: c.logger, level: lvl} + if c.defaultFields != nil { for k, v := range c.defaultFields { evt.fields[k] = v } } + return evt } -func (c crzLogger) WithOutput(w io.Writer) dbg.Logger { +func (c *crzLogger) WithOutput(w io.Writer) dbg.Logger { return c } -func (c crzLogger) WithLevel(lvl dbg.Level) dbg.Logger { +func (c *crzLogger) WithLevel(lvl dbg.Level) dbg.Logger { c.logLevel = log.Level(lvl) c.logger.Logger.SetLevel(c.logLevel) + return c } -func (c crzLogger) With(fs ...dbg.ContextField) dbg.Logger { - var e dbg.Event = c.NewEvt(c.logLevel) +func (c *crzLogger) With(fs ...dbg.ContextField) dbg.Logger { + e := c.NewEvt(c.logLevel) for _, f := range fs { e = f(e) } + c.defaultFields = e.(*crzLogEvent).fields + return c } -func (c crzLogger) Trace() dbg.Event { +func (c *crzLogger) Trace() dbg.Event { if c.logLevel < log.TraceLevel { return c.NewMutedEvt(log.TraceLevel) } + return c.NewEvt(log.TraceLevel) } -func (c crzLogger) Debug() 
dbg.Event { +func (c *crzLogger) Debug() dbg.Event { if c.logLevel < log.DebugLevel { return c.NewMutedEvt(log.DebugLevel) - } + return c.NewEvt(log.DebugLevel) } -func (c crzLogger) Info() dbg.Event { +func (c *crzLogger) Info() dbg.Event { if c.logLevel < log.InfoLevel { return c.NewMutedEvt(log.InfoLevel) } + return c.NewEvt(log.InfoLevel) } -func (c crzLogger) Warn() dbg.Event { +func (c *crzLogger) Warn() dbg.Event { if c.logLevel < log.WarnLevel { return c.NewMutedEvt(log.WarnLevel) } + return c.NewEvt(log.WarnLevel) } -func (c crzLogger) Error() dbg.Event { +func (c *crzLogger) Error() dbg.Event { if c.logLevel < log.ErrorLevel { return c.NewMutedEvt(log.ErrorLevel) } + return c.NewEvt(log.ErrorLevel) } From 659feec496387cf9dcfe647d4d579b3557fb6751 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 6 May 2024 10:43:54 +0200 Subject: [PATCH 114/318] cscli: don't print timestamps (#2984) * cscli: don't print timestamps * lint (whitespace, errors) --- cmd/crowdsec-cli/main.go | 2 ++ pkg/apiserver/papi.go | 60 +++++++++++++++++++++++----------------- pkg/database/database.go | 3 +- pkg/database/flush.go | 3 +- pkg/hubtest/utils.go | 5 ++-- pkg/setup/detect.go | 6 ++-- pkg/setup/detect_test.go | 6 +++- pkg/setup/install.go | 3 +- pkg/setup/units.go | 3 +- 9 files changed, 56 insertions(+), 35 deletions(-) diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index 0705faa4065..e3c45390a18 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -132,6 +132,8 @@ func (cli *cliRoot) initialize() { log.Fatalf("output format '%s' not supported: must be one of human, json, raw", csConfig.Cscli.Output) } + log.SetFormatter(&log.TextFormatter{DisableTimestamp: true}) + if csConfig.Cscli.Output == "json" { log.SetFormatter(&log.JSONFormatter{}) log.SetLevel(log.ErrorLevel) diff --git a/pkg/apiserver/papi.go b/pkg/apiserver/papi.go index a3996850a2b..8dbd1bb9641 100644 --- a/pkg/apiserver/papi.go +++ 
b/pkg/apiserver/papi.go @@ -3,6 +3,7 @@ package apiserver import ( "context" "encoding/json" + "errors" "fmt" "net/http" "sync" @@ -21,21 +22,15 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) -var ( - SyncInterval = time.Second * 10 -) +var SyncInterval = time.Second * 10 -const ( - PapiPullKey = "papi:last_pull" -) +const PapiPullKey = "papi:last_pull" -var ( - operationMap = map[string]func(*Message, *Papi, bool) error{ - "decision": DecisionCmd, - "alert": AlertCmd, - "management": ManagementCmd, - } -) +var operationMap = map[string]func(*Message, *Papi, bool) error{ + "decision": DecisionCmd, + "alert": AlertCmd, + "management": ManagementCmd, +} type Header struct { OperationType string `json:"operation_type"` @@ -87,21 +82,21 @@ type PapiPermCheckSuccess struct { } func NewPAPI(apic *apic, dbClient *database.Client, consoleConfig *csconfig.ConsoleConfig, logLevel log.Level) (*Papi, error) { - logger := log.New() if err := types.ConfigureLogger(logger); err != nil { - return &Papi{}, fmt.Errorf("creating papi logger: %s", err) + return &Papi{}, fmt.Errorf("creating papi logger: %w", err) } + logger.SetLevel(logLevel) papiUrl := *apic.apiClient.PapiURL papiUrl.Path = fmt.Sprintf("%s%s", types.PAPIVersion, types.PAPIPollUrl) + longPollClient, err := longpollclient.NewLongPollClient(longpollclient.LongPollClientConfig{ Url: papiUrl, Logger: logger, HttpClient: apic.apiClient.GetClient(), }) - if err != nil { return &Papi{}, fmt.Errorf("failed to create PAPI client: %w", err) } @@ -132,55 +127,68 @@ func NewPAPI(apic *apic, dbClient *database.Client, consoleConfig *csconfig.Cons func (p *Papi) handleEvent(event longpollclient.Event, sync bool) error { logger := p.Logger.WithField("request-id", event.RequestId) logger.Debugf("message received: %+v", event.Data) + message := &Message{} if err := json.Unmarshal([]byte(event.Data), message); err != nil { return fmt.Errorf("polling papi message format is not compatible: %+v: %s", event.Data, err) } + if 
message.Header == nil { - return fmt.Errorf("no header in message, skipping") + return errors.New("no header in message, skipping") } + if message.Header.Source == nil { - return fmt.Errorf("no source user in header message, skipping") + return errors.New("no source user in header message, skipping") } if operationFunc, ok := operationMap[message.Header.OperationType]; ok { logger.Debugf("Calling operation '%s'", message.Header.OperationType) + err := operationFunc(message, p, sync) if err != nil { - return fmt.Errorf("'%s %s failed: %s", message.Header.OperationType, message.Header.OperationCmd, err) + return fmt.Errorf("'%s %s failed: %w", message.Header.OperationType, message.Header.OperationCmd, err) } } else { return fmt.Errorf("operation '%s' unknown, continue", message.Header.OperationType) } + return nil } func (p *Papi) GetPermissions() (PapiPermCheckSuccess, error) { httpClient := p.apiClient.GetClient() papiCheckUrl := fmt.Sprintf("%s%s%s", p.URL, types.PAPIVersion, types.PAPIPermissionsUrl) + req, err := http.NewRequest(http.MethodGet, papiCheckUrl, nil) if err != nil { return PapiPermCheckSuccess{}, fmt.Errorf("failed to create request : %s", err) } + resp, err := httpClient.Do(req) if err != nil { log.Fatalf("failed to get response : %s", err) } defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { errResp := PapiPermCheckError{} + err = json.NewDecoder(resp.Body).Decode(&errResp) if err != nil { return PapiPermCheckSuccess{}, fmt.Errorf("failed to decode response : %s", err) } + return PapiPermCheckSuccess{}, fmt.Errorf("unable to query PAPI : %s (%d)", errResp.Error, resp.StatusCode) } + respBody := PapiPermCheckSuccess{} + err = json.NewDecoder(resp.Body).Decode(&respBody) if err != nil { return PapiPermCheckSuccess{}, fmt.Errorf("failed to decode response : %s", err) } + return respBody, nil } @@ -202,7 +210,7 @@ func (p *Papi) PullOnce(since time.Time, sync bool) error { return err } - reversedEvents := reverse(events) //PAPI sends 
events in the reverse order, which is not an issue when pulling them in real time, but here we need the correct order + reversedEvents := reverse(events) // PAPI sends events in the reverse order, which is not an issue when pulling them in real time, but here we need the correct order eventsCount := len(events) p.Logger.Infof("received %d events", eventsCount) @@ -215,8 +223,8 @@ func (p *Papi) PullOnce(since time.Time, sync bool) error { } p.Logger.Debugf("finished handling events") - //Don't update the timestamp in DB, as a "real" LAPI might be running - //Worst case, crowdsec will receive a few duplicated events and will discard them + // Don't update the timestamp in DB, as a "real" LAPI might be running + // Worst case, crowdsec will receive a few duplicated events and will discard them return nil } @@ -232,7 +240,7 @@ func (p *Papi) Pull() error { p.Logger.Warningf("failed to get last timestamp for papi pull: %s", err) } - //value doesn't exist, it's first time we're pulling + // value doesn't exist, it's first time we're pulling if lastTimestampStr == nil { binTime, err := lastTimestamp.MarshalText() if err != nil { @@ -254,7 +262,7 @@ func (p *Papi) Pull() error { for event := range p.Client.Start(lastTimestamp) { logger := p.Logger.WithField("request-id", event.RequestId) - //update last timestamp in database + // update last timestamp in database newTime := time.Now().UTC() binTime, err := newTime.MarshalText() @@ -329,7 +337,7 @@ func (p *Papi) SyncDecisions() error { func (p *Papi) SendDeletedDecisions(cacheOrig *models.DecisionsDeleteRequest) { var ( cache []models.DecisionsDeleteRequestItem = *cacheOrig - send models.DecisionsDeleteRequest + send models.DecisionsDeleteRequest ) bulkSize := 50 @@ -359,7 +367,7 @@ func (p *Papi) SendDeletedDecisions(cacheOrig *models.DecisionsDeleteRequest) { _, _, err := p.apiClient.DecisionDelete.Add(ctx, &send) if err != nil { - //we log it here as well, because the return value of func might be discarded + // we log 
it here as well, because the return value of func might be discarded p.Logger.Errorf("sending deleted decisions to central API: %s", err) } diff --git a/pkg/database/database.go b/pkg/database/database.go index aa191d7dc43..d984aefb170 100644 --- a/pkg/database/database.go +++ b/pkg/database/database.go @@ -3,6 +3,7 @@ package database import ( "context" "database/sql" + "errors" "fmt" "os" @@ -47,7 +48,7 @@ func NewClient(config *csconfig.DatabaseCfg) (*Client, error) { var client *ent.Client var err error if config == nil { - return &Client{}, fmt.Errorf("DB config is empty") + return &Client{}, errors.New("DB config is empty") } /*The logger that will be used by db operations*/ clog := log.New() diff --git a/pkg/database/flush.go b/pkg/database/flush.go index a7b364fa970..ad4a912de84 100644 --- a/pkg/database/flush.go +++ b/pkg/database/flush.go @@ -1,6 +1,7 @@ package database import ( + "errors" "fmt" "time" @@ -21,7 +22,7 @@ func (c *Client) StartFlushScheduler(config *csconfig.FlushDBCfg) (*gocron.Sched maxItems := 0 maxAge := "" if config.MaxItems != nil && *config.MaxItems <= 0 { - return nil, fmt.Errorf("max_items can't be zero or negative number") + return nil, errors.New("max_items can't be zero or negative number") } if config.MaxItems != nil { maxItems = *config.MaxItems diff --git a/pkg/hubtest/utils.go b/pkg/hubtest/utils.go index 9009d0dddec..a7373fcc0bf 100644 --- a/pkg/hubtest/utils.go +++ b/pkg/hubtest/utils.go @@ -1,6 +1,7 @@ package hubtest import ( + "errors" "fmt" "net" "os" @@ -56,7 +57,7 @@ func checkPathNotContained(path string, subpath string) error { for { if current == absPath { - return fmt.Errorf("cannot copy a folder onto itself") + return errors.New("cannot copy a folder onto itself") } up := filepath.Dir(current) @@ -87,7 +88,7 @@ func CopyDir(src string, dest string) error { } if !file.IsDir() { - return fmt.Errorf("Source " + file.Name() + " is not a directory!") + return errors.New("Source " + file.Name() + " is not a 
directory!") } err = os.MkdirAll(dest, 0755) diff --git a/pkg/setup/detect.go b/pkg/setup/detect.go index 7d73092f74e..5deff904e19 100644 --- a/pkg/setup/detect.go +++ b/pkg/setup/detect.go @@ -2,6 +2,7 @@ package setup import ( "bytes" + "errors" "fmt" "io" "os" @@ -53,6 +54,7 @@ func validateDataSource(opaqueDS DataSourceItem) error { // formally validate YAML commonDS := configuration.DataSourceCommonCfg{} + body, err := yaml.Marshal(opaqueDS) if err != nil { return err @@ -66,7 +68,7 @@ func validateDataSource(opaqueDS DataSourceItem) error { // source is mandatory // XXX unless it's not? if commonDS.Source == "" { - return fmt.Errorf("source is empty") + return errors.New("source is empty") } // source must be known @@ -104,7 +106,7 @@ func readDetectConfig(fin io.Reader) (DetectConfig, error) { switch dc.Version { case "": - return DetectConfig{}, fmt.Errorf("missing version tag (must be 1.0)") + return DetectConfig{}, errors.New("missing version tag (must be 1.0)") case "1.0": // all is well default: diff --git a/pkg/setup/detect_test.go b/pkg/setup/detect_test.go index 242ade0494b..0ff3438d974 100644 --- a/pkg/setup/detect_test.go +++ b/pkg/setup/detect_test.go @@ -99,6 +99,7 @@ func TestPathExists(t *testing.T) { t.Run(tc.path, func(t *testing.T) { t.Parallel() + actual := env.PathExists(tc.path) require.Equal(t, tc.expected, actual) }) @@ -152,6 +153,7 @@ func TestVersionCheck(t *testing.T) { t.Run(fmt.Sprintf("Check(%s,%s)", tc.version, tc.constraint), func(t *testing.T) { t.Parallel() + actual, err := e.VersionCheck(tc.constraint) cstest.RequireErrorContains(t, err, tc.expectedErr) require.Equal(t, tc.expected, actual) @@ -249,8 +251,10 @@ func TestListSupported(t *testing.T) { tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() + f := tempYAML(t, tc.yml) defer os.Remove(f.Name()) + supported, err := setup.ListSupported(&f) cstest.RequireErrorContains(t, err, tc.expectedErr) require.ElementsMatch(t, tc.expected, supported) @@ -332,6 +336,7 @@ func 
TestApplyRules(t *testing.T) { tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() + svc := setup.Service{When: tc.rules} _, actualOk, err := setup.ApplyRules(svc, env) //nolint:typecheck,nolintlint // exported only for tests cstest.RequireErrorContains(t, err, tc.expectedErr) @@ -840,7 +845,6 @@ func TestDetectForcedOS(t *testing.T) { func TestDetectDatasourceValidation(t *testing.T) { // It could be a good idea to test UnmarshalConfig() separately in addition // to Configure(), in each datasource. For now, we test these here. - require := require.New(t) setup.ExecCommand = fakeExecCommand diff --git a/pkg/setup/install.go b/pkg/setup/install.go index fc922c5d19b..dc85706a15c 100644 --- a/pkg/setup/install.go +++ b/pkg/setup/install.go @@ -2,6 +2,7 @@ package setup import ( "bytes" + "errors" "fmt" "os" "path/filepath" @@ -173,7 +174,7 @@ func marshalAcquisDocuments(ads []AcquisDocument, toDir string) (string, error) if toDir != "" { if ad.AcquisFilename == "" { - return "", fmt.Errorf("empty acquis filename") + return "", errors.New("empty acquis filename") } fname := filepath.Join(toDir, ad.AcquisFilename) diff --git a/pkg/setup/units.go b/pkg/setup/units.go index a0bccba4aac..ab1eec6f33e 100644 --- a/pkg/setup/units.go +++ b/pkg/setup/units.go @@ -2,6 +2,7 @@ package setup import ( "bufio" + "errors" "fmt" "strings" @@ -41,7 +42,7 @@ func systemdUnitList() ([]string, error) { if !header { spaceIdx := strings.IndexRune(line, ' ') if spaceIdx == -1 { - return ret, fmt.Errorf("can't parse systemctl output") + return ret, errors.New("can't parse systemctl output") } line = line[:spaceIdx] From a2dcc0ef9a534fd9998651fa0b31338acd24e109 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 6 May 2024 12:33:54 +0200 Subject: [PATCH 115/318] cscli: remove global dbClient (#2985) * cscli: remove global dbClient * lint (whitespace, errors) --- cmd/crowdsec-cli/main.go | 2 - cmd/crowdsec-cli/papi.go | 14 +++---- 
cmd/crowdsec-cli/support.go | 2 +- pkg/database/database.go | 46 ++++++++++++++--------- pkg/leakybucket/manager_run.go | 6 +-- pkg/leakybucket/overflows.go | 68 +++++++++++++++++++++++++++------- 6 files changed, 94 insertions(+), 44 deletions(-) diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index e3c45390a18..95c528f20b5 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -15,14 +15,12 @@ import ( "github.com/crowdsecurity/go-cs-lib/trace" "github.com/crowdsecurity/crowdsec/pkg/csconfig" - "github.com/crowdsecurity/crowdsec/pkg/database" "github.com/crowdsecurity/crowdsec/pkg/fflag" ) var ( ConfigFilePath string csConfig *csconfig.Config - dbClient *database.Client ) type configGetter func() *csconfig.Config diff --git a/cmd/crowdsec-cli/papi.go b/cmd/crowdsec-cli/papi.go index 5808fcce5f6..558409b2d4d 100644 --- a/cmd/crowdsec-cli/papi.go +++ b/cmd/crowdsec-cli/papi.go @@ -62,17 +62,17 @@ func (cli *cliPapi) NewStatusCmd() *cobra.Command { RunE: func(_ *cobra.Command, _ []string) error { var err error cfg := cli.cfg() - dbClient, err = database.NewClient(cfg.DbConfig) + db, err := database.NewClient(cfg.DbConfig) if err != nil { return fmt.Errorf("unable to initialize database client: %w", err) } - apic, err := apiserver.NewAPIC(cfg.API.Server.OnlineClient, dbClient, cfg.API.Server.ConsoleConfig, cfg.API.Server.CapiWhitelists) + apic, err := apiserver.NewAPIC(cfg.API.Server.OnlineClient, db, cfg.API.Server.ConsoleConfig, cfg.API.Server.CapiWhitelists) if err != nil { return fmt.Errorf("unable to initialize API client: %w", err) } - papi, err := apiserver.NewPAPI(apic, dbClient, cfg.API.Server.ConsoleConfig, log.GetLevel()) + papi, err := apiserver.NewPAPI(apic, db, cfg.API.Server.ConsoleConfig, log.GetLevel()) if err != nil { return fmt.Errorf("unable to initialize PAPI client: %w", err) } @@ -82,7 +82,7 @@ func (cli *cliPapi) NewStatusCmd() *cobra.Command { return fmt.Errorf("unable to get PAPI permissions: %w", err) } var 
lastTimestampStr *string - lastTimestampStr, err = dbClient.GetConfigItem(apiserver.PapiPullKey) + lastTimestampStr, err = db.GetConfigItem(apiserver.PapiPullKey) if err != nil { lastTimestampStr = ptr.Of("never") } @@ -113,19 +113,19 @@ func (cli *cliPapi) NewSyncCmd() *cobra.Command { cfg := cli.cfg() t := tomb.Tomb{} - dbClient, err = database.NewClient(cfg.DbConfig) + db, err := database.NewClient(cfg.DbConfig) if err != nil { return fmt.Errorf("unable to initialize database client: %w", err) } - apic, err := apiserver.NewAPIC(cfg.API.Server.OnlineClient, dbClient, cfg.API.Server.ConsoleConfig, cfg.API.Server.CapiWhitelists) + apic, err := apiserver.NewAPIC(cfg.API.Server.OnlineClient, db, cfg.API.Server.ConsoleConfig, cfg.API.Server.CapiWhitelists) if err != nil { return fmt.Errorf("unable to initialize API client: %w", err) } t.Go(apic.Push) - papi, err := apiserver.NewPAPI(apic, dbClient, cfg.API.Server.ConsoleConfig, log.GetLevel()) + papi, err := apiserver.NewPAPI(apic, db, cfg.API.Server.ConsoleConfig, log.GetLevel()) if err != nil { return fmt.Errorf("unable to initialize PAPI client: %w", err) } diff --git a/cmd/crowdsec-cli/support.go b/cmd/crowdsec-cli/support.go index 418a981adee..5890061f502 100644 --- a/cmd/crowdsec-cli/support.go +++ b/cmd/crowdsec-cli/support.go @@ -331,7 +331,7 @@ cscli support dump -f /tmp/crowdsec-support.zip outFile = "/tmp/crowdsec-support.zip" } - dbClient, err = database.NewClient(csConfig.DbConfig) + dbClient, err := database.NewClient(csConfig.DbConfig) if err != nil { log.Warnf("Could not connect to database: %s", err) skipDB = true diff --git a/pkg/database/database.go b/pkg/database/database.go index d984aefb170..96a495f6731 100644 --- a/pkg/database/database.go +++ b/pkg/database/database.go @@ -35,72 +35,84 @@ func getEntDriver(dbtype string, dbdialect string, dsn string, config *csconfig. 
if err != nil { return nil, err } + if config.MaxOpenConns == nil { log.Warningf("MaxOpenConns is 0, defaulting to %d", csconfig.DEFAULT_MAX_OPEN_CONNS) config.MaxOpenConns = ptr.Of(csconfig.DEFAULT_MAX_OPEN_CONNS) } + db.SetMaxOpenConns(*config.MaxOpenConns) drv := entsql.OpenDB(dbdialect, db) + return drv, nil } func NewClient(config *csconfig.DatabaseCfg) (*Client, error) { var client *ent.Client - var err error + if config == nil { - return &Client{}, errors.New("DB config is empty") + return nil, errors.New("DB config is empty") } /*The logger that will be used by db operations*/ clog := log.New() if err := types.ConfigureLogger(clog); err != nil { return nil, fmt.Errorf("while configuring db logger: %w", err) } + if config.LogLevel != nil { clog.SetLevel(*config.LogLevel) } - entLogger := clog.WithField("context", "ent") + entLogger := clog.WithField("context", "ent") entOpt := ent.Log(entLogger.Debug) + typ, dia, err := config.ConnectionDialect() if err != nil { - return &Client{}, err //unsupported database caught here + return nil, err // unsupported database caught here } + if config.Type == "sqlite" { /*if it's the first startup, we want to touch and chmod file*/ if _, err := os.Stat(config.DbPath); os.IsNotExist(err) { - f, err := os.OpenFile(config.DbPath, os.O_CREATE|os.O_RDWR, 0600) + f, err := os.OpenFile(config.DbPath, os.O_CREATE|os.O_RDWR, 0o600) if err != nil { - return &Client{}, fmt.Errorf("failed to create SQLite database file %q: %w", config.DbPath, err) + return nil, fmt.Errorf("failed to create SQLite database file %q: %w", config.DbPath, err) } + if err := f.Close(); err != nil { - return &Client{}, fmt.Errorf("failed to create SQLite database file %q: %w", config.DbPath, err) + return nil, fmt.Errorf("failed to create SQLite database file %q: %w", config.DbPath, err) } } - //Always try to set permissions to simplify a bit the code for windows (as the permissions set by OpenFile will be garbage) - if err := setFilePerm(config.DbPath, 
0640); err != nil { - return &Client{}, fmt.Errorf("unable to set perms on %s: %v", config.DbPath, err) + // Always try to set permissions to simplify a bit the code for windows (as the permissions set by OpenFile will be garbage) + if err := setFilePerm(config.DbPath, 0o640); err != nil { + return nil, fmt.Errorf("unable to set perms on %s: %v", config.DbPath, err) } } + drv, err := getEntDriver(typ, dia, config.ConnectionString(), config) if err != nil { - return &Client{}, fmt.Errorf("failed opening connection to %s: %v", config.Type, err) + return nil, fmt.Errorf("failed opening connection to %s: %v", config.Type, err) } + client = ent.NewClient(ent.Driver(drv), entOpt) + if config.LogLevel != nil && *config.LogLevel >= log.DebugLevel { clog.Debugf("Enabling request debug") + client = client.Debug() } + if err = client.Schema.Create(context.Background()); err != nil { return nil, fmt.Errorf("failed creating schema resources: %v", err) } return &Client{ - Ent: client, - CTX: context.Background(), - Log: clog, - CanFlush: true, - Type: config.Type, - WalMode: config.UseWal, + Ent: client, + CTX: context.Background(), + Log: clog, + CanFlush: true, + Type: config.Type, + WalMode: config.UseWal, decisionBulkSize: config.DecisionBulkSize, }, nil } diff --git a/pkg/leakybucket/manager_run.go b/pkg/leakybucket/manager_run.go index ae7a86a4e4e..1d34c238ea5 100644 --- a/pkg/leakybucket/manager_run.go +++ b/pkg/leakybucket/manager_run.go @@ -85,7 +85,7 @@ func DumpBucketsStateAt(deadline time.Time, outputdir string, buckets *Buckets) defer buckets.wgDumpState.Done() if outputdir == "" { - return "", fmt.Errorf("empty output dir for dump bucket state") + return "", errors.New("empty output dir for dump bucket state") } tmpFd, err := os.CreateTemp(os.TempDir(), "crowdsec-buckets-dump-") if err != nil { @@ -132,11 +132,11 @@ func DumpBucketsStateAt(deadline time.Time, outputdir string, buckets *Buckets) }) bbuckets, err := json.MarshalIndent(serialized, "", " ") if err != 
nil { - return "", fmt.Errorf("Failed to unmarshal buckets : %s", err) + return "", fmt.Errorf("failed to unmarshal buckets: %s", err) } size, err := tmpFd.Write(bbuckets) if err != nil { - return "", fmt.Errorf("failed to write temp file : %s", err) + return "", fmt.Errorf("failed to write temp file: %s", err) } log.Infof("Serialized %d live buckets (+%d expired) in %d bytes to %s", len(serialized), discard, size, tmpFd.Name()) serialized = nil diff --git a/pkg/leakybucket/overflows.go b/pkg/leakybucket/overflows.go index 80226aafb2a..8092ef35e77 100644 --- a/pkg/leakybucket/overflows.go +++ b/pkg/leakybucket/overflows.go @@ -1,6 +1,7 @@ package leakybucket import ( + "errors" "fmt" "net" "sort" @@ -22,9 +23,7 @@ func SourceFromEvent(evt types.Event, leaky *Leaky) (map[string]models.Source, e /*if it's already an overflow, we have properly formatted sources. we can just twitch them to reflect the requested scope*/ if evt.Type == types.OVFLW { - for k, v := range evt.Overflow.Sources { - /*the scopes are already similar, nothing to do*/ if leaky.scopeType.Scope == *v.Scope { srcs[k] = v @@ -46,20 +45,25 @@ func SourceFromEvent(evt types.Event, leaky *Leaky) (map[string]models.Source, e src.Scope = new(string) *src.Scope = leaky.scopeType.Scope *src.Value = "" + if v.Range != "" { *src.Value = v.Range } + if leaky.scopeType.RunTimeFilter != nil { retValue, err := exprhelpers.Run(leaky.scopeType.RunTimeFilter, map[string]interface{}{"evt": &evt}, leaky.logger, leaky.BucketConfig.Debug) if err != nil { return srcs, fmt.Errorf("while running scope filter: %w", err) } + value, ok := retValue.(string) if !ok { value = "" } + src.Value = &value } + if *src.Value != "" { srcs[*src.Value] = src } else { @@ -71,50 +75,64 @@ func SourceFromEvent(evt types.Event, leaky *Leaky) (map[string]models.Source, e } } } + return srcs, nil } + src := models.Source{} + switch leaky.scopeType.Scope { case types.Range, types.Ip: v, ok := evt.Meta["source_ip"] if !ok { return srcs, 
fmt.Errorf("scope is %s but Meta[source_ip] doesn't exist", leaky.scopeType.Scope) } + if net.ParseIP(v) == nil { return srcs, fmt.Errorf("scope is %s but '%s' isn't a valid ip", leaky.scopeType.Scope, v) } + src.IP = v src.Scope = &leaky.scopeType.Scope + if v, ok := evt.Enriched["ASNumber"]; ok { src.AsNumber = v } else if v, ok := evt.Enriched["ASNNumber"]; ok { src.AsNumber = v } + if v, ok := evt.Enriched["IsoCode"]; ok { src.Cn = v } + if v, ok := evt.Enriched["ASNOrg"]; ok { src.AsName = v } + if v, ok := evt.Enriched["Latitude"]; ok { l, err := strconv.ParseFloat(v, 32) if err != nil { log.Warningf("bad latitude %s : %s", v, err) } + src.Latitude = float32(l) } + if v, ok := evt.Enriched["Longitude"]; ok { l, err := strconv.ParseFloat(v, 32) if err != nil { log.Warningf("bad longitude %s : %s", v, err) } + src.Longitude = float32(l) } + if v, ok := evt.Meta["SourceRange"]; ok && v != "" { _, ipNet, err := net.ParseCIDR(v) if err != nil { - return srcs, fmt.Errorf("Declared range %s of %s can't be parsed", v, src.IP) + return srcs, fmt.Errorf("declared range %s of %s can't be parsed", v, src.IP) } + if ipNet != nil { src.Range = ipNet.String() leaky.logger.Tracef("Valid range from %s : %s", src.IP, src.Range) @@ -124,6 +142,7 @@ func SourceFromEvent(evt types.Event, leaky *Leaky) (map[string]models.Source, e src.Value = &src.IP } else if leaky.scopeType.Scope == types.Range { src.Value = &src.Range + if leaky.scopeType.RunTimeFilter != nil { retValue, err := exprhelpers.Run(leaky.scopeType.RunTimeFilter, map[string]interface{}{"evt": &evt}, leaky.logger, leaky.BucketConfig.Debug) if err != nil { @@ -134,14 +153,17 @@ func SourceFromEvent(evt types.Event, leaky *Leaky) (map[string]models.Source, e if !ok { value = "" } + src.Value = &value } } + srcs[*src.Value] = src default: if leaky.scopeType.RunTimeFilter == nil { - return srcs, fmt.Errorf("empty scope information") + return srcs, errors.New("empty scope information") } + retValue, err := 
exprhelpers.Run(leaky.scopeType.RunTimeFilter, map[string]interface{}{"evt": &evt}, leaky.logger, leaky.BucketConfig.Debug) if err != nil { return srcs, fmt.Errorf("while running scope filter: %w", err) @@ -151,30 +173,34 @@ func SourceFromEvent(evt types.Event, leaky *Leaky) (map[string]models.Source, e if !ok { value = "" } + src.Value = &value src.Scope = new(string) *src.Scope = leaky.scopeType.Scope srcs[*src.Value] = src } + return srcs, nil } // EventsFromQueue iterates the queue to collect & prepare meta-datas from alert func EventsFromQueue(queue *types.Queue) []*models.Event { - events := []*models.Event{} for _, evt := range queue.Queue { if evt.Meta == nil { continue } + meta := models.Meta{} - //we want consistence + // we want consistence skeys := make([]string, 0, len(evt.Meta)) for k := range evt.Meta { skeys = append(skeys, k) } + sort.Strings(skeys) + for _, k := range skeys { v := evt.Meta[k] subMeta := models.MetaItems0{Key: k, Value: v} @@ -185,12 +211,13 @@ func EventsFromQueue(queue *types.Queue) []*models.Event { ovflwEvent := models.Event{ Meta: meta, } - //either MarshaledTime is present and is extracted from log + // either MarshaledTime is present and is extracted from log if evt.MarshaledTime != "" { tmpTimeStamp := evt.MarshaledTime ovflwEvent.Timestamp = &tmpTimeStamp - } else if !evt.Time.IsZero() { //or .Time has been set during parse as time.Now().UTC() + } else if !evt.Time.IsZero() { // or .Time has been set during parse as time.Now().UTC() ovflwEvent.Timestamp = new(string) + raw, err := evt.Time.MarshalText() if err != nil { log.Warningf("while marshaling time '%s' : %s", evt.Time.String(), err) @@ -203,6 +230,7 @@ func EventsFromQueue(queue *types.Queue) []*models.Event { events = append(events, &ovflwEvent) } + return events } @@ -218,17 +246,21 @@ func alertFormatSource(leaky *Leaky, queue *types.Queue) (map[string]models.Sour if err != nil { return nil, "", fmt.Errorf("while extracting scope from bucket %s: %w", leaky.Name, 
err) } + for key, src := range srcs { if source_type == types.Undefined { source_type = *src.Scope } + if *src.Scope != source_type { return nil, "", fmt.Errorf("event has multiple source types : %s != %s", *src.Scope, source_type) } + sources[key] = src } } + return sources, source_type, nil } @@ -244,10 +276,12 @@ func NewAlert(leaky *Leaky, queue *types.Queue) (types.RuntimeAlert, error) { if err != nil { log.Warningf("failed to marshal start ts %s : %s", leaky.First_ts.String(), err) } + stop_at, err := leaky.Ovflw_ts.MarshalText() if err != nil { log.Warningf("failed to marshal ovflw ts %s : %s", leaky.First_ts.String(), err) } + capacity := int32(leaky.Capacity) EventsCount := int32(leaky.Total_count) leakSpeed := leaky.Leakspeed.String() @@ -266,19 +300,20 @@ func NewAlert(leaky *Leaky, queue *types.Queue) (types.RuntimeAlert, error) { Simulated: &leaky.Simulated, } if leaky.BucketConfig == nil { - return runtimeAlert, fmt.Errorf("leaky.BucketConfig is nil") + return runtimeAlert, errors.New("leaky.BucketConfig is nil") } - //give information about the bucket + // give information about the bucket runtimeAlert.Mapkey = leaky.Mapkey - //Get the sources from Leaky/Queue + // Get the sources from Leaky/Queue sources, source_scope, err := alertFormatSource(leaky, queue) if err != nil { return runtimeAlert, fmt.Errorf("unable to collect sources from bucket: %w", err) } + runtimeAlert.Sources = sources - //Include source info in format string + // Include source info in format string sourceStr := "UNKNOWN" if len(sources) > 1 { sourceStr = fmt.Sprintf("%d sources", len(sources)) @@ -290,19 +325,22 @@ func NewAlert(leaky *Leaky, queue *types.Queue) (types.RuntimeAlert, error) { } *apiAlert.Message = fmt.Sprintf("%s %s performed '%s' (%d events over %s) at %s", source_scope, sourceStr, leaky.Name, leaky.Total_count, leaky.Ovflw_ts.Sub(leaky.First_ts), leaky.Last_ts) - //Get the events from Leaky/Queue + // Get the events from Leaky/Queue apiAlert.Events = 
EventsFromQueue(queue) + var warnings []error + apiAlert.Meta, warnings = alertcontext.EventToContext(leaky.Queue.GetQueue()) for _, w := range warnings { log.Warningf("while extracting context from bucket %s : %s", leaky.Name, w) } - //Loop over the Sources and generate appropriate number of ApiAlerts + // Loop over the Sources and generate appropriate number of ApiAlerts for _, srcValue := range sources { newApiAlert := apiAlert srcCopy := srcValue newApiAlert.Source = &srcCopy + if v, ok := leaky.BucketConfig.Labels["remediation"]; ok && v == true { newApiAlert.Remediation = true } @@ -312,6 +350,7 @@ func NewAlert(leaky *Leaky, queue *types.Queue) (types.RuntimeAlert, error) { log.Errorf("->%s", spew.Sdump(newApiAlert)) log.Fatalf("error : %s", err) } + runtimeAlert.APIAlerts = append(runtimeAlert.APIAlerts, newApiAlert) } @@ -322,5 +361,6 @@ func NewAlert(leaky *Leaky, queue *types.Queue) (types.RuntimeAlert, error) { if leaky.Reprocess { runtimeAlert.Reprocess = true } + return runtimeAlert, nil } From 11da728b514be8df3c0a3acffb8a6d86230b06c2 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 7 May 2024 12:59:38 +0200 Subject: [PATCH 116/318] cscli support: collect profiling data and logs (#2987) * extract methods, avoid globals * collect logs to file dump.log * include pprof data * include latest logs --- cmd/crowdsec-cli/capi.go | 58 +-- cmd/crowdsec-cli/lapi.go | 44 ++- cmd/crowdsec-cli/main.go | 2 +- cmd/crowdsec-cli/support.go | 717 +++++++++++++++++++++--------------- pkg/database/database.go | 2 +- test/bats/01_cscli.bats | 2 +- 6 files changed, 489 insertions(+), 336 deletions(-) diff --git a/cmd/crowdsec-cli/capi.go b/cmd/crowdsec-cli/capi.go index b5180d0505a..b89d9c7edb0 100644 --- a/cmd/crowdsec-cli/capi.go +++ b/cmd/crowdsec-cli/capi.go @@ -155,23 +155,11 @@ func (cli *cliCapi) newRegisterCmd() *cobra.Command { return cmd } -func (cli *cliCapi) status() error { - cfg := cli.cfg() - - if err := 
require.CAPIRegistered(cfg); err != nil { - return err - } - - password := strfmt.Password(cfg.API.Server.OnlineClient.Credentials.Password) - - apiurl, err := url.Parse(cfg.API.Server.OnlineClient.Credentials.URL) +// QueryCAPIStatus checks if the Local API is reachable, and if the credentials are correct +func QueryCAPIStatus(hub *cwhub.Hub, credURL string, login string, password string) error { + apiURL, err := url.Parse(credURL) if err != nil { - return fmt.Errorf("parsing api url ('%s'): %w", cfg.API.Server.OnlineClient.Credentials.URL, err) - } - - hub, err := require.Hub(cfg, nil, nil) - if err != nil { - return err + return fmt.Errorf("parsing api url: %w", err) } scenarios, err := hub.GetInstalledNamesByType(cwhub.SCENARIOS) @@ -183,22 +171,48 @@ func (cli *cliCapi) status() error { return errors.New("no scenarios installed, abort") } - Client, err = apiclient.NewDefaultClient(apiurl, CAPIURLPrefix, fmt.Sprintf("crowdsec/%s", version.String()), nil) + Client, err = apiclient.NewDefaultClient(apiURL, + CAPIURLPrefix, + fmt.Sprintf("crowdsec/%s", version.String()), + nil) if err != nil { return fmt.Errorf("init default client: %w", err) } + pw := strfmt.Password(password) + t := models.WatcherAuthRequest{ - MachineID: &cfg.API.Server.OnlineClient.Credentials.Login, - Password: &password, + MachineID: &login, + Password: &pw, Scenarios: scenarios, } - log.Infof("Loaded credentials from %s", cfg.API.Server.OnlineClient.CredentialsFilePath) - log.Infof("Trying to authenticate with username %s on %s", cfg.API.Server.OnlineClient.Credentials.Login, apiurl) - _, _, err = Client.Auth.AuthenticateWatcher(context.Background(), t) if err != nil { + return err + } + + return nil +} + +func (cli *cliCapi) status() error { + cfg := cli.cfg() + + if err := require.CAPIRegistered(cfg); err != nil { + return err + } + + cred := cfg.API.Server.OnlineClient.Credentials + + hub, err := require.Hub(cfg, nil, nil) + if err != nil { + return err + } + + log.Infof("Loaded 
credentials from %s", cfg.API.Server.OnlineClient.CredentialsFilePath) + log.Infof("Trying to authenticate with username %s on %s", cred.Login, cred.URL) + + if err := QueryCAPIStatus(hub, cred.URL, cred.Login, cred.Password); err != nil { return fmt.Errorf("failed to authenticate to Central API (CAPI): %w", err) } diff --git a/cmd/crowdsec-cli/lapi.go b/cmd/crowdsec-cli/lapi.go index 369de5b426b..7cffd7ffc7f 100644 --- a/cmd/crowdsec-cli/lapi.go +++ b/cmd/crowdsec-cli/lapi.go @@ -39,23 +39,13 @@ func NewCLILapi(cfg configGetter) *cliLapi { } } -func (cli *cliLapi) status() error { - cfg := cli.cfg() - password := strfmt.Password(cfg.API.Client.Credentials.Password) - login := cfg.API.Client.Credentials.Login - - origURL := cfg.API.Client.Credentials.URL - - apiURL, err := url.Parse(origURL) +// QueryLAPIStatus checks if the Local API is reachable, and if the credentials are correct +func QueryLAPIStatus(hub *cwhub.Hub, credURL string, login string, password string) error { + apiURL, err := url.Parse(credURL) if err != nil { return fmt.Errorf("parsing api url: %w", err) } - hub, err := require.Hub(cfg, nil, nil) - if err != nil { - return err - } - scenarios, err := hub.GetInstalledNamesByType(cwhub.SCENARIOS) if err != nil { return fmt.Errorf("failed to get scenarios: %w", err) @@ -69,18 +59,36 @@ func (cli *cliLapi) status() error { return fmt.Errorf("init default client: %w", err) } + pw := strfmt.Password(password) + t := models.WatcherAuthRequest{ MachineID: &login, - Password: &password, + Password: &pw, Scenarios: scenarios, } - log.Infof("Loaded credentials from %s", cfg.API.Client.CredentialsFilePath) - // use the original string because apiURL would print 'http://unix/' - log.Infof("Trying to authenticate with username %s on %s", login, origURL) - _, _, err = Client.Auth.AuthenticateWatcher(context.Background(), t) if err != nil { + return err + } + + return nil +} + +func (cli *cliLapi) status() error { + cfg := cli.cfg() + + cred := 
cfg.API.Client.Credentials + + hub, err := require.Hub(cfg, nil, nil) + if err != nil { + return err + } + + log.Infof("Loaded credentials from %s", cfg.API.Client.CredentialsFilePath) + log.Infof("Trying to authenticate with username %s on %s", cred.Login, cred.URL) + + if err := QueryLAPIStatus(hub, cred.URL, cred.Login, cred.Password); err != nil { return fmt.Errorf("failed to authenticate to Local API (LAPI): %w", err) } diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index 95c528f20b5..3881818123f 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -258,7 +258,7 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewCLIExplain(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIHubTest(cli.cfg).NewCommand()) cmd.AddCommand(NewCLINotifications(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLISupport().NewCommand()) + cmd.AddCommand(NewCLISupport(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIPapi(cli.cfg).NewCommand()) cmd.AddCommand(NewCLICollection(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIParser(cli.cfg).NewCommand()) diff --git a/cmd/crowdsec-cli/support.go b/cmd/crowdsec-cli/support.go index 5890061f502..54b2e7ad9ad 100644 --- a/cmd/crowdsec-cli/support.go +++ b/cmd/crowdsec-cli/support.go @@ -7,52 +7,67 @@ import ( "errors" "fmt" "io" + "net" "net/http" - "net/url" "os" "path/filepath" "regexp" + "strconv" "strings" "time" "github.com/blackfireio/osinfo" - "github.com/go-openapi/strfmt" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "github.com/crowdsecurity/go-cs-lib/trace" - "github.com/crowdsecurity/go-cs-lib/version" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" - "github.com/crowdsecurity/crowdsec/pkg/apiclient" "github.com/crowdsecurity/crowdsec/pkg/cwhub" "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/database" "github.com/crowdsecurity/crowdsec/pkg/fflag" - "github.com/crowdsecurity/crowdsec/pkg/models" ) 
const ( - SUPPORT_METRICS_HUMAN_PATH = "metrics/metrics.human" - SUPPORT_METRICS_PROMETHEUS_PATH = "metrics/metrics.prometheus" - SUPPORT_VERSION_PATH = "version.txt" - SUPPORT_FEATURES_PATH = "features.txt" - SUPPORT_OS_INFO_PATH = "osinfo.txt" - SUPPORT_PARSERS_PATH = "hub/parsers.txt" - SUPPORT_SCENARIOS_PATH = "hub/scenarios.txt" - SUPPORT_CONTEXTS_PATH = "hub/scenarios.txt" - SUPPORT_COLLECTIONS_PATH = "hub/collections.txt" - SUPPORT_POSTOVERFLOWS_PATH = "hub/postoverflows.txt" - SUPPORT_BOUNCERS_PATH = "lapi/bouncers.txt" - SUPPORT_AGENTS_PATH = "lapi/agents.txt" - SUPPORT_CROWDSEC_CONFIG_PATH = "config/crowdsec.yaml" - SUPPORT_LAPI_STATUS_PATH = "lapi_status.txt" - SUPPORT_CAPI_STATUS_PATH = "capi_status.txt" - SUPPORT_ACQUISITION_CONFIG_BASE_PATH = "config/acquis/" - SUPPORT_CROWDSEC_PROFILE_PATH = "config/profiles.yaml" - SUPPORT_CRASH_PATH = "crash/" + SUPPORT_METRICS_DIR = "metrics/" + SUPPORT_VERSION_PATH = "version.txt" + SUPPORT_FEATURES_PATH = "features.txt" + SUPPORT_OS_INFO_PATH = "osinfo.txt" + SUPPORT_HUB_DIR = "hub/" + SUPPORT_BOUNCERS_PATH = "lapi/bouncers.txt" + SUPPORT_AGENTS_PATH = "lapi/agents.txt" + SUPPORT_CROWDSEC_CONFIG_PATH = "config/crowdsec.yaml" + SUPPORT_LAPI_STATUS_PATH = "lapi_status.txt" + SUPPORT_CAPI_STATUS_PATH = "capi_status.txt" + SUPPORT_ACQUISITION_DIR = "config/acquis/" + SUPPORT_CROWDSEC_PROFILE_PATH = "config/profiles.yaml" + SUPPORT_CRASH_DIR = "crash/" + SUPPORT_LOG_DIR = "log/" + SUPPORT_PPROF_DIR = "pprof/" ) +// StringHook collects log entries in a string +type StringHook struct { + LogBuilder strings.Builder + LogLevels []log.Level +} + +func (hook *StringHook) Levels() []log.Level { + return hook.LogLevels +} + +func (hook *StringHook) Fire(entry *log.Entry) error { + logEntry, err := entry.String() + if err != nil { + return err + } + + hook.LogBuilder.WriteString(logEntry) + + return nil +} + // from https://github.com/acarl005/stripansi var reStripAnsi = 
regexp.MustCompile("[\u001B\u009B][[\\]()#;?]*(?:(?:(?:[a-zA-Z\\d]*(?:;[a-zA-Z\\d]*)*)?\u0007)|(?:(?:\\d{1,4}(?:;\\d{0,4})*)?[\\dA-PRZcf-ntqry=><~]))") @@ -61,75 +76,76 @@ func stripAnsiString(str string) string { return reStripAnsi.ReplaceAllString(str, "") } -func collectMetrics() ([]byte, []byte, error) { +func (cli *cliSupport) dumpMetrics(ctx context.Context, zw *zip.Writer) error { log.Info("Collecting prometheus metrics") - if csConfig.Cscli.PrometheusUrl == "" { - log.Warn("No Prometheus URL configured, metrics will not be collected") - return nil, nil, errors.New("prometheus_uri is not set") + cfg := cli.cfg() + + if cfg.Cscli.PrometheusUrl == "" { + log.Warn("can't collect metrics: prometheus_uri is not set") } - humanMetrics := bytes.NewBuffer(nil) + humanMetrics := new(bytes.Buffer) ms := NewMetricStore() - if err := ms.Fetch(csConfig.Cscli.PrometheusUrl); err != nil { - return nil, nil, fmt.Errorf("could not fetch prometheus metrics: %w", err) + if err := ms.Fetch(cfg.Cscli.PrometheusUrl); err != nil { + return err } if err := ms.Format(humanMetrics, nil, "human", false); err != nil { - return nil, nil, err + return fmt.Errorf("could not format prometheus metrics: %w", err) } - req, err := http.NewRequest(http.MethodGet, csConfig.Cscli.PrometheusUrl, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, cfg.Cscli.PrometheusUrl, nil) if err != nil { - return nil, nil, fmt.Errorf("could not create requests to prometheus endpoint: %w", err) + return fmt.Errorf("could not create request to prometheus endpoint: %w", err) } client := &http.Client{} resp, err := client.Do(req) if err != nil { - return nil, nil, fmt.Errorf("could not get metrics from prometheus endpoint: %w", err) + return fmt.Errorf("could not get metrics from prometheus endpoint: %w", err) } defer resp.Body.Close() - body, err := io.ReadAll(resp.Body) - if err != nil { - return nil, nil, fmt.Errorf("could not read metrics from prometheus endpoint: %w", err) - } + 
cli.writeToZip(zw, SUPPORT_METRICS_DIR+"metrics.prometheus", time.Now(), resp.Body) - return humanMetrics.Bytes(), body, nil + stripped := stripAnsiString(humanMetrics.String()) + + cli.writeToZip(zw, SUPPORT_METRICS_DIR+"metrics.human", time.Now(), strings.NewReader(stripped)) + + return nil } -func collectVersion() []byte { +func (cli *cliSupport) dumpVersion(zw *zip.Writer) { log.Info("Collecting version") - return []byte(cwversion.ShowStr()) + + cli.writeToZip(zw, SUPPORT_VERSION_PATH, time.Now(), strings.NewReader(cwversion.ShowStr())) } -func collectFeatures() []byte { +func (cli *cliSupport) dumpFeatures(zw *zip.Writer) { log.Info("Collecting feature flags") - enabledFeatures := fflag.Crowdsec.GetEnabledFeatures() - - w := bytes.NewBuffer(nil) - for _, k := range enabledFeatures { - fmt.Fprintf(w, "%s\n", k) + w := new(bytes.Buffer) + for _, k := range fflag.Crowdsec.GetEnabledFeatures() { + fmt.Fprintln(w, k) } - return w.Bytes() + cli.writeToZip(zw, SUPPORT_FEATURES_PATH, time.Now(), w) } -func collectOSInfo() ([]byte, error) { +func (cli *cliSupport) dumpOSInfo(zw *zip.Writer) error { log.Info("Collecting OS info") info, err := osinfo.GetOSInfo() if err != nil { - return nil, err + return err } - w := bytes.NewBuffer(nil) + w := new(bytes.Buffer) fmt.Fprintf(w, "Architecture: %s\n", info.Architecture) fmt.Fprintf(w, "Family: %s\n", info.Family) fmt.Fprintf(w, "ID: %s\n", info.ID) @@ -138,155 +154,251 @@ func collectOSInfo() ([]byte, error) { fmt.Fprintf(w, "Version: %s\n", info.Version) fmt.Fprintf(w, "Build: %s\n", info.Build) - return w.Bytes(), nil + cli.writeToZip(zw, SUPPORT_OS_INFO_PATH, time.Now(), w) + + return nil } -func collectHubItems(hub *cwhub.Hub, itemType string) []byte { +func (cli *cliSupport) dumpHubItems(zw *zip.Writer, hub *cwhub.Hub, itemType string) error { var err error - out := bytes.NewBuffer(nil) + out := new(bytes.Buffer) - log.Infof("Collecting %s list", itemType) + log.Infof("Collecting hub: %s", itemType) items := 
make(map[string][]*cwhub.Item) if items[itemType], err = selectItems(hub, itemType, nil, true); err != nil { - log.Warnf("could not collect %s list: %s", itemType, err) + return fmt.Errorf("could not collect %s list: %w", itemType, err) } if err := listItems(out, []string{itemType}, items, false, "human"); err != nil { - log.Warnf("could not collect %s list: %s", itemType, err) + return fmt.Errorf("could not list %s: %w", itemType, err) } - return out.Bytes() + stripped := stripAnsiString(out.String()) + + cli.writeToZip(zw, SUPPORT_HUB_DIR+itemType+".txt", time.Now(), strings.NewReader(stripped)) + + return nil } -func collectBouncers(dbClient *database.Client) ([]byte, error) { - out := bytes.NewBuffer(nil) +func (cli *cliSupport) dumpBouncers(zw *zip.Writer, db *database.Client) error { + log.Info("Collecting bouncers") + + if db == nil { + return errors.New("no database connection") + } + + out := new(bytes.Buffer) - bouncers, err := dbClient.ListBouncers() + bouncers, err := db.ListBouncers() if err != nil { - return nil, fmt.Errorf("unable to list bouncers: %w", err) + return fmt.Errorf("unable to list bouncers: %w", err) } getBouncersTable(out, bouncers) - return out.Bytes(), nil + stripped := stripAnsiString(out.String()) + + cli.writeToZip(zw, SUPPORT_BOUNCERS_PATH, time.Now(), strings.NewReader(stripped)) + + return nil } -func collectAgents(dbClient *database.Client) ([]byte, error) { - out := bytes.NewBuffer(nil) +func (cli *cliSupport) dumpAgents(zw *zip.Writer, db *database.Client) error { + log.Info("Collecting agents") + + if db == nil { + return errors.New("no database connection") + } - machines, err := dbClient.ListMachines() + out := new(bytes.Buffer) + + machines, err := db.ListMachines() if err != nil { - return nil, fmt.Errorf("unable to list machines: %w", err) + return fmt.Errorf("unable to list machines: %w", err) } getAgentsTable(out, machines) - return out.Bytes(), nil + stripped := stripAnsiString(out.String()) + + cli.writeToZip(zw, 
SUPPORT_AGENTS_PATH, time.Now(), strings.NewReader(stripped)) + + return nil } -func collectAPIStatus(login string, password string, endpoint string, prefix string, hub *cwhub.Hub) []byte { - if csConfig.API.Client == nil || csConfig.API.Client.Credentials == nil { - return []byte("No agent credentials found, are we LAPI ?") - } +func (cli *cliSupport) dumpLAPIStatus(zw *zip.Writer, hub *cwhub.Hub) error { + log.Info("Collecting LAPI status") - pwd := strfmt.Password(password) + cfg := cli.cfg() + cred := cfg.API.Client.Credentials - apiurl, err := url.Parse(endpoint) - if err != nil { - return []byte(fmt.Sprintf("cannot parse API URL: %s", err)) - } + out := new(bytes.Buffer) - scenarios, err := hub.GetInstalledNamesByType(cwhub.SCENARIOS) - if err != nil { - return []byte(fmt.Sprintf("could not collect scenarios: %s", err)) - } + fmt.Fprintf(out, "LAPI credentials file: %s\n", cfg.API.Client.CredentialsFilePath) + fmt.Fprintf(out, "LAPI URL: %s\n", cred.URL) + fmt.Fprintf(out, "LAPI username: %s\n", cred.Login) - Client, err = apiclient.NewDefaultClient(apiurl, - prefix, - fmt.Sprintf("crowdsec/%s", version.String()), - nil) - if err != nil { - return []byte(fmt.Sprintf("could not init client: %s", err)) + if err := QueryLAPIStatus(hub, cred.URL, cred.Login, cred.Password); err != nil { + return fmt.Errorf("could not authenticate to Local API (LAPI): %w", err) } - t := models.WatcherAuthRequest{ - MachineID: &login, - Password: &pwd, - Scenarios: scenarios, - } + fmt.Fprintln(out, "You can successfully interact with Local API (LAPI)") - _, _, err = Client.Auth.AuthenticateWatcher(context.Background(), t) - if err != nil { - return []byte(fmt.Sprintf("Could not authenticate to API: %s", err)) - } else { - return []byte("Successfully authenticated to LAPI") + cli.writeToZip(zw, SUPPORT_LAPI_STATUS_PATH, time.Now(), out) + + return nil +} + +func (cli *cliSupport) dumpCAPIStatus(zw *zip.Writer, hub *cwhub.Hub) error { + log.Info("Collecting CAPI status") + + cfg := 
cli.cfg() + cred := cfg.API.Server.OnlineClient.Credentials + + out := new(bytes.Buffer) + + fmt.Fprintf(out, "CAPI credentials file: %s\n", cfg.API.Server.OnlineClient.CredentialsFilePath) + fmt.Fprintf(out, "CAPI URL: %s\n", cred.URL) + fmt.Fprintf(out, "CAPI username: %s\n", cred.Login) + + if err := QueryCAPIStatus(hub, cred.URL, cred.Login, cred.Password); err != nil { + return fmt.Errorf("could not authenticate to Central API (CAPI): %w", err) } + + fmt.Fprintln(out, "You can successfully interact with Central API (CAPI)") + + cli.writeToZip(zw, SUPPORT_CAPI_STATUS_PATH, time.Now(), out) + + return nil } -func collectCrowdsecConfig() []byte { +func (cli *cliSupport) dumpConfigYAML(zw *zip.Writer) error { log.Info("Collecting crowdsec config") - config, err := os.ReadFile(*csConfig.FilePath) + cfg := cli.cfg() + + config, err := os.ReadFile(*cfg.FilePath) if err != nil { - return []byte(fmt.Sprintf("could not read config file: %s", err)) + return fmt.Errorf("could not read config file: %w", err) } r := regexp.MustCompile(`(\s+password:|\s+user:|\s+host:)\s+.*`) - return r.ReplaceAll(config, []byte("$1 ****REDACTED****")) + redacted := r.ReplaceAll(config, []byte("$1 ****REDACTED****")) + + cli.writeToZip(zw, SUPPORT_CROWDSEC_CONFIG_PATH, time.Now(), bytes.NewReader(redacted)) + + return nil } -func collectCrowdsecProfile() []byte { - log.Info("Collecting crowdsec profile") +func (cli *cliSupport) dumpPprof(ctx context.Context, zw *zip.Writer, endpoint string) error { + log.Infof("Collecting pprof/%s data", endpoint) + + ctx, cancel := context.WithTimeout(ctx, 120*time.Second) + defer cancel() + + req, err := http.NewRequestWithContext( + ctx, + http.MethodGet, + fmt.Sprintf( + "http://%s/debug/pprof/%s?debug=1", + net.JoinHostPort( + csConfig.Prometheus.ListenAddr, + strconv.Itoa(csConfig.Prometheus.ListenPort), + ), + endpoint, + ), + nil, + ) + if err != nil { + return fmt.Errorf("could not create request to pprof endpoint: %w", err) + } - config, err := 
os.ReadFile(csConfig.API.Server.ProfilesPath) + client := &http.Client{} + + resp, err := client.Do(req) if err != nil { - return []byte(fmt.Sprintf("could not read profile file: %s", err)) + return fmt.Errorf("could not get pprof data from endpoint: %w", err) } - return config + defer resp.Body.Close() + + cli.writeToZip(zw, SUPPORT_PPROF_DIR+endpoint+".pprof", time.Now(), resp.Body) + + return nil } -func collectAcquisitionConfig() map[string][]byte { +func (cli *cliSupport) dumpProfiles(zw *zip.Writer) { + log.Info("Collecting crowdsec profile") + + cfg := cli.cfg() + cli.writeFileToZip(zw, SUPPORT_CROWDSEC_PROFILE_PATH, cfg.API.Server.ProfilesPath) +} + +func (cli *cliSupport) dumpAcquisitionConfig(zw *zip.Writer) { log.Info("Collecting acquisition config") - ret := make(map[string][]byte) + cfg := cli.cfg() - for _, filename := range csConfig.Crowdsec.AcquisitionFiles { - fileContent, err := os.ReadFile(filename) - if err != nil { - ret[filename] = []byte(fmt.Sprintf("could not read file: %s", err)) - } else { - ret[filename] = fileContent - } + for _, filename := range cfg.Crowdsec.AcquisitionFiles { + fname := strings.ReplaceAll(filename, string(filepath.Separator), "___") + cli.writeFileToZip(zw, SUPPORT_ACQUISITION_DIR+fname, filename) + } +} + +func (cli *cliSupport) dumpLogs(zw *zip.Writer) error { + log.Info("Collecting CrowdSec logs") + + cfg := cli.cfg() + + logDir := cfg.Common.LogDir + + logFiles, err := filepath.Glob(filepath.Join(logDir, "crowdsec*.log")) + if err != nil { + return fmt.Errorf("could not list log files: %w", err) } - return ret + for _, filename := range logFiles { + cli.writeFileToZip(zw, SUPPORT_LOG_DIR+filepath.Base(filename), filename) + } + + return nil } -func collectCrash() ([]string, error) { +func (cli *cliSupport) dumpCrash(zw *zip.Writer) error { log.Info("Collecting crash dumps") - return trace.List() + + traceFiles, err := trace.List() + if err != nil { + return fmt.Errorf("could not list crash dumps: %w", err) + } + + 
for _, filename := range traceFiles { + cli.writeFileToZip(zw, SUPPORT_CRASH_DIR+filepath.Base(filename), filename) + } + + return nil } -type cliSupport struct{} +type cliSupport struct { + cfg configGetter +} -func NewCLISupport() *cliSupport { - return &cliSupport{} +func NewCLISupport(cfg configGetter) *cliSupport { + return &cliSupport{ + cfg: cfg, + } } -func (cli cliSupport) NewCommand() *cobra.Command { +func (cli *cliSupport) NewCommand() *cobra.Command { cmd := &cobra.Command{ Use: "support [action]", Short: "Provide commands to help during support", Args: cobra.MinimumNArgs(1), DisableAutoGenTag: true, - PersistentPreRunE: func(cmd *cobra.Command, args []string) error { - return nil - }, } cmd.AddCommand(cli.NewDumpCmd()) @@ -294,198 +406,217 @@ func (cli cliSupport) NewCommand() *cobra.Command { return cmd } -func (cli cliSupport) NewDumpCmd() *cobra.Command { - var outFile string +// writeToZip adds a file to the zip archive, from a reader +func (cli *cliSupport) writeToZip(zipWriter *zip.Writer, filename string, mtime time.Time, reader io.Reader) { + header := &zip.FileHeader{ + Name: filename, + Method: zip.Deflate, + Modified: mtime, + } - cmd := &cobra.Command{ - Use: "dump", - Short: "Dump all your configuration to a zip file for easier support", - Long: `Dump the following informations: -- Crowdsec version -- OS version -- Installed collections list -- Installed parsers list -- Installed scenarios list -- Installed postoverflows list -- Installed context list -- Bouncers list -- Machines list -- CAPI status -- LAPI status -- Crowdsec config (sensitive information like username and password are redacted) -- Crowdsec metrics`, - Example: `cscli support dump -cscli support dump -f /tmp/crowdsec-support.zip -`, - Args: cobra.NoArgs, - DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, _ []string) error { - var err error - var skipHub, skipDB, skipCAPI, skipLAPI, skipAgent bool - infos := map[string][]byte{ - SUPPORT_VERSION_PATH: 
collectVersion(), - SUPPORT_FEATURES_PATH: collectFeatures(), - } + fw, err := zipWriter.CreateHeader(header) + if err != nil { + log.Errorf("could not add zip entry for %s: %s", filename, err) + return + } - if outFile == "" { - outFile = "/tmp/crowdsec-support.zip" - } + _, err = io.Copy(fw, reader) + if err != nil { + log.Errorf("could not write zip entry for %s: %s", filename, err) + } +} - dbClient, err := database.NewClient(csConfig.DbConfig) - if err != nil { - log.Warnf("Could not connect to database: %s", err) - skipDB = true - infos[SUPPORT_BOUNCERS_PATH] = []byte(err.Error()) - infos[SUPPORT_AGENTS_PATH] = []byte(err.Error()) - } +// writeToZip adds a file to the zip archive, from a file, and retains the mtime +func (cli *cliSupport) writeFileToZip(zw *zip.Writer, filename string, fromFile string) { + mtime := time.Now() - if err = csConfig.LoadAPIServer(true); err != nil { - log.Warnf("could not load LAPI, skipping CAPI check") - skipLAPI = true - infos[SUPPORT_CAPI_STATUS_PATH] = []byte(err.Error()) - } + fi, err := os.Stat(fromFile) + if err == nil { + mtime = fi.ModTime() + } - if err = csConfig.LoadCrowdsec(); err != nil { - log.Warnf("could not load agent config, skipping crowdsec config check") - skipAgent = true - } + fin, err := os.Open(fromFile) + if err != nil { + log.Errorf("could not open file %s: %s", fromFile, err) + return + } + defer fin.Close() - hub, err := require.Hub(csConfig, nil, nil) - if err != nil { - log.Warn("Could not init hub, running on LAPI ? 
Hub related information will not be collected") - skipHub = true - infos[SUPPORT_PARSERS_PATH] = []byte(err.Error()) - infos[SUPPORT_SCENARIOS_PATH] = []byte(err.Error()) - infos[SUPPORT_POSTOVERFLOWS_PATH] = []byte(err.Error()) - infos[SUPPORT_CONTEXTS_PATH] = []byte(err.Error()) - infos[SUPPORT_COLLECTIONS_PATH] = []byte(err.Error()) - } + cli.writeToZip(zw, filename, mtime, fin) +} - if csConfig.API.Client == nil || csConfig.API.Client.Credentials == nil { - log.Warn("no agent credentials found, skipping LAPI connectivity check") - if _, ok := infos[SUPPORT_LAPI_STATUS_PATH]; ok { - infos[SUPPORT_LAPI_STATUS_PATH] = append(infos[SUPPORT_LAPI_STATUS_PATH], []byte("\nNo LAPI credentials found")...) - } - skipLAPI = true - } +func (cli *cliSupport) dump(ctx context.Context, outFile string) error { + var skipCAPI, skipLAPI, skipAgent bool - if csConfig.API.Server == nil || csConfig.API.Server.OnlineClient == nil || csConfig.API.Server.OnlineClient.Credentials == nil { - log.Warn("no CAPI credentials found, skipping CAPI connectivity check") - skipCAPI = true - } + collector := &StringHook{ + LogLevels: log.AllLevels, + } + log.AddHook(collector) - infos[SUPPORT_METRICS_HUMAN_PATH], infos[SUPPORT_METRICS_PROMETHEUS_PATH], err = collectMetrics() - if err != nil { - log.Warnf("could not collect prometheus metrics information: %s", err) - infos[SUPPORT_METRICS_HUMAN_PATH] = []byte(err.Error()) - infos[SUPPORT_METRICS_PROMETHEUS_PATH] = []byte(err.Error()) - } + cfg := cli.cfg() - infos[SUPPORT_OS_INFO_PATH], err = collectOSInfo() - if err != nil { - log.Warnf("could not collect OS information: %s", err) - infos[SUPPORT_OS_INFO_PATH] = []byte(err.Error()) - } + if outFile == "" { + outFile = filepath.Join(os.TempDir(), "crowdsec-support.zip") + } - infos[SUPPORT_CROWDSEC_CONFIG_PATH] = collectCrowdsecConfig() + w := bytes.NewBuffer(nil) + zipWriter := zip.NewWriter(w) - if !skipHub { - infos[SUPPORT_PARSERS_PATH] = collectHubItems(hub, cwhub.PARSERS) - 
infos[SUPPORT_SCENARIOS_PATH] = collectHubItems(hub, cwhub.SCENARIOS) - infos[SUPPORT_POSTOVERFLOWS_PATH] = collectHubItems(hub, cwhub.POSTOVERFLOWS) - infos[SUPPORT_CONTEXTS_PATH] = collectHubItems(hub, cwhub.POSTOVERFLOWS) - infos[SUPPORT_COLLECTIONS_PATH] = collectHubItems(hub, cwhub.COLLECTIONS) - } + db, err := database.NewClient(cfg.DbConfig) + if err != nil { + log.Warnf("Could not connect to database: %s", err) + } - if !skipDB { - infos[SUPPORT_BOUNCERS_PATH], err = collectBouncers(dbClient) - if err != nil { - log.Warnf("could not collect bouncers information: %s", err) - infos[SUPPORT_BOUNCERS_PATH] = []byte(err.Error()) - } - - infos[SUPPORT_AGENTS_PATH], err = collectAgents(dbClient) - if err != nil { - log.Warnf("could not collect agents information: %s", err) - infos[SUPPORT_AGENTS_PATH] = []byte(err.Error()) - } - } + if err = cfg.LoadAPIServer(true); err != nil { + log.Warnf("could not load LAPI, skipping CAPI check") - if !skipCAPI { - log.Info("Collecting CAPI status") - infos[SUPPORT_CAPI_STATUS_PATH] = collectAPIStatus(csConfig.API.Server.OnlineClient.Credentials.Login, - csConfig.API.Server.OnlineClient.Credentials.Password, - csConfig.API.Server.OnlineClient.Credentials.URL, - CAPIURLPrefix, - hub) - } + skipCAPI = true + } - if !skipLAPI { - log.Info("Collection LAPI status") - infos[SUPPORT_LAPI_STATUS_PATH] = collectAPIStatus(csConfig.API.Client.Credentials.Login, - csConfig.API.Client.Credentials.Password, - csConfig.API.Client.Credentials.URL, - LAPIURLPrefix, - hub) - infos[SUPPORT_CROWDSEC_PROFILE_PATH] = collectCrowdsecProfile() - } + if err = cfg.LoadCrowdsec(); err != nil { + log.Warnf("could not load agent config, skipping crowdsec config check") - if !skipAgent { - acquis := collectAcquisitionConfig() + skipAgent = true + } - for filename, content := range acquis { - fname := strings.ReplaceAll(filename, string(filepath.Separator), "___") - infos[SUPPORT_ACQUISITION_CONFIG_BASE_PATH+fname] = content - } - } + hub, err := 
require.Hub(cfg, nil, nil) + if err != nil { + log.Warn("Could not init hub, running on LAPI ? Hub related information will not be collected") + // XXX: lapi status check requires scenarios, will return an error + } - crash, err := collectCrash() - if err != nil { - log.Errorf("could not collect crash dumps: %s", err) - } + if cfg.API.Client == nil || cfg.API.Client.Credentials == nil { + log.Warn("no agent credentials found, skipping LAPI connectivity check") - for _, filename := range crash { - content, err := os.ReadFile(filename) - if err != nil { - log.Errorf("could not read crash dump %s: %s", filename, err) - } + skipLAPI = true + } - infos[SUPPORT_CRASH_PATH+filepath.Base(filename)] = content - } + if cfg.API.Server == nil || cfg.API.Server.OnlineClient == nil || cfg.API.Server.OnlineClient.Credentials == nil { + log.Warn("no CAPI credentials found, skipping CAPI connectivity check") - w := bytes.NewBuffer(nil) - zipWriter := zip.NewWriter(w) - - for filename, data := range infos { - header := &zip.FileHeader{ - Name: filename, - Method: zip.Deflate, - // TODO: retain mtime where possible (esp. 
trace) - Modified: time.Now(), - } - fw, err := zipWriter.CreateHeader(header) - if err != nil { - log.Errorf("Could not add zip entry for %s: %s", filename, err) - continue - } - fw.Write([]byte(stripAnsiString(string(data)))) - } + skipCAPI = true + } - err = zipWriter.Close() - if err != nil { - return fmt.Errorf("could not finalize zip file: %s", err) - } + if err = cli.dumpMetrics(ctx, zipWriter); err != nil { + log.Warn(err) + } - if outFile == "-" { - _, err = os.Stdout.Write(w.Bytes()) - return err - } - err = os.WriteFile(outFile, w.Bytes(), 0o600) - if err != nil { - return fmt.Errorf("could not write zip file to %s: %s", outFile, err) + if err = cli.dumpOSInfo(zipWriter); err != nil { + log.Warnf("could not collect OS information: %s", err) + } + + if err = cli.dumpConfigYAML(zipWriter); err != nil { + log.Warnf("could not collect main config file: %s", err) + } + + if hub != nil { + for _, itemType := range cwhub.ItemTypes { + if err = cli.dumpHubItems(zipWriter, hub, itemType); err != nil { + log.Warnf("could not collect %s information: %s", itemType, err) } - log.Infof("Written zip file to %s", outFile) - return nil + } + } + + if err = cli.dumpBouncers(zipWriter, db); err != nil { + log.Warnf("could not collect bouncers information: %s", err) + } + + if err = cli.dumpAgents(zipWriter, db); err != nil { + log.Warnf("could not collect agents information: %s", err) + } + + if !skipCAPI { + if err = cli.dumpCAPIStatus(zipWriter, hub); err != nil { + log.Warnf("could not collect CAPI status: %s", err) + } + } + + if !skipLAPI { + if err = cli.dumpLAPIStatus(zipWriter, hub); err != nil { + log.Warnf("could not collect LAPI status: %s", err) + } + + // call pprof separately, one might fail for timeout + + if err = cli.dumpPprof(ctx, zipWriter, "goroutine"); err != nil { + log.Warnf("could not collect pprof goroutine data: %s", err) + } + + if err = cli.dumpPprof(ctx, zipWriter, "heap"); err != nil { + log.Warnf("could not collect pprof heap data: %s", err) 
+ } + + if err = cli.dumpPprof(ctx, zipWriter, "profile"); err != nil { + log.Warnf("could not collect pprof cpu data: %s", err) + } + + cli.dumpProfiles(zipWriter) + } + + if !skipAgent { + cli.dumpAcquisitionConfig(zipWriter) + } + + if err = cli.dumpCrash(zipWriter); err != nil { + log.Warnf("could not collect crash dumps: %s", err) + } + + if err = cli.dumpLogs(zipWriter); err != nil { + log.Warnf("could not collect log files: %s", err) + } + + cli.dumpVersion(zipWriter) + cli.dumpFeatures(zipWriter) + + // log of the dump process, without color codes + collectedOutput := stripAnsiString(collector.LogBuilder.String()) + + cli.writeToZip(zipWriter, "dump.log", time.Now(), strings.NewReader(collectedOutput)) + + err = zipWriter.Close() + if err != nil { + return fmt.Errorf("could not finalize zip file: %w", err) + } + + if outFile == "-" { + _, err = os.Stdout.Write(w.Bytes()) + return err + } + + err = os.WriteFile(outFile, w.Bytes(), 0o600) + if err != nil { + return fmt.Errorf("could not write zip file to %s: %w", outFile, err) + } + + log.Infof("Written zip file to %s", outFile) + + return nil +} + +func (cli *cliSupport) NewDumpCmd() *cobra.Command { + var outFile string + + cmd := &cobra.Command{ + Use: "dump", + Short: "Dump all your configuration to a zip file for easier support", + Long: `Dump the following information: +- Crowdsec version +- OS version +- Enabled feature flags +- Latest Crowdsec logs (log processor, LAPI, remediation components) +- Installed collections, parsers, scenarios... 
+- Bouncers and machines list +- CAPI/LAPI status +- Crowdsec config (sensitive information like username and password are redacted) +- Crowdsec metrics +- Stack trace in case of process crash`, + Example: `cscli support dump +cscli support dump -f /tmp/crowdsec-support.zip +`, + Args: cobra.NoArgs, + DisableAutoGenTag: true, + RunE: func(cmd *cobra.Command, _ []string) error { + return cli.dump(cmd.Context(), outFile) }, } diff --git a/pkg/database/database.go b/pkg/database/database.go index 96a495f6731..357077e7d6f 100644 --- a/pkg/database/database.go +++ b/pkg/database/database.go @@ -68,7 +68,7 @@ func NewClient(config *csconfig.DatabaseCfg) (*Client, error) { typ, dia, err := config.ConnectionDialect() if err != nil { - return nil, err // unsupported database caught here + return nil, err //unsupported database caught here } if config.Type == "sqlite" { diff --git a/test/bats/01_cscli.bats b/test/bats/01_cscli.bats index 4c7ce7fbc2c..7e74f6f9714 100644 --- a/test/bats/01_cscli.bats +++ b/test/bats/01_cscli.bats @@ -263,7 +263,7 @@ teardown() { rune -1 cscli lapi status -o json rune -0 jq -r '.msg' <(stderr) - assert_output 'parsing api url: parse "http://127.0.0.1:-80/": invalid port ":-80" after host' + assert_output 'failed to authenticate to Local API (LAPI): parsing api url: parse "http://127.0.0.1:-80/": invalid port ":-80" after host' } @test "cscli - bad LAPI password" { From 0d9e00054320df3ebb560a600c33db2939656954 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 7 May 2024 13:44:17 +0200 Subject: [PATCH 117/318] use go-cs-lib/downloader for data files, hub index, item YAML (#2926) --- cmd/crowdsec-cli/itemcli.go | 13 ++- go.mod | 2 +- go.sum | 4 +- pkg/cwhub/cwhub.go | 2 +- pkg/cwhub/dataset.go | 159 +++++----------------------- pkg/cwhub/dataset_test.go | 56 ---------- pkg/cwhub/hub.go | 21 +--- pkg/cwhub/hub_test.go | 6 +- pkg/cwhub/item.go | 6 +- pkg/cwhub/iteminstall.go | 11 +- 
pkg/cwhub/iteminstall_test.go | 3 +- pkg/cwhub/itemupgrade.go | 146 ++++++++++--------------- pkg/cwhub/remote.go | 39 ++++--- pkg/cwhub/sync.go | 24 +---- test/bats/20_hub.bats | 8 +- test/bats/20_hub_collections.bats | 1 - test/bats/20_hub_parsers.bats | 1 - test/bats/20_hub_postoverflows.bats | 1 - test/bats/20_hub_scenarios.bats | 1 - 19 files changed, 151 insertions(+), 353 deletions(-) delete mode 100644 pkg/cwhub/dataset_test.go diff --git a/cmd/crowdsec-cli/itemcli.go b/cmd/crowdsec-cli/itemcli.go index 44a734d5e6a..a72cfa735c5 100644 --- a/cmd/crowdsec-cli/itemcli.go +++ b/cmd/crowdsec-cli/itemcli.go @@ -475,11 +475,22 @@ func (cli cliItem) itemDiff(item *cwhub.Item, reverse bool) (string, error) { return "", fmt.Errorf("'%s' is not installed", item.FQName()) } - latestContent, remoteURL, err := item.FetchLatest() + dest, err := os.CreateTemp("", "cscli-diff-*") + if err != nil { + return "", fmt.Errorf("while creating temporary file: %w", err) + } + defer os.Remove(dest.Name()) + + _, remoteURL, err := item.FetchContentTo(dest.Name()) if err != nil { return "", err } + latestContent, err := os.ReadFile(dest.Name()) + if err != nil { + return "", fmt.Errorf("while reading %s: %w", dest.Name(), err) + } + localContent, err := os.ReadFile(item.State.LocalPath) if err != nil { return "", fmt.Errorf("while reading %s: %w", item.State.LocalPath, err) diff --git a/go.mod b/go.mod index d78908db0c6..2aae7f2a857 100644 --- a/go.mod +++ b/go.mod @@ -27,7 +27,7 @@ require ( github.com/corazawaf/libinjection-go v0.1.2 github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607 github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 - github.com/crowdsecurity/go-cs-lib v0.0.10 + github.com/crowdsecurity/go-cs-lib v0.0.11-0.20240502193824-180b39e88a51 github.com/crowdsecurity/grokky v0.2.1 github.com/crowdsecurity/machineid v1.0.2 github.com/davecgh/go-spew v1.1.1 diff --git a/go.sum b/go.sum index 29af3c795fc..ebf10e598fe 100644 --- a/go.sum +++ 
b/go.sum @@ -107,8 +107,8 @@ github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607 h1:hyrYw3h github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607/go.mod h1:br36fEqurGYZQGit+iDYsIzW0FF6VufMbDzyyLxEuPA= github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 h1:r97WNVC30Uen+7WnLs4xDScS/Ex988+id2k6mDf8psU= github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26/go.mod h1:zpv7r+7KXwgVUZnUNjyP22zc/D7LKjyoY02weH2RBbk= -github.com/crowdsecurity/go-cs-lib v0.0.10 h1:Twt/y/rYCUspGY1zxDnGurL2svRSREAz+2+puLepd9c= -github.com/crowdsecurity/go-cs-lib v0.0.10/go.mod h1:8FMKNGsh3hMZi2SEv6P15PURhEJnZV431XjzzBSuf0k= +github.com/crowdsecurity/go-cs-lib v0.0.11-0.20240502193824-180b39e88a51 h1:Dd/T2IMB3KG1+dvm0LUuT9AKKnT2CO0sCjFfsKDIxXE= +github.com/crowdsecurity/go-cs-lib v0.0.11-0.20240502193824-180b39e88a51/go.mod h1:8FMKNGsh3hMZi2SEv6P15PURhEJnZV431XjzzBSuf0k= github.com/crowdsecurity/grokky v0.2.1 h1:t4VYnDlAd0RjDM2SlILalbwfCrQxtJSMGdQOR0zwkE4= github.com/crowdsecurity/grokky v0.2.1/go.mod h1:33usDIYzGDsgX1kHAThCbseso6JuWNJXOzRQDGXHtWM= github.com/crowdsecurity/machineid v1.0.2 h1:wpkpsUghJF8Khtmn/tg6GxgdhLA1Xflerh5lirI+bdc= diff --git a/pkg/cwhub/cwhub.go b/pkg/cwhub/cwhub.go index a7864d4c076..0496834e472 100644 --- a/pkg/cwhub/cwhub.go +++ b/pkg/cwhub/cwhub.go @@ -23,7 +23,7 @@ func (t *hubTransport) RoundTrip(req *http.Request) (*http.Response, error) { // hubClient is the HTTP client used to communicate with the CrowdSec Hub. 
var hubClient = &http.Client{ - Timeout: 120 * time.Second, + Timeout: 120 * time.Second, Transport: &hubTransport{http.DefaultTransport}, } diff --git a/pkg/cwhub/dataset.go b/pkg/cwhub/dataset.go index 921361e3fcf..eb56d8e32a8 100644 --- a/pkg/cwhub/dataset.go +++ b/pkg/cwhub/dataset.go @@ -1,19 +1,17 @@ package cwhub import ( + "context" "errors" "fmt" "io" - "io/fs" - "net/http" - "os" - "path/filepath" - "runtime" "time" "github.com/sirupsen/logrus" "gopkg.in/yaml.v3" + "github.com/crowdsecurity/go-cs-lib/downloader" + "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -22,128 +20,6 @@ type DataSet struct { Data []types.DataSource `yaml:"data,omitempty"` } -// downloadFile downloads a file and writes it to disk, with no hash verification. -func downloadFile(url string, destPath string) error { - resp, err := hubClient.Get(url) - if err != nil { - return fmt.Errorf("while downloading %s: %w", url, err) - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("bad http code %d for %s", resp.StatusCode, url) - } - - // Download to a temporary location to avoid corrupting files - // that are currently in use or memory mapped. 
- - tmpFile, err := os.CreateTemp(filepath.Dir(destPath), filepath.Base(destPath)+".*.tmp") - if err != nil { - return err - } - - tmpFileName := tmpFile.Name() - defer func() { - tmpFile.Close() - os.Remove(tmpFileName) - }() - - // avoid reading the whole file in memory - _, err = io.Copy(tmpFile, resp.Body) - if err != nil { - return err - } - - if err = tmpFile.Sync(); err != nil { - return err - } - - if err = tmpFile.Close(); err != nil { - return err - } - - // a check on stdout is used while scripting to know if the hub has been upgraded - // and a configuration reload is required - // TODO: use a better way to communicate this - fmt.Printf("updated %s\n", filepath.Base(destPath)) - - if runtime.GOOS == "windows" { - // On Windows, rename will fail if the destination file already exists - // so we remove it first. - err = os.Remove(destPath) - switch { - case errors.Is(err, fs.ErrNotExist): - break - case err != nil: - return err - } - } - - if err = os.Rename(tmpFileName, destPath); err != nil { - return err - } - - return nil -} - -// needsUpdate checks if a data file has to be downloaded (or updated). -// if the local file doesn't exist, update. -// if the remote is newer than the local file, update. -// if the remote has no modification date, but local file has been modified > a week ago, update. 
-func needsUpdate(destPath string, url string, logger *logrus.Logger) bool { - fileInfo, err := os.Stat(destPath) - - switch { - case os.IsNotExist(err): - return true - case err != nil: - logger.Errorf("while getting %s: %s", destPath, err) - return true - } - - resp, err := hubClient.Head(url) - if err != nil { - logger.Errorf("while getting %s: %s", url, err) - // Head failed, Get would likely fail too -> no update - return false - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - logger.Errorf("bad http code %d for %s", resp.StatusCode, url) - return false - } - - // update if local file is older than this - shelfLife := 7 * 24 * time.Hour - - lastModify := fileInfo.ModTime() - - localIsOld := lastModify.Add(shelfLife).Before(time.Now()) - - remoteLastModified := resp.Header.Get("Last-Modified") - if remoteLastModified == "" { - if localIsOld { - logger.Infof("no last modified date for %s, but local file is older than %s", url, shelfLife) - } - - return localIsOld - } - - lastAvailable, err := time.Parse(time.RFC1123, remoteLastModified) - if err != nil { - logger.Warningf("while parsing last modified date for %s: %s", url, err) - return localIsOld - } - - if lastModify.Before(lastAvailable) { - logger.Infof("new version available, updating %s", destPath) - return true - } - - return false -} - // downloadDataSet downloads all the data files for an item. func downloadDataSet(dataFolder string, force bool, reader io.Reader, logger *logrus.Logger) error { dec := yaml.NewDecoder(reader) @@ -165,12 +41,31 @@ func downloadDataSet(dataFolder string, force bool, reader io.Reader, logger *lo return err } - if force || needsUpdate(destPath, dataS.SourceURL, logger) { - logger.Debugf("downloading %s in %s", dataS.SourceURL, destPath) + d := downloader. + New(). + WithHTTPClient(hubClient). + ToFile(destPath). + CompareContent(). + WithLogger(logrus.WithFields(logrus.Fields{"url": dataS.SourceURL})) + + if !force { + d = d.WithLastModified(). 
+ WithShelfLife(7 * 24 * time.Hour) + } + + ctx := context.TODO() + + downloaded, err := d.Download(ctx, dataS.SourceURL) + if err != nil { + return fmt.Errorf("while getting data: %w", err) + } - if err := downloadFile(dataS.SourceURL, destPath); err != nil { - return fmt.Errorf("while getting data: %w", err) - } + if downloaded { + logger.Infof("Downloaded %s", destPath) + // a check on stdout is used while scripting to know if the hub has been upgraded + // and a configuration reload is required + // TODO: use a better way to communicate this + fmt.Printf("updated %s\n", destPath) } } } diff --git a/pkg/cwhub/dataset_test.go b/pkg/cwhub/dataset_test.go deleted file mode 100644 index e48202e4821..00000000000 --- a/pkg/cwhub/dataset_test.go +++ /dev/null @@ -1,56 +0,0 @@ -package cwhub - -import ( - "io" - "net/http" - "net/http/httptest" - "os" - "path/filepath" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/crowdsecurity/go-cs-lib/cstest" -) - -func TestDownloadFile(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - switch r.URL.Path { - case "/xx": - w.WriteHeader(http.StatusOK) - _, _ = io.WriteString(w, "example content oneoneone") - default: - w.WriteHeader(http.StatusNotFound) - _, _ = io.WriteString(w, "not found") - } - })) - defer ts.Close() - - dest := filepath.Join(t.TempDir(), "example.txt") - defer os.Remove(dest) - - err := downloadFile(ts.URL+"/xx", dest) - require.NoError(t, err) - - content, err := os.ReadFile(dest) - assert.Equal(t, "example content oneoneone", string(content)) - require.NoError(t, err) - - // bad uri - err = downloadFile("https://zz.com", dest) - cstest.RequireErrorContains(t, err, "lookup zz.com") - cstest.RequireErrorContains(t, err, "no such host") - - // 404 - err = downloadFile(ts.URL+"/x", dest) - cstest.RequireErrorContains(t, err, "bad http code 404") - - // bad target - err = 
downloadFile(ts.URL+"/xx", "") - cstest.RequireErrorContains(t, err, cstest.PathNotFoundMessage) - - // destination directory does not exist - err = downloadFile(ts.URL+"/xx", filepath.Join(t.TempDir(), "missing/example.txt")) - cstest.RequireErrorContains(t, err, cstest.PathNotFoundMessage) -} diff --git a/pkg/cwhub/hub.go b/pkg/cwhub/hub.go index 6b9f56b2e17..87a6644bc72 100644 --- a/pkg/cwhub/hub.go +++ b/pkg/cwhub/hub.go @@ -1,7 +1,6 @@ package cwhub import ( - "bytes" "encoding/json" "errors" "fmt" @@ -21,8 +20,8 @@ type Hub struct { items HubItems // Items read from HubDir and InstallDir local *csconfig.LocalHubCfg remote *RemoteHubCfg - Warnings []string // Warnings encountered during sync logger *logrus.Logger + Warnings []string // Warnings encountered during sync } // GetDataDir returns the data directory, where data sets are installed. @@ -150,27 +149,17 @@ func (h *Hub) ItemStats() []string { // updateIndex downloads the latest version of the index and writes it to disk if it changed. 
func (h *Hub) updateIndex() error { - body, err := h.remote.fetchIndex() + downloaded, err := h.remote.fetchIndex(h.local.HubIndexFile) if err != nil { return err } - oldContent, err := os.ReadFile(h.local.HubIndexFile) - if err != nil { - if !os.IsNotExist(err) { - h.logger.Warningf("failed to read hub index: %s", err) - } - } else if bytes.Equal(body, oldContent) { + if downloaded { + h.logger.Infof("Wrote index to %s", h.local.HubIndexFile) + } else { h.logger.Info("hub index is up to date") - return nil - } - - if err = os.WriteFile(h.local.HubIndexFile, body, 0o644); err != nil { - return fmt.Errorf("failed to write hub index: %w", err) } - h.logger.Infof("Wrote index to %s, %d bytes", h.local.HubIndexFile, len(body)) - return nil } diff --git a/pkg/cwhub/hub_test.go b/pkg/cwhub/hub_test.go index 86569cde324..d5592a16c39 100644 --- a/pkg/cwhub/hub_test.go +++ b/pkg/cwhub/hub_test.go @@ -29,6 +29,10 @@ func TestUpdateIndex(t *testing.T) { tmpIndex, err := os.CreateTemp("", "index.json") require.NoError(t, err) + // close the file to avoid preventing the rename on windows + err = tmpIndex.Close() + require.NoError(t, err) + t.Cleanup(func() { os.Remove(tmpIndex.Name()) }) @@ -72,5 +76,5 @@ func TestUpdateIndex(t *testing.T) { hub.local.HubIndexFile = "/does/not/exist/index.json" err = hub.updateIndex() - cstest.RequireErrorContains(t, err, "failed to write hub index: open /does/not/exist/index.json:") + cstest.RequireErrorContains(t, err, "failed to create temporary download file for /does/not/exist/index.json:") } diff --git a/pkg/cwhub/item.go b/pkg/cwhub/item.go index 6cdb5cadcb9..4249a20e134 100644 --- a/pkg/cwhub/item.go +++ b/pkg/cwhub/item.go @@ -29,10 +29,8 @@ const ( versionFuture // local version is higher latest, but is included in the index: should not happen ) -var ( - // The order is important, as it is used to range over sub-items in collections. 
- ItemTypes = []string{PARSERS, POSTOVERFLOWS, SCENARIOS, CONTEXTS, APPSEC_CONFIGS, APPSEC_RULES, COLLECTIONS} -) +// The order is important, as it is used to range over sub-items in collections. +var ItemTypes = []string{PARSERS, POSTOVERFLOWS, SCENARIOS, CONTEXTS, APPSEC_CONFIGS, APPSEC_RULES, COLLECTIONS} type HubItems map[string]map[string]*Item diff --git a/pkg/cwhub/iteminstall.go b/pkg/cwhub/iteminstall.go index ceae3649118..274e7128a04 100644 --- a/pkg/cwhub/iteminstall.go +++ b/pkg/cwhub/iteminstall.go @@ -48,13 +48,13 @@ func (i *Item) Install(force bool, downloadOnly bool) error { } } - filePath, err := i.downloadLatest(force, true) + downloaded, err := i.downloadLatest(force, true) if err != nil { return err } - if downloadOnly { - i.hub.logger.Infof("Downloaded %s to %s", i.Name, filePath) + if downloadOnly && downloaded { + i.hub.logger.Infof("Downloaded %s", i.Name) return nil } @@ -62,6 +62,11 @@ func (i *Item) Install(force bool, downloadOnly bool) error { return fmt.Errorf("while enabling %s: %w", i.Name, err) } + // a check on stdout is used while scripting to know if the hub has been upgraded + // and a configuration reload is required + // TODO: use a better way to communicate this + fmt.Printf("installed %s\n", i.Name) + i.hub.logger.Infof("Enabled %s", i.Name) return nil diff --git a/pkg/cwhub/iteminstall_test.go b/pkg/cwhub/iteminstall_test.go index 80a419ec5da..337f66f95fa 100644 --- a/pkg/cwhub/iteminstall_test.go +++ b/pkg/cwhub/iteminstall_test.go @@ -35,7 +35,8 @@ func testTaint(hub *Hub, t *testing.T, item *Item) { // truncate the file f, err := os.Create(item.State.LocalPath) require.NoError(t, err) - f.Close() + err = f.Close() + require.NoError(t, err) // Local sync and check status err = hub.localSync() diff --git a/pkg/cwhub/itemupgrade.go b/pkg/cwhub/itemupgrade.go index 6a8dc2f44b6..8b3ec7481ef 100644 --- a/pkg/cwhub/itemupgrade.go +++ b/pkg/cwhub/itemupgrade.go @@ -3,23 +3,20 @@ package cwhub // Install, upgrade and remove 
items from the hub to the local configuration import ( - "bytes" - "crypto/sha256" - "encoding/hex" + "context" "errors" "fmt" - "io" - "net/http" "os" - "path/filepath" + + "github.com/sirupsen/logrus" + + "github.com/crowdsecurity/go-cs-lib/downloader" "github.com/crowdsecurity/crowdsec/pkg/emoji" ) // Upgrade downloads and applies the last version of the item from the hub. func (i *Item) Upgrade(force bool) (bool, error) { - updated := false - if i.State.IsLocal() { i.hub.logger.Infof("not upgrading %s: local item", i.Name) return false, nil @@ -54,21 +51,21 @@ func (i *Item) Upgrade(force bool) (bool, error) { if i.State.Tainted { i.hub.logger.Warningf("%v %s is tainted, --force to overwrite", emoji.Warning, i.Name) } - } else { - // a check on stdout is used while scripting to know if the hub has been upgraded - // and a configuration reload is required - // TODO: use a better way to communicate this - fmt.Printf("updated %s\n", i.Name) - i.hub.logger.Infof("%v %s: updated", emoji.Package, i.Name) - updated = true + return false, nil } - return updated, nil + // a check on stdout is used while scripting to know if the hub has been upgraded + // and a configuration reload is required + // TODO: use a better way to communicate this + fmt.Printf("updated %s\n", i.Name) + i.hub.logger.Infof("%v %s: updated", emoji.Package, i.Name) + + return true, nil } // downloadLatest downloads the latest version of the item to the hub directory. 
-func (i *Item) downloadLatest(overwrite bool, updateOnly bool) (string, error) { +func (i *Item) downloadLatest(overwrite bool, updateOnly bool) (bool, error) { i.hub.logger.Debugf("Downloading %s %s", i.Type, i.Name) for _, sub := range i.SubItems() { @@ -84,98 +81,84 @@ func (i *Item) downloadLatest(overwrite bool, updateOnly bool) (string, error) { i.hub.logger.Tracef("collection, recurse") if _, err := sub.downloadLatest(overwrite, updateOnly); err != nil { - return "", err + return false, err } } downloaded := sub.State.Downloaded if _, err := sub.download(overwrite); err != nil { - return "", err + return false, err } // We need to enable an item when it has been added to a collection since latest release of the collection. // We check if sub.Downloaded is false because maybe the item has been disabled by the user. if !sub.State.Installed && !downloaded { if err := sub.enable(); err != nil { - return "", fmt.Errorf("enabling '%s': %w", sub.Name, err) + return false, fmt.Errorf("enabling '%s': %w", sub.Name, err) } } } if !i.State.Installed && updateOnly && i.State.Downloaded && !overwrite { i.hub.logger.Debugf("skipping upgrade of %s: not installed", i.Name) - return "", nil - } - - ret, err := i.download(overwrite) - if err != nil { - return "", err + return false, nil } - return ret, nil + return i.download(overwrite) } -// FetchLatest downloads the latest item from the hub, verifies the hash and returns the content and the used url. -func (i *Item) FetchLatest() ([]byte, string, error) { - if i.latestHash() == "" { - return nil, "", errors.New("latest hash missing from index") - } - +// FetchContentTo downloads the last version of the item's YAML file to the specified path. 
+func (i *Item) FetchContentTo(destPath string) (bool, string, error) { url, err := i.hub.remote.urlTo(i.RemotePath) if err != nil { - return nil, "", fmt.Errorf("failed to build request: %w", err) - } - - resp, err := hubClient.Get(url) - if err != nil { - return nil, "", err + return false, "", fmt.Errorf("failed to build request: %w", err) } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - return nil, "", fmt.Errorf("bad http code %d", resp.StatusCode) + wantHash := i.latestHash() + if wantHash == "" { + return false, "", errors.New("latest hash missing from index") } - body, err := io.ReadAll(resp.Body) - if err != nil { - return nil, "", err - } + d := downloader. + New(). + WithHTTPClient(hubClient). + ToFile(destPath). + WithMakeDirs(true). + WithLogger(logrus.WithFields(logrus.Fields{"url": url})). + CompareContent(). + VerifyHash("sha256", wantHash) - hash := sha256.New() - if _, err = hash.Write(body); err != nil { - return nil, "", fmt.Errorf("while hashing %s: %w", i.Name, err) - } + // TODO: recommend hub update if hash does not match - meow := hex.EncodeToString(hash.Sum(nil)) - if meow != i.Versions[i.Version].Digest { - i.hub.logger.Errorf("Downloaded version doesn't match index, please 'hub update'") - i.hub.logger.Debugf("got %s, expected %s", meow, i.Versions[i.Version].Digest) + ctx := context.TODO() - return nil, "", errors.New("invalid download hash") + downloaded, err := d.Download(ctx, url) + if err != nil { + return false, "", fmt.Errorf("while downloading %s to %s: %w", i.Name, url, err) } - return body, url, nil + return downloaded, url, nil } // download downloads the item from the hub and writes it to the hub directory. 
-func (i *Item) download(overwrite bool) (string, error) { +func (i *Item) download(overwrite bool) (bool, error) { // ensure that target file is within target dir finalPath, err := i.downloadPath() if err != nil { - return "", err + return false, err } if i.State.IsLocal() { i.hub.logger.Warningf("%s is local, can't download", i.Name) - return finalPath, nil + return false, nil } // if user didn't --force, don't overwrite local, tainted, up-to-date files if !overwrite { if i.State.Tainted { i.hub.logger.Debugf("%s: tainted, not updated", i.Name) - return "", nil + return false, nil } if i.State.UpToDate { @@ -184,45 +167,32 @@ func (i *Item) download(overwrite bool) (string, error) { } } - body, url, err := i.FetchLatest() + downloaded, _, err := i.FetchContentTo(finalPath) if err != nil { - what := i.Name - if url != "" { - what += " from " + url - } - - return "", fmt.Errorf("while downloading %s: %w", what, err) - } - - // all good, install - - parentDir := filepath.Dir(finalPath) - - if err = os.MkdirAll(parentDir, os.ModePerm); err != nil { - return "", fmt.Errorf("while creating %s: %w", parentDir, err) - } - - // check actual file - if _, err = os.Stat(finalPath); !os.IsNotExist(err) { - i.hub.logger.Warningf("%s: overwrite", i.Name) - i.hub.logger.Debugf("target: %s", finalPath) - } else { - i.hub.logger.Infof("%s: OK", i.Name) + return false, fmt.Errorf("while downloading %s: %w", i.Name, err) } - if err = os.WriteFile(finalPath, body, 0o644); err != nil { - return "", fmt.Errorf("while writing %s: %w", finalPath, err) + if downloaded { + i.hub.logger.Infof("Downloaded %s", i.Name) } i.State.Downloaded = true i.State.Tainted = false i.State.UpToDate = true - if err = downloadDataSet(i.hub.local.InstallDataDir, overwrite, bytes.NewReader(body), i.hub.logger); err != nil { - return "", fmt.Errorf("while downloading data for %s: %w", i.FileName, err) + // read content to get the list of data files + reader, err := os.Open(finalPath) + if err != nil { + 
return false, fmt.Errorf("while opening %s: %w", finalPath, err) + } + + defer reader.Close() + + if err = downloadDataSet(i.hub.local.InstallDataDir, overwrite, reader, i.hub.logger); err != nil { + return false, fmt.Errorf("while downloading data for %s: %w", i.FileName, err) } - return finalPath, nil + return true, nil } // DownloadDataIfNeeded downloads the data set for the item. diff --git a/pkg/cwhub/remote.go b/pkg/cwhub/remote.go index c1eb5a7080f..5e42555fa61 100644 --- a/pkg/cwhub/remote.go +++ b/pkg/cwhub/remote.go @@ -1,9 +1,12 @@ package cwhub import ( + "context" "fmt" - "io" - "net/http" + + "github.com/sirupsen/logrus" + + "github.com/crowdsecurity/go-cs-lib/downloader" ) // RemoteHubCfg is used to retrieve index and items from the remote hub. @@ -28,34 +31,28 @@ func (r *RemoteHubCfg) urlTo(remotePath string) (string, error) { } // fetchIndex downloads the index from the hub and returns the content. -func (r *RemoteHubCfg) fetchIndex() ([]byte, error) { +func (r *RemoteHubCfg) fetchIndex(destPath string) (bool, error) { if r == nil { - return nil, ErrNilRemoteHub + return false, ErrNilRemoteHub } url, err := r.urlTo(r.IndexPath) if err != nil { - return nil, fmt.Errorf("failed to build hub index request: %w", err) + return false, fmt.Errorf("failed to build hub index request: %w", err) } - resp, err := hubClient.Get(url) - if err != nil { - return nil, fmt.Errorf("failed http request for hub index: %w", err) - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - if resp.StatusCode == http.StatusNotFound { - return nil, IndexNotFoundError{url, r.Branch} - } - - return nil, fmt.Errorf("bad http code %d for %s", resp.StatusCode, url) - } + ctx := context.TODO() - body, err := io.ReadAll(resp.Body) + downloaded, err := downloader. + New(). + WithHTTPClient(hubClient). + ToFile(destPath). + CompareContent(). + WithLogger(logrus.WithFields(logrus.Fields{"url": url})). 
+ Download(ctx, url) if err != nil { - return nil, fmt.Errorf("failed to read request answer for hub index: %w", err) + return false, err } - return body, nil + return downloaded, nil } diff --git a/pkg/cwhub/sync.go b/pkg/cwhub/sync.go index cb7bf37867c..42db255c883 100644 --- a/pkg/cwhub/sync.go +++ b/pkg/cwhub/sync.go @@ -1,10 +1,7 @@ package cwhub import ( - "crypto/sha256" - "encoding/hex" "fmt" - "io" "os" "path/filepath" "slices" @@ -12,6 +9,7 @@ import ( "strings" "github.com/Masterminds/semver/v3" + "github.com/crowdsecurity/go-cs-lib/downloader" "github.com/sirupsen/logrus" "gopkg.in/yaml.v3" ) @@ -38,29 +36,13 @@ func linkTarget(path string, logger *logrus.Logger) (string, error) { return hubpath, nil } -func getSHA256(filepath string) (string, error) { - f, err := os.Open(filepath) - if err != nil { - return "", fmt.Errorf("unable to open '%s': %w", filepath, err) - } - - defer f.Close() - - h := sha256.New() - if _, err := io.Copy(h, f); err != nil { - return "", fmt.Errorf("unable to calculate sha256 of '%s': %w", filepath, err) - } - - return hex.EncodeToString(h.Sum(nil)), nil -} - // information used to create a new Item, from a file path. 
type itemFileInfo struct { - inhub bool fname string stage string ftype string fauthor string + inhub bool } func (h *Hub) getItemFileInfo(path string, logger *logrus.Logger) (*itemFileInfo, error) { @@ -466,7 +448,7 @@ func (h *Hub) localSync() error { func (i *Item) setVersionState(path string, inhub bool) error { var err error - i.State.LocalHash, err = getSHA256(path) + i.State.LocalHash, err = downloader.SHA256(path) if err != nil { return fmt.Errorf("failed to get sha256 of %s: %w", path, err) } diff --git a/test/bats/20_hub.bats b/test/bats/20_hub.bats index 18e3770bcd0..0d9f29b2418 100644 --- a/test/bats/20_hub.bats +++ b/test/bats/20_hub.bats @@ -125,13 +125,19 @@ teardown() { assert_stderr --partial "Upgraded 0 contexts" assert_stderr --partial "Upgrading collections" assert_stderr --partial "Upgraded 0 collections" + assert_stderr --partial "Upgrading appsec-configs" + assert_stderr --partial "Upgraded 0 appsec-configs" + assert_stderr --partial "Upgrading appsec-rules" + assert_stderr --partial "Upgraded 0 appsec-rules" + assert_stderr --partial "Upgrading collections" + assert_stderr --partial "Upgraded 0 collections" rune -0 cscli parsers install crowdsecurity/syslog-logs rune -0 cscli hub upgrade assert_stderr --partial "crowdsecurity/syslog-logs: up-to-date" rune -0 cscli hub upgrade --force - assert_stderr --partial "crowdsecurity/syslog-logs: overwrite" + assert_stderr --partial "crowdsecurity/syslog-logs: up-to-date" assert_stderr --partial "crowdsecurity/syslog-logs: updated" assert_stderr --partial "Upgraded 1 parsers" # this is used by the cron script to know if the hub was updated diff --git a/test/bats/20_hub_collections.bats b/test/bats/20_hub_collections.bats index 5e5b43a9e4f..1381fe8e55a 100644 --- a/test/bats/20_hub_collections.bats +++ b/test/bats/20_hub_collections.bats @@ -180,7 +180,6 @@ teardown() { assert_stderr --partial "error while installing 'crowdsecurity/sshd': while enabling crowdsecurity/sshd: crowdsecurity/sshd is 
tainted, won't enable unless --force" rune -0 cscli collections install crowdsecurity/sshd --force - assert_stderr --partial "crowdsecurity/sshd: overwrite" assert_stderr --partial "Enabled crowdsecurity/sshd" } diff --git a/test/bats/20_hub_parsers.bats b/test/bats/20_hub_parsers.bats index 71a1f933a92..214463f9cfc 100644 --- a/test/bats/20_hub_parsers.bats +++ b/test/bats/20_hub_parsers.bats @@ -180,7 +180,6 @@ teardown() { assert_stderr --partial "error while installing 'crowdsecurity/whitelists': while enabling crowdsecurity/whitelists: crowdsecurity/whitelists is tainted, won't enable unless --force" rune -0 cscli parsers install crowdsecurity/whitelists --force - assert_stderr --partial "crowdsecurity/whitelists: overwrite" assert_stderr --partial "Enabled crowdsecurity/whitelists" } diff --git a/test/bats/20_hub_postoverflows.bats b/test/bats/20_hub_postoverflows.bats index de4b1e8a59e..5123966a44e 100644 --- a/test/bats/20_hub_postoverflows.bats +++ b/test/bats/20_hub_postoverflows.bats @@ -180,7 +180,6 @@ teardown() { assert_stderr --partial "error while installing 'crowdsecurity/rdns': while enabling crowdsecurity/rdns: crowdsecurity/rdns is tainted, won't enable unless --force" rune -0 cscli postoverflows install crowdsecurity/rdns --force - assert_stderr --partial "crowdsecurity/rdns: overwrite" assert_stderr --partial "Enabled crowdsecurity/rdns" } diff --git a/test/bats/20_hub_scenarios.bats b/test/bats/20_hub_scenarios.bats index 9c441057aa2..3089e244cf1 100644 --- a/test/bats/20_hub_scenarios.bats +++ b/test/bats/20_hub_scenarios.bats @@ -181,7 +181,6 @@ teardown() { assert_stderr --partial "error while installing 'crowdsecurity/ssh-bf': while enabling crowdsecurity/ssh-bf: crowdsecurity/ssh-bf is tainted, won't enable unless --force" rune -0 cscli scenarios install crowdsecurity/ssh-bf --force - assert_stderr --partial "crowdsecurity/ssh-bf: overwrite" assert_stderr --partial "Enabled crowdsecurity/ssh-bf" } From 
e5cd480425d6367f84cac845dbba9bc6daa241f3 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 7 May 2024 13:49:19 +0200 Subject: [PATCH 118/318] update go-cs-lib to 0.0.11 (#2990) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 2aae7f2a857..49cfe05ad39 100644 --- a/go.mod +++ b/go.mod @@ -27,7 +27,7 @@ require ( github.com/corazawaf/libinjection-go v0.1.2 github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607 github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 - github.com/crowdsecurity/go-cs-lib v0.0.11-0.20240502193824-180b39e88a51 + github.com/crowdsecurity/go-cs-lib v0.0.11 github.com/crowdsecurity/grokky v0.2.1 github.com/crowdsecurity/machineid v1.0.2 github.com/davecgh/go-spew v1.1.1 diff --git a/go.sum b/go.sum index ebf10e598fe..05b38f68384 100644 --- a/go.sum +++ b/go.sum @@ -107,8 +107,8 @@ github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607 h1:hyrYw3h github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607/go.mod h1:br36fEqurGYZQGit+iDYsIzW0FF6VufMbDzyyLxEuPA= github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 h1:r97WNVC30Uen+7WnLs4xDScS/Ex988+id2k6mDf8psU= github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26/go.mod h1:zpv7r+7KXwgVUZnUNjyP22zc/D7LKjyoY02weH2RBbk= -github.com/crowdsecurity/go-cs-lib v0.0.11-0.20240502193824-180b39e88a51 h1:Dd/T2IMB3KG1+dvm0LUuT9AKKnT2CO0sCjFfsKDIxXE= -github.com/crowdsecurity/go-cs-lib v0.0.11-0.20240502193824-180b39e88a51/go.mod h1:8FMKNGsh3hMZi2SEv6P15PURhEJnZV431XjzzBSuf0k= +github.com/crowdsecurity/go-cs-lib v0.0.11 h1:ygUOKrkMLaJ2wjC020LgtY6XDkToNFK4NmYlhpkk5ko= +github.com/crowdsecurity/go-cs-lib v0.0.11/go.mod h1:8FMKNGsh3hMZi2SEv6P15PURhEJnZV431XjzzBSuf0k= github.com/crowdsecurity/grokky v0.2.1 h1:t4VYnDlAd0RjDM2SlILalbwfCrQxtJSMGdQOR0zwkE4= github.com/crowdsecurity/grokky v0.2.1/go.mod 
h1:33usDIYzGDsgX1kHAThCbseso6JuWNJXOzRQDGXHtWM= github.com/crowdsecurity/machineid v1.0.2 h1:wpkpsUghJF8Khtmn/tg6GxgdhLA1Xflerh5lirI+bdc= From defa0767940fc6e157fd620a9d9be24b691671ce Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 7 May 2024 15:09:22 +0200 Subject: [PATCH 119/318] CI: disable log formatting for consistency (#2989) * CI: disable log formatting for consistency * lint (shellharden) --- test/bats/01_crowdsec.bats | 40 +++++++++--------- test/bats/01_crowdsec_lapi.bats | 8 ++-- test/bats/01_cscli.bats | 40 +++++++++--------- test/bats/02_nolapi.bats | 12 +++--- test/bats/03_noagent.bats | 12 +++--- test/bats/04_nocapi.bats | 10 ++--- test/bats/05_config_yaml_local.bats | 22 +++++----- test/bats/11_bouncers_tls.bats | 2 +- test/bats/13_capi_whitelists.bats | 14 +++---- test/bats/20_hub_collections_dep.bats | 2 +- test/bats/30_machines_tls.bats | 8 ++-- test/bats/40_cold-logs.bats | 6 +-- test/bats/40_live-ban.bats | 12 +++--- test/bats/50_simulation.bats | 10 ++--- test/bats/70_plugin_http.bats | 14 +++---- test/bats/71_plugin_dummy.bats | 10 ++--- test/bats/72_plugin_badconfig.bats | 60 +++++++++++++-------------- test/bats/73_plugin_formatting.bats | 6 +-- test/bats/80_alerts.bats | 16 +++---- test/bats/81_alert_context.bats | 8 ++-- test/bats/90_decisions.bats | 4 +- test/lib/config/config-local | 1 - 22 files changed, 158 insertions(+), 159 deletions(-) diff --git a/test/bats/01_crowdsec.bats b/test/bats/01_crowdsec.bats index 7051b4d33a3..d8d369eedf4 100644 --- a/test/bats/01_crowdsec.bats +++ b/test/bats/01_crowdsec.bats @@ -24,8 +24,8 @@ teardown() { #---------- @test "crowdsec (usage)" { - rune -0 wait-for --out "Usage of " "${CROWDSEC}" -h - rune -0 wait-for --out "Usage of " "${CROWDSEC}" --help + rune -0 wait-for --out "Usage of " "$CROWDSEC" -h + rune -0 wait-for --out "Usage of " "$CROWDSEC" --help } @test "crowdsec (unknown flag)" { @@ -33,19 +33,19 @@ teardown() { } @test "crowdsec (unknown 
argument)" { - rune -0 wait-for --err "argument provided but not defined: trololo" "${CROWDSEC}" trololo + rune -0 wait-for --err "argument provided but not defined: trololo" "$CROWDSEC" trololo } @test "crowdsec (no api and no agent)" { rune -0 wait-for \ --err "you must run at least the API Server or crowdsec" \ - "${CROWDSEC}" -no-api -no-cs + "$CROWDSEC" -no-api -no-cs } @test "crowdsec - print error on exit" { # errors that cause program termination are printed to stderr, not only logs config_set '.db_config.type="meh"' - rune -1 "${CROWDSEC}" + rune -1 "$CROWDSEC" assert_stderr --partial "unable to create database client: unknown database type 'meh'" } @@ -53,23 +53,23 @@ teardown() { config_set '.common={}' rune -0 wait-for \ --err "Starting processing data" \ - "${CROWDSEC}" + "$CROWDSEC" refute_output config_set 'del(.common)' rune -0 wait-for \ --err "Starting processing data" \ - "${CROWDSEC}" + "$CROWDSEC" refute_output } @test "CS_LAPI_SECRET not strong enough" { - CS_LAPI_SECRET=foo rune -1 wait-for "${CROWDSEC}" + CS_LAPI_SECRET=foo rune -1 wait-for "$CROWDSEC" assert_stderr --partial "api server init: unable to run local API: controller init: CS_LAPI_SECRET not strong enough" } @test "crowdsec - reload (change of logfile, disabled agent)" { - logdir1=$(TMPDIR="${BATS_TEST_TMPDIR}" mktemp -u) + logdir1=$(TMPDIR="$BATS_TEST_TMPDIR" mktemp -u) log_old="${logdir1}/crowdsec.log" config_set ".common.log_dir=\"${logdir1}\"" @@ -81,7 +81,7 @@ teardown() { assert_file_exists "$log_old" assert_file_contains "$log_old" "Starting processing data" - logdir2=$(TMPDIR="${BATS_TEST_TMPDIR}" mktemp -u) + logdir2=$(TMPDIR="$BATS_TEST_TMPDIR" mktemp -u) log_new="${logdir2}/crowdsec.log" config_set ".common.log_dir=\"${logdir2}\"" @@ -137,7 +137,7 @@ teardown() { ACQUIS_YAML=$(config_get '.crowdsec_service.acquisition_path') rm -f "$ACQUIS_YAML" - rune -1 wait-for "${CROWDSEC}" + rune -1 wait-for "$CROWDSEC" assert_stderr --partial "acquis.yaml: no such file or 
directory" } @@ -150,7 +150,7 @@ teardown() { rm -f "$ACQUIS_DIR" config_set '.common.log_media="stdout"' - rune -1 wait-for "${CROWDSEC}" + rune -1 wait-for "$CROWDSEC" # check warning assert_stderr --partial "no acquisition file found" assert_stderr --partial "crowdsec init: while loading acquisition config: no datasource enabled" @@ -166,7 +166,7 @@ teardown() { config_set '.crowdsec_service.acquisition_dir=""' config_set '.common.log_media="stdout"' - rune -1 wait-for "${CROWDSEC}" + rune -1 wait-for "$CROWDSEC" # check warning assert_stderr --partial "no acquisition_path or acquisition_dir specified" assert_stderr --partial "crowdsec init: while loading acquisition config: no datasource enabled" @@ -184,13 +184,13 @@ teardown() { rune -0 wait-for \ --err "Starting processing data" \ - "${CROWDSEC}" + "$CROWDSEC" # now, if foo.yaml is empty instead, there won't be valid datasources. cat /dev/null >"$ACQUIS_DIR"/foo.yaml - rune -1 wait-for "${CROWDSEC}" + rune -1 wait-for "$CROWDSEC" assert_stderr --partial "crowdsec init: while loading acquisition config: no datasource enabled" } @@ -217,8 +217,8 @@ teardown() { #shellcheck disable=SC2016 rune -0 wait-for \ - --err 'datasource '\''journalctl'\'' is not available: exec: "journalctl": executable file not found in ' \ - env PATH='' "${CROWDSEC}" + --err 'datasource '\''journalctl'\'' is not available: exec: \\"journalctl\\": executable file not found in ' \ + env PATH='' "$CROWDSEC" # if all datasources are disabled, crowdsec should exit @@ -226,7 +226,7 @@ teardown() { rm -f "$ACQUIS_YAML" config_set '.crowdsec_service.acquisition_path=""' - rune -1 wait-for env PATH='' "${CROWDSEC}" + rune -1 wait-for env PATH='' "$CROWDSEC" assert_stderr --partial "crowdsec init: while loading acquisition config: no datasource enabled" } @@ -237,11 +237,11 @@ teardown() { # if filenames are missing, it won't be able to detect source type config_set "$ACQUIS_YAML" '.source="file"' - rune -1 wait-for "${CROWDSEC}" + rune -1 
wait-for "$CROWDSEC" assert_stderr --partial "failed to configure datasource file: no filename or filenames configuration provided" config_set "$ACQUIS_YAML" '.filenames=["file.log"]' config_set "$ACQUIS_YAML" '.meh=3' - rune -1 wait-for "${CROWDSEC}" + rune -1 wait-for "$CROWDSEC" assert_stderr --partial "field meh not found in type fileacquisition.FileConfiguration" } diff --git a/test/bats/01_crowdsec_lapi.bats b/test/bats/01_crowdsec_lapi.bats index 1b7940615ed..0f6c41cc53d 100644 --- a/test/bats/01_crowdsec_lapi.bats +++ b/test/bats/01_crowdsec_lapi.bats @@ -27,25 +27,25 @@ teardown() { @test "lapi (.api.server.enable=false)" { rune -0 config_set '.api.server.enable=false' - rune -1 "${CROWDSEC}" -no-cs + rune -1 "$CROWDSEC" -no-cs assert_stderr --partial "you must run at least the API Server or crowdsec" } @test "lapi (no .api.server.listen_uri)" { rune -0 config_set 'del(.api.server.listen_socket) | del(.api.server.listen_uri)' - rune -1 "${CROWDSEC}" -no-cs + rune -1 "$CROWDSEC" -no-cs assert_stderr --partial "no listen_uri or listen_socket specified" } @test "lapi (bad .api.server.listen_uri)" { rune -0 config_set 'del(.api.server.listen_socket) | .api.server.listen_uri="127.0.0.1:-80"' - rune -1 "${CROWDSEC}" -no-cs + rune -1 "$CROWDSEC" -no-cs assert_stderr --partial "local API server stopped with error: listening on 127.0.0.1:-80: listen tcp: address -80: invalid port" } @test "lapi (listen on random port)" { config_set '.common.log_media="stdout"' rune -0 config_set 'del(.api.server.listen_socket) | .api.server.listen_uri="127.0.0.1:0"' - rune -0 wait-for --err "CrowdSec Local API listening on 127.0.0.1:" "${CROWDSEC}" -no-cs + rune -0 wait-for --err "CrowdSec Local API listening on 127.0.0.1:" "$CROWDSEC" -no-cs } diff --git a/test/bats/01_cscli.bats b/test/bats/01_cscli.bats index 7e74f6f9714..33dd2e12ec2 100644 --- a/test/bats/01_cscli.bats +++ b/test/bats/01_cscli.bats @@ -51,7 +51,7 @@ teardown() { assert_stderr --partial "Constraint_acquis:" # 
should work without configuration file - rm "${CONFIG_YAML}" + rm "$CONFIG_YAML" rune -0 cscli version assert_stderr --partial "version:" } @@ -62,7 +62,7 @@ teardown() { assert_line --regexp ".* help .* Help about any command" # should work without configuration file - rm "${CONFIG_YAML}" + rm "$CONFIG_YAML" rune -0 cscli help assert_line "Available Commands:" } @@ -132,7 +132,7 @@ teardown() { @test "cscli - required configuration paths" { - config=$(cat "${CONFIG_YAML}") + config=$(cat "$CONFIG_YAML") configdir=$(config_get '.config_paths.config_dir') # required configuration paths with no defaults @@ -140,12 +140,12 @@ teardown() { config_set 'del(.config_paths)' rune -1 cscli hub list assert_stderr --partial 'no configuration paths provided' - echo "$config" > "${CONFIG_YAML}" + echo "$config" > "$CONFIG_YAML" config_set 'del(.config_paths.data_dir)' rune -1 cscli hub list assert_stderr --partial "please provide a data directory with the 'data_dir' directive in the 'config_paths' section" - echo "$config" > "${CONFIG_YAML}" + echo "$config" > "$CONFIG_YAML" # defaults @@ -153,13 +153,13 @@ teardown() { rune -0 cscli hub list rune -0 cscli config show --key Config.ConfigPaths.HubDir assert_output "$configdir/hub" - echo "$config" > "${CONFIG_YAML}" + echo "$config" > "$CONFIG_YAML" config_set 'del(.config_paths.index_path)' rune -0 cscli hub list rune -0 cscli config show --key Config.ConfigPaths.HubIndexFile assert_output "$configdir/hub/.index.json" - echo "$config" > "${CONFIG_YAML}" + echo "$config" > "$CONFIG_YAML" } @test "cscli config show-yaml" { @@ -182,30 +182,30 @@ teardown() { assert_stderr --partial "failed to backup config: while creating /dev/null/blah: mkdir /dev/null/blah: not a directory" # pick a dirpath - backupdir=$(TMPDIR="${BATS_TEST_TMPDIR}" mktemp -u) + backupdir=$(TMPDIR="$BATS_TEST_TMPDIR" mktemp -u) # succeed the first time - rune -0 cscli config backup "${backupdir}" + rune -0 cscli config backup "$backupdir" assert_stderr --partial 
"Starting configuration backup" # don't overwrite an existing backup - rune -1 cscli config backup "${backupdir}" + rune -1 cscli config backup "$backupdir" assert_stderr --partial "failed to backup config" assert_stderr --partial "file exists" SIMULATION_YAML="$(config_get '.config_paths.simulation_path')" # restore - rm "${SIMULATION_YAML}" - rune -0 cscli config restore "${backupdir}" - assert_file_exists "${SIMULATION_YAML}" + rm "$SIMULATION_YAML" + rune -0 cscli config restore "$backupdir" + assert_file_exists "$SIMULATION_YAML" # cleanup rm -rf -- "${backupdir:?}" # backup: detect missing files - rm "${SIMULATION_YAML}" - rune -1 cscli config backup "${backupdir}" + rm "$SIMULATION_YAML" + rune -1 cscli config backup "$backupdir" assert_stderr --regexp "failed to backup config: failed copy .* to .*: stat .*: no such file or directory" rm -rf -- "${backupdir:?}" } @@ -221,7 +221,7 @@ teardown() { @test "cscli - missing LAPI credentials file" { LOCAL_API_CREDENTIALS=$(config_get '.api.client.credentials_path') - rm -f "${LOCAL_API_CREDENTIALS}" + rm -f "$LOCAL_API_CREDENTIALS" rune -1 cscli lapi status assert_stderr --partial "loading api client: while reading yaml file: open ${LOCAL_API_CREDENTIALS}: no such file or directory" @@ -234,7 +234,7 @@ teardown() { @test "cscli - empty LAPI credentials file" { LOCAL_API_CREDENTIALS=$(config_get '.api.client.credentials_path') - : > "${LOCAL_API_CREDENTIALS}" + : > "$LOCAL_API_CREDENTIALS" rune -1 cscli lapi status assert_stderr --partial "no credentials or URL found in api client configuration '${LOCAL_API_CREDENTIALS}'" @@ -259,7 +259,7 @@ teardown() { @test "cscli - malformed LAPI url" { LOCAL_API_CREDENTIALS=$(config_get '.api.client.credentials_path') - config_set "${LOCAL_API_CREDENTIALS}" '.url="http://127.0.0.1:-80"' + config_set "$LOCAL_API_CREDENTIALS" '.url="http://127.0.0.1:-80"' rune -1 cscli lapi status -o json rune -0 jq -r '.msg' <(stderr) @@ -269,7 +269,7 @@ teardown() { @test "cscli - bad LAPI 
password" { rune -0 ./instance-crowdsec start LOCAL_API_CREDENTIALS=$(config_get '.api.client.credentials_path') - config_set "${LOCAL_API_CREDENTIALS}" '.password="meh"' + config_set "$LOCAL_API_CREDENTIALS" '.password="meh"' rune -1 cscli lapi status -o json rune -0 jq -r '.msg' <(stderr) @@ -286,7 +286,7 @@ teardown() { rune -0 cscli completion fish assert_output --partial "# fish completion for cscli" - rm "${CONFIG_YAML}" + rm "$CONFIG_YAML" rune -0 cscli completion bash assert_output --partial "# bash completion for cscli" } diff --git a/test/bats/02_nolapi.bats b/test/bats/02_nolapi.bats index f1d810bc166..cefa6d798b4 100644 --- a/test/bats/02_nolapi.bats +++ b/test/bats/02_nolapi.bats @@ -27,12 +27,12 @@ teardown() { config_set '.common.log_media="stdout"' rune -0 wait-for \ --err "CrowdSec Local API listening" \ - "${CROWDSEC}" + "$CROWDSEC" } @test "crowdsec should not run without LAPI (-no-api flag)" { config_set '.common.log_media="stdout"' - rune -1 wait-for "${CROWDSEC}" -no-api + rune -1 wait-for "$CROWDSEC" -no-api } @test "crowdsec should not run without LAPI (no api.server in configuration file)" { @@ -40,7 +40,7 @@ teardown() { config_log_stderr rune -0 wait-for \ --err "crowdsec local API is disabled" \ - "${CROWDSEC}" + "$CROWDSEC" } @test "capi status shouldn't be ok without api.server" { @@ -68,10 +68,10 @@ teardown() { @test "cscli config backup" { config_disable_lapi - backupdir=$(TMPDIR="${BATS_TEST_TMPDIR}" mktemp -u) - rune -0 cscli config backup "${backupdir}" + backupdir=$(TMPDIR="$BATS_TEST_TMPDIR" mktemp -u) + rune -0 cscli config backup "$backupdir" assert_stderr --partial "Starting configuration backup" - rune -1 cscli config backup "${backupdir}" + rune -1 cscli config backup "$backupdir" rm -rf -- "${backupdir:?}" assert_stderr --partial "failed to backup config" diff --git a/test/bats/03_noagent.bats b/test/bats/03_noagent.bats index e75e375ad1c..60731b90713 100644 --- a/test/bats/03_noagent.bats +++ b/test/bats/03_noagent.bats 
@@ -26,14 +26,14 @@ teardown() { config_set '.common.log_media="stdout"' rune -0 wait-for \ --err "Starting processing data" \ - "${CROWDSEC}" + "$CROWDSEC" } @test "no agent: crowdsec LAPI should run (-no-cs flag)" { config_set '.common.log_media="stdout"' rune -0 wait-for \ --err "CrowdSec Local API listening" \ - "${CROWDSEC}" -no-cs + "$CROWDSEC" -no-cs } @test "no agent: crowdsec LAPI should run (no crowdsec_service in configuration file)" { @@ -41,7 +41,7 @@ teardown() { config_log_stderr rune -0 wait-for \ --err "crowdsec agent is disabled" \ - "${CROWDSEC}" + "$CROWDSEC" } @test "no agent: cscli config show" { @@ -62,10 +62,10 @@ teardown() { @test "no agent: cscli config backup" { config_disable_agent - backupdir=$(TMPDIR="${BATS_TEST_TMPDIR}" mktemp -u) - rune -0 cscli config backup "${backupdir}" + backupdir=$(TMPDIR="$BATS_TEST_TMPDIR" mktemp -u) + rune -0 cscli config backup "$backupdir" assert_stderr --partial "Starting configuration backup" - rune -1 cscli config backup "${backupdir}" + rune -1 cscli config backup "$backupdir" assert_stderr --partial "failed to backup config" assert_stderr --partial "file exists" diff --git a/test/bats/04_nocapi.bats b/test/bats/04_nocapi.bats index 234db182a53..c02a75810b9 100644 --- a/test/bats/04_nocapi.bats +++ b/test/bats/04_nocapi.bats @@ -27,7 +27,7 @@ teardown() { rune -0 wait-for \ --err "Communication with CrowdSec Central API disabled from args" \ - "${CROWDSEC}" -no-capi + "$CROWDSEC" -no-capi } @test "without capi: crowdsec LAPI should still work" { @@ -35,7 +35,7 @@ teardown() { config_set '.common.log_media="stdout"' rune -0 wait-for \ --err "push and pull to Central API disabled" \ - "${CROWDSEC}" + "$CROWDSEC" } @test "without capi: cscli capi status -> fail" { @@ -53,10 +53,10 @@ teardown() { @test "no agent: cscli config backup" { config_disable_capi - backupdir=$(TMPDIR="${BATS_TEST_TMPDIR}" mktemp -u) - rune -0 cscli config backup "${backupdir}" + backupdir=$(TMPDIR="$BATS_TEST_TMPDIR" mktemp -u) 
+ rune -0 cscli config backup "$backupdir" assert_stderr --partial "Starting configuration backup" - rune -1 cscli config backup "${backupdir}" + rune -1 cscli config backup "$backupdir" assert_stderr --partial "failed to backup config" assert_stderr --partial "file exists" rm -rf -- "${backupdir:?}" diff --git a/test/bats/05_config_yaml_local.bats b/test/bats/05_config_yaml_local.bats index b8b6da117ea..ec7a4201964 100644 --- a/test/bats/05_config_yaml_local.bats +++ b/test/bats/05_config_yaml_local.bats @@ -21,7 +21,7 @@ setup() { load "../lib/setup.sh" ./instance-data load rune -0 config_get '.api.client.credentials_path' - LOCAL_API_CREDENTIALS="${output}" + LOCAL_API_CREDENTIALS="$output" export LOCAL_API_CREDENTIALS } @@ -88,13 +88,13 @@ teardown() { @test "simulation.yaml.local" { rune -0 config_get '.config_paths.simulation_path' refute_output null - SIMULATION="${output}" + SIMULATION="$output" - echo "simulation: off" >"${SIMULATION}" + echo "simulation: off" >"$SIMULATION" rune -0 cscli simulation status -o human assert_stderr --partial "global simulation: disabled" - echo "simulation: on" >"${SIMULATION}" + echo "simulation: on" >"$SIMULATION" rune -0 cscli simulation status -o human assert_stderr --partial "global simulation: enabled" @@ -110,7 +110,7 @@ teardown() { @test "profiles.yaml.local" { rune -0 config_get '.api.server.profiles_path' refute_output null - PROFILES="${output}" + PROFILES="$output" cat <<-EOT >"${PROFILES}.local" name: default_ip_remediation @@ -122,17 +122,17 @@ teardown() { on_success: break EOT - tmpfile=$(TMPDIR="${BATS_TEST_TMPDIR}" mktemp) - touch "${tmpfile}" + tmpfile=$(TMPDIR="$BATS_TEST_TMPDIR" mktemp) + touch "$tmpfile" ACQUIS_YAML=$(config_get '.crowdsec_service.acquisition_path') - echo -e "---\nfilename: ${tmpfile}\nlabels:\n type: syslog\n" >>"${ACQUIS_YAML}" + echo -e "---\nfilename: ${tmpfile}\nlabels:\n type: syslog\n" >>"$ACQUIS_YAML" rune -0 cscli collections install crowdsecurity/sshd rune -0 cscli parsers 
install crowdsecurity/syslog-logs ./instance-crowdsec start sleep .5 - fake_log >>"${tmpfile}" + fake_log >>"$tmpfile" # this could be simplified, but some systems are slow and we don't want to # wait more than required @@ -141,6 +141,6 @@ teardown() { rune -0 cscli decisions list -o json rune -0 jq --exit-status '.[].decisions[0] | [.value,.type] == ["1.1.1.172","captcha"]' <(output) && break done - rm -f -- "${tmpfile}" - [[ "${status}" -eq 0 ]] || fail "captcha not triggered" + rm -f -- "$tmpfile" + [[ "$status" -eq 0 ]] || fail "captcha not triggered" } diff --git a/test/bats/11_bouncers_tls.bats b/test/bats/11_bouncers_tls.bats index 2c39aae3079..438bec40242 100644 --- a/test/bats/11_bouncers_tls.bats +++ b/test/bats/11_bouncers_tls.bats @@ -7,7 +7,7 @@ setup_file() { load "../lib/setup_file.sh" ./instance-data load - tmpdir="${BATS_FILE_TMPDIR}" + tmpdir="$BATS_FILE_TMPDIR" export tmpdir CFDIR="${BATS_TEST_DIRNAME}/testdata/cfssl" diff --git a/test/bats/13_capi_whitelists.bats b/test/bats/13_capi_whitelists.bats index d05a9d93294..ed7ef2ac560 100644 --- a/test/bats/13_capi_whitelists.bats +++ b/test/bats/13_capi_whitelists.bats @@ -31,7 +31,7 @@ teardown() { @test "capi_whitelists: file missing" { rune -0 wait-for \ --err "while opening capi whitelist file: open $CAPI_WHITELISTS_YAML: no such file or directory" \ - "${CROWDSEC}" + "$CROWDSEC" } @test "capi_whitelists: error on open" { @@ -40,11 +40,11 @@ teardown() { if is_package_testing; then rune -0 wait-for \ --err "while parsing capi whitelist file .*: empty file" \ - "${CROWDSEC}" + "$CROWDSEC" else rune -0 wait-for \ --err "while opening capi whitelist file: open $CAPI_WHITELISTS_YAML: permission denied" \ - "${CROWDSEC}" + "$CROWDSEC" fi } @@ -52,28 +52,28 @@ teardown() { echo > "$CAPI_WHITELISTS_YAML" rune -0 wait-for \ --err "while parsing capi whitelist file '$CAPI_WHITELISTS_YAML': empty file" \ - "${CROWDSEC}" + "$CROWDSEC" } @test "capi_whitelists: empty lists" { echo '{"ips": [], "cidrs": []}' 
> "$CAPI_WHITELISTS_YAML" rune -0 wait-for \ --err "Starting processing data" \ - "${CROWDSEC}" + "$CROWDSEC" } @test "capi_whitelists: bad ip" { echo '{"ips": ["blahblah"], "cidrs": []}' > "$CAPI_WHITELISTS_YAML" rune -0 wait-for \ --err "while parsing capi whitelist file '$CAPI_WHITELISTS_YAML': invalid IP address: blahblah" \ - "${CROWDSEC}" + "$CROWDSEC" } @test "capi_whitelists: bad cidr" { echo '{"ips": [], "cidrs": ["blahblah"]}' > "$CAPI_WHITELISTS_YAML" rune -0 wait-for \ --err "while parsing capi whitelist file '$CAPI_WHITELISTS_YAML': invalid CIDR address: blahblah" \ - "${CROWDSEC}" + "$CROWDSEC" } @test "capi_whitelists: file with ip and cidr values" { diff --git a/test/bats/20_hub_collections_dep.bats b/test/bats/20_hub_collections_dep.bats index c3df948a353..673b812dc0d 100644 --- a/test/bats/20_hub_collections_dep.bats +++ b/test/bats/20_hub_collections_dep.bats @@ -121,6 +121,6 @@ teardown() { rune -1 cscli hub list assert_stderr --partial "circular dependency detected" - rune -1 wait-for "${CROWDSEC}" + rune -1 wait-for "$CROWDSEC" assert_stderr --partial "circular dependency detected" } diff --git a/test/bats/30_machines_tls.bats b/test/bats/30_machines_tls.bats index 6909c89cb1f..877f8672b24 100644 --- a/test/bats/30_machines_tls.bats +++ b/test/bats/30_machines_tls.bats @@ -7,10 +7,10 @@ setup_file() { load "../lib/setup_file.sh" ./instance-data load - CONFIG_DIR=$(dirname "${CONFIG_YAML}") + CONFIG_DIR=$(dirname "$CONFIG_YAML") export CONFIG_DIR - tmpdir="${BATS_FILE_TMPDIR}" + tmpdir="$BATS_FILE_TMPDIR" export tmpdir CFDIR="${BATS_TEST_DIRNAME}/testdata/cfssl" @@ -80,7 +80,7 @@ teardown() { rune -0 wait-for \ --err "missing TLS key file" \ - "${CROWDSEC}" + "$CROWDSEC" } @test "missing cert_file" { @@ -88,7 +88,7 @@ teardown() { rune -0 wait-for \ --err "missing TLS cert file" \ - "${CROWDSEC}" + "$CROWDSEC" } @test "invalid OU for agent" { diff --git a/test/bats/40_cold-logs.bats b/test/bats/40_cold-logs.bats index 36220375b87..0e167d3d077 
100644 --- a/test/bats/40_cold-logs.bats +++ b/test/bats/40_cold-logs.bats @@ -32,14 +32,14 @@ setup() { #---------- @test "-type and -dsn are required together" { - rune -1 "${CROWDSEC}" -no-api -type syslog + rune -1 "$CROWDSEC" -no-api -type syslog assert_stderr --partial "-type requires a -dsn argument" - rune -1 "${CROWDSEC}" -no-api -dsn file:///dev/fd/0 + rune -1 "$CROWDSEC" -no-api -dsn file:///dev/fd/0 assert_stderr --partial "-dsn requires a -type argument" } @test "the one-shot mode works" { - rune -0 "${CROWDSEC}" -dsn file://<(fake_log) -type syslog -no-api + rune -0 "$CROWDSEC" -dsn file://<(fake_log) -type syslog -no-api refute_output assert_stderr --partial "single file mode : log_media=stdout daemonize=false" assert_stderr --regexp "Adding file .* to filelist" diff --git a/test/bats/40_live-ban.bats b/test/bats/40_live-ban.bats index a544f67be18..122ea05e41a 100644 --- a/test/bats/40_live-ban.bats +++ b/test/bats/40_live-ban.bats @@ -35,20 +35,20 @@ teardown() { #---------- @test "1.1.1.172 has been banned" { - tmpfile=$(TMPDIR="${BATS_TEST_TMPDIR}" mktemp) - touch "${tmpfile}" + tmpfile=$(TMPDIR="$BATS_TEST_TMPDIR" mktemp) + touch "$tmpfile" ACQUIS_YAML=$(config_get '.crowdsec_service.acquisition_path') - echo -e "---\nfilename: ${tmpfile}\nlabels:\n type: syslog\n" >>"${ACQUIS_YAML}" + echo -e "---\nfilename: ${tmpfile}\nlabels:\n type: syslog\n" >>"$ACQUIS_YAML" ./instance-crowdsec start sleep 0.2 - fake_log >>"${tmpfile}" + fake_log >>"$tmpfile" sleep 0.2 - rm -f -- "${tmpfile}" + rm -f -- "$tmpfile" found=0 # this may take some time in CI @@ -59,5 +59,5 @@ teardown() { fi sleep 0.2 done - assert_equal 1 "${found}" + assert_equal 1 "$found" } diff --git a/test/bats/50_simulation.bats b/test/bats/50_simulation.bats index 0d29d6bfd52..ab4145551ff 100644 --- a/test/bats/50_simulation.bats +++ b/test/bats/50_simulation.bats @@ -33,7 +33,7 @@ setup() { @test "we have one decision" { rune -0 cscli simulation disable --global - fake_log | 
"${CROWDSEC}" -dsn file:///dev/fd/0 -type syslog -no-api + fake_log | "$CROWDSEC" -dsn file:///dev/fd/0 -type syslog -no-api rune -0 cscli decisions list -o json rune -0 jq '. | length' <(output) assert_output 1 @@ -41,7 +41,7 @@ setup() { @test "1.1.1.174 has been banned (exact)" { rune -0 cscli simulation disable --global - fake_log | "${CROWDSEC}" -dsn file:///dev/fd/0 -type syslog -no-api + fake_log | "$CROWDSEC" -dsn file:///dev/fd/0 -type syslog -no-api rune -0 cscli decisions list -o json rune -0 jq -r '.[].decisions[0].value' <(output) assert_output '1.1.1.174' @@ -49,7 +49,7 @@ setup() { @test "decision has simulated == false (exact)" { rune -0 cscli simulation disable --global - fake_log | "${CROWDSEC}" -dsn file:///dev/fd/0 -type syslog -no-api + fake_log | "$CROWDSEC" -dsn file:///dev/fd/0 -type syslog -no-api rune -0 cscli decisions list -o json rune -0 jq '.[].decisions[0].simulated' <(output) assert_output 'false' @@ -57,7 +57,7 @@ setup() { @test "simulated scenario, listing non-simulated: expect no decision" { rune -0 cscli simulation enable crowdsecurity/ssh-bf - fake_log | "${CROWDSEC}" -dsn file:///dev/fd/0 -type syslog -no-api + fake_log | "$CROWDSEC" -dsn file:///dev/fd/0 -type syslog -no-api rune -0 cscli decisions list --no-simu -o json assert_json '[]' } @@ -65,7 +65,7 @@ setup() { @test "global simulation, listing non-simulated: expect no decision" { rune -0 cscli simulation disable crowdsecurity/ssh-bf rune -0 cscli simulation enable --global - fake_log | "${CROWDSEC}" -dsn file:///dev/fd/0 -type syslog -no-api + fake_log | "$CROWDSEC" -dsn file:///dev/fd/0 -type syslog -no-api rune -0 cscli decisions list --no-simu -o json assert_json '[]' } diff --git a/test/bats/70_plugin_http.bats b/test/bats/70_plugin_http.bats index a8b860aab83..462fc7c9406 100644 --- a/test/bats/70_plugin_http.bats +++ b/test/bats/70_plugin_http.bats @@ -15,7 +15,7 @@ setup_file() { export MOCK_URL PLUGIN_DIR=$(config_get '.config_paths.plugin_dir') # could have a 
trailing slash - PLUGIN_DIR=$(realpath "${PLUGIN_DIR}") + PLUGIN_DIR=$(realpath "$PLUGIN_DIR") export PLUGIN_DIR # https://mikefarah.gitbook.io/yq/operators/env-variable-operators @@ -35,10 +35,10 @@ setup_file() { .plugin_config.group="" ' - rm -f -- "${MOCK_OUT}" + rm -f -- "$MOCK_OUT" ./instance-crowdsec start - ./instance-mock-http start "${MOCK_PORT}" + ./instance-mock-http start "$MOCK_PORT" } teardown_file() { @@ -63,24 +63,24 @@ setup() { } @test "expected 1 log line from http server" { - rune -0 wc -l <"${MOCK_OUT}" + rune -0 wc -l <"$MOCK_OUT" # wc can pad with spaces on some platforms rune -0 tr -d ' ' < <(output) assert_output 1 } @test "expected to receive 2 alerts in the request body from plugin" { - rune -0 jq -r '.request_body' <"${MOCK_OUT}" + rune -0 jq -r '.request_body' <"$MOCK_OUT" rune -0 jq -r 'length' <(output) assert_output 2 } @test "expected to receive IP 1.2.3.4 as value of first decision" { - rune -0 jq -r '.request_body[0].decisions[0].value' <"${MOCK_OUT}" + rune -0 jq -r '.request_body[0].decisions[0].value' <"$MOCK_OUT" assert_output 1.2.3.4 } @test "expected to receive IP 1.2.3.5 as value of second decision" { - rune -0 jq -r '.request_body[1].decisions[0].value' <"${MOCK_OUT}" + rune -0 jq -r '.request_body[1].decisions[0].value' <"$MOCK_OUT" assert_output 1.2.3.5 } diff --git a/test/bats/71_plugin_dummy.bats b/test/bats/71_plugin_dummy.bats index 95b64fea070..c242d7ec4bc 100644 --- a/test/bats/71_plugin_dummy.bats +++ b/test/bats/71_plugin_dummy.bats @@ -9,15 +9,15 @@ setup_file() { ./instance-data load - tempfile=$(TMPDIR="${BATS_FILE_TMPDIR}" mktemp) + tempfile=$(TMPDIR="$BATS_FILE_TMPDIR" mktemp) export tempfile - tempfile2=$(TMPDIR="${BATS_FILE_TMPDIR}" mktemp) + tempfile2=$(TMPDIR="$BATS_FILE_TMPDIR" mktemp) export tempfile2 DUMMY_YAML="$(config_get '.config_paths.notification_dir')/dummy.yaml" - config_set "${DUMMY_YAML}" ' + config_set "$DUMMY_YAML" ' .group_wait="5s" | .group_threshold=2 | .output_file=strenv(tempfile) | 
@@ -67,12 +67,12 @@ setup() { } @test "expected 1 notification" { - rune -0 cat "${tempfile}" + rune -0 cat "$tempfile" assert_output --partial 1.2.3.4 assert_output --partial 1.2.3.5 } @test "second notification works too" { - rune -0 cat "${tempfile2}" + rune -0 cat "$tempfile2" assert_output --partial secondfile } diff --git a/test/bats/72_plugin_badconfig.bats b/test/bats/72_plugin_badconfig.bats index c9a69b9fcb0..7be16c6cf8e 100644 --- a/test/bats/72_plugin_badconfig.bats +++ b/test/bats/72_plugin_badconfig.bats @@ -8,7 +8,7 @@ setup_file() { PLUGIN_DIR=$(config_get '.config_paths.plugin_dir') # could have a trailing slash - PLUGIN_DIR=$(realpath "${PLUGIN_DIR}") + PLUGIN_DIR=$(realpath "$PLUGIN_DIR") export PLUGIN_DIR PROFILES_PATH=$(config_get '.api.server.profiles_path') @@ -26,50 +26,50 @@ setup() { teardown() { ./instance-crowdsec stop - rm -f "${PLUGIN_DIR}"/badname - chmod go-w "${PLUGIN_DIR}"/notification-http || true + rm -f "$PLUGIN_DIR"/badname + chmod go-w "$PLUGIN_DIR"/notification-http || true } #---------- @test "misconfigured plugin, only user is empty" { config_set '.plugin_config.user="" | .plugin_config.group="nogroup"' - config_set "${PROFILES_PATH}" '.notifications=["http_default"]' + config_set "$PROFILES_PATH" '.notifications=["http_default"]' rune -0 wait-for \ --err "api server init: unable to run plugin broker: while loading plugin: while getting process attributes: both plugin user and group must be set" \ - "${CROWDSEC}" + "$CROWDSEC" } @test "misconfigured plugin, only group is empty" { config_set '(.plugin_config.user="nobody") | (.plugin_config.group="")' - config_set "${PROFILES_PATH}" '.notifications=["http_default"]' + config_set "$PROFILES_PATH" '.notifications=["http_default"]' rune -0 wait-for \ --err "api server init: unable to run plugin broker: while loading plugin: while getting process attributes: both plugin user and group must be set" \ - "${CROWDSEC}" + "$CROWDSEC" } @test "misconfigured plugin, user does not 
exist" { config_set '(.plugin_config.user="userdoesnotexist") | (.plugin_config.group="groupdoesnotexist")' - config_set "${PROFILES_PATH}" '.notifications=["http_default"]' + config_set "$PROFILES_PATH" '.notifications=["http_default"]' rune -0 wait-for \ --err "api server init: unable to run plugin broker: while loading plugin: while getting process attributes: user: unknown user userdoesnotexist" \ - "${CROWDSEC}" + "$CROWDSEC" } @test "misconfigured plugin, group does not exist" { config_set '(.plugin_config.user=strenv(USER)) | (.plugin_config.group="groupdoesnotexist")' - config_set "${PROFILES_PATH}" '.notifications=["http_default"]' + config_set "$PROFILES_PATH" '.notifications=["http_default"]' rune -0 wait-for \ --err "api server init: unable to run plugin broker: while loading plugin: while getting process attributes: group: unknown group groupdoesnotexist" \ - "${CROWDSEC}" + "$CROWDSEC" } @test "bad plugin name" { - config_set "${PROFILES_PATH}" '.notifications=["http_default"]' - cp "${PLUGIN_DIR}"/notification-http "${PLUGIN_DIR}"/badname + config_set "$PROFILES_PATH" '.notifications=["http_default"]' + cp "$PLUGIN_DIR"/notification-http "$PLUGIN_DIR"/badname rune -0 wait-for \ --err "api server init: unable to run plugin broker: while loading plugin: plugin name ${PLUGIN_DIR}/badname is invalid. 
Name should be like {type-name}" \ - "${CROWDSEC}" + "$CROWDSEC" } @test "duplicate notification config" { @@ -77,58 +77,58 @@ teardown() { # email_default has two configurations rune -0 yq -i '.name="email_default"' "$CONFIG_DIR/notifications/http.yaml" # enable a notification, otherwise plugins are ignored - config_set "${PROFILES_PATH}" '.notifications=["slack_default"]' + config_set "$PROFILES_PATH" '.notifications=["slack_default"]' # the slack plugin may fail or not, but we just need the logs config_set '.common.log_media="stdout"' rune wait-for \ --err "notification 'email_default' is defined multiple times" \ - "${CROWDSEC}" + "$CROWDSEC" } @test "bad plugin permission (group writable)" { - config_set "${PROFILES_PATH}" '.notifications=["http_default"]' - chmod g+w "${PLUGIN_DIR}"/notification-http + config_set "$PROFILES_PATH" '.notifications=["http_default"]' + chmod g+w "$PLUGIN_DIR"/notification-http rune -0 wait-for \ --err "api server init: unable to run plugin broker: while loading plugin: plugin at ${PLUGIN_DIR}/notification-http is group writable, group writable plugins are invalid" \ - "${CROWDSEC}" + "$CROWDSEC" } @test "bad plugin permission (world writable)" { - config_set "${PROFILES_PATH}" '.notifications=["http_default"]' - chmod o+w "${PLUGIN_DIR}"/notification-http + config_set "$PROFILES_PATH" '.notifications=["http_default"]' + chmod o+w "$PLUGIN_DIR"/notification-http rune -0 wait-for \ --err "api server init: unable to run plugin broker: while loading plugin: plugin at ${PLUGIN_DIR}/notification-http is world writable, world writable plugins are invalid" \ - "${CROWDSEC}" + "$CROWDSEC" } @test "config.yaml: missing .plugin_config section" { config_set 'del(.plugin_config)' - config_set "${PROFILES_PATH}" '.notifications=["http_default"]' + config_set "$PROFILES_PATH" '.notifications=["http_default"]' rune -0 wait-for \ --err "api server init: plugins are enabled, but the plugin_config section is missing in the configuration" \ - 
"${CROWDSEC}" + "$CROWDSEC" } @test "config.yaml: missing config_paths.notification_dir" { config_set 'del(.config_paths.notification_dir)' - config_set "${PROFILES_PATH}" '.notifications=["http_default"]' + config_set "$PROFILES_PATH" '.notifications=["http_default"]' rune -0 wait-for \ --err "api server init: plugins are enabled, but config_paths.notification_dir is not defined" \ - "${CROWDSEC}" + "$CROWDSEC" } @test "config.yaml: missing config_paths.plugin_dir" { config_set 'del(.config_paths.plugin_dir)' - config_set "${PROFILES_PATH}" '.notifications=["http_default"]' + config_set "$PROFILES_PATH" '.notifications=["http_default"]' rune -0 wait-for \ --err "api server init: plugins are enabled, but config_paths.plugin_dir is not defined" \ - "${CROWDSEC}" + "$CROWDSEC" } @test "unable to run plugin broker: while reading plugin config" { config_set '.config_paths.notification_dir="/this/path/does/not/exist"' - config_set "${PROFILES_PATH}" '.notifications=["http_default"]' + config_set "$PROFILES_PATH" '.notifications=["http_default"]' rune -0 wait-for \ --err "api server init: unable to run plugin broker: while loading plugin config: open /this/path/does/not/exist: no such file or directory" \ - "${CROWDSEC}" + "$CROWDSEC" } diff --git a/test/bats/73_plugin_formatting.bats b/test/bats/73_plugin_formatting.bats index 153193fb18f..9ed64837403 100644 --- a/test/bats/73_plugin_formatting.bats +++ b/test/bats/73_plugin_formatting.bats @@ -9,7 +9,7 @@ setup_file() { ./instance-data load - tempfile=$(TMPDIR="${BATS_FILE_TMPDIR}" mktemp) + tempfile=$(TMPDIR="$BATS_FILE_TMPDIR" mktemp) export tempfile DUMMY_YAML="$(config_get '.config_paths.notification_dir')/dummy.yaml" @@ -17,7 +17,7 @@ setup_file() { # we test the template that is suggested in the email notification # the $alert is not a shell variable # shellcheck disable=SC2016 - config_set "${DUMMY_YAML}" ' + config_set "$DUMMY_YAML" ' .group_wait="5s" | .group_threshold=2 | .output_file=strenv(tempfile) | @@ 
-58,7 +58,7 @@ setup() { } @test "expected 1 notification" { - rune -0 cat "${tempfile}" + rune -0 cat "$tempfile" assert_output - <<-EOT

1.2.3.4 will get ban for next 30s for triggering manual 'ban' from 'githubciXXXXXXXXXXXXXXXXXXXXXXXX' on machine githubciXXXXXXXXXXXXXXXXXXXXXXXX.

CrowdSec CTI

1.2.3.5 will get ban for next 30s for triggering manual 'ban' from 'githubciXXXXXXXXXXXXXXXXXXXXXXXX' on machine githubciXXXXXXXXXXXXXXXXXXXXXXXX.

CrowdSec CTI

EOT diff --git a/test/bats/80_alerts.bats b/test/bats/80_alerts.bats index e0fdcb02271..6d84c1a1fce 100644 --- a/test/bats/80_alerts.bats +++ b/test/bats/80_alerts.bats @@ -73,9 +73,9 @@ teardown() { rune -0 cscli alerts list -o raw <(output) rune -0 grep 10.20.30.40 <(output) rune -0 cut -d, -f1 <(output) - ALERT_ID="${output}" + ALERT_ID="$output" - rune -0 cscli alerts inspect "${ALERT_ID}" -o human + rune -0 cscli alerts inspect "$ALERT_ID" -o human rune -0 plaintext < <(output) assert_line --regexp '^#+$' assert_line --regexp "^ - ID *: ${ALERT_ID}$" @@ -93,10 +93,10 @@ teardown() { assert_line --regexp "^.* ID .* scope:value .* action .* expiration .* created_at .*$" assert_line --regexp "^.* Ip:10.20.30.40 .* ban .*$" - rune -0 cscli alerts inspect "${ALERT_ID}" -o human --details + rune -0 cscli alerts inspect "$ALERT_ID" -o human --details # XXX can we have something here? - rune -0 cscli alerts inspect "${ALERT_ID}" -o raw + rune -0 cscli alerts inspect "$ALERT_ID" -o raw assert_line --regexp "^ *capacity: 0$" assert_line --regexp "^ *id: ${ALERT_ID}$" assert_line --regexp "^ *origin: cscli$" @@ -106,11 +106,11 @@ teardown() { assert_line --regexp "^ *type: ban$" assert_line --regexp "^ *value: 10.20.30.40$" - rune -0 cscli alerts inspect "${ALERT_ID}" -o json + rune -0 cscli alerts inspect "$ALERT_ID" -o json alert=${output} - rune jq -c '.decisions[] | [.origin,.scenario,.scope,.simulated,.type,.value]' <<<"${alert}" + rune jq -c '.decisions[] | [.origin,.scenario,.scope,.simulated,.type,.value]' <<<"$alert" assert_output --regexp "\[\"cscli\",\"manual 'ban' from 'githubciXXXXXXXXXXXXXXXXXXXXXXXX.*'\",\"Ip\",false,\"ban\",\"10.20.30.40\"\]" - rune jq -c '.source' <<<"${alert}" + rune jq -c '.source' <<<"$alert" assert_json '{ip:"10.20.30.40",scope:"Ip",value:"10.20.30.40"}' } @@ -188,7 +188,7 @@ teardown() { rune -0 cscli decisions add -i 10.20.30.40 -t ban rune -9 cscli decisions list --ip 10.20.30.40 -o json rune -9 jq -r '.[].decisions[].id' 
<(output) - DECISION_ID="${output}" + DECISION_ID="$output" ./instance-crowdsec stop rune -0 ./instance-db exec_sql "UPDATE decisions SET ... WHERE id=${DECISION_ID}" diff --git a/test/bats/81_alert_context.bats b/test/bats/81_alert_context.bats index df741f5f99c..69fb4158ffd 100644 --- a/test/bats/81_alert_context.bats +++ b/test/bats/81_alert_context.bats @@ -32,8 +32,8 @@ teardown() { #---------- @test "$FILE 1.1.1.172 has context" { - tmpfile=$(TMPDIR="${BATS_TEST_TMPDIR}" mktemp) - touch "${tmpfile}" + tmpfile=$(TMPDIR="$BATS_TEST_TMPDIR" mktemp) + touch "$tmpfile" ACQUIS_YAML=$(config_get '.crowdsec_service.acquisition_path') @@ -61,9 +61,9 @@ teardown() { ./instance-crowdsec start sleep 2 - fake_log >>"${tmpfile}" + fake_log >>"$tmpfile" sleep 2 - rm -f -- "${tmpfile}" + rm -f -- "$tmpfile" rune -0 cscli alerts list -o json rune -0 jq '.[0].id' <(output) diff --git a/test/bats/90_decisions.bats b/test/bats/90_decisions.bats index 8a2b9d3ae6f..f0213a8a236 100644 --- a/test/bats/90_decisions.bats +++ b/test/bats/90_decisions.bats @@ -166,7 +166,7 @@ teardown() { # silently discarding (but logging) invalid decisions rune -0 cscli alerts delete --all - truncate -s 0 "${LOGFILE}" + truncate -s 0 "$LOGFILE" rune -0 cscli decisions import -i - --format values <<-EOT whatever @@ -182,7 +182,7 @@ teardown() { rune -0 cscli alerts delete --all - truncate -s 0 "${LOGFILE}" + truncate -s 0 "$LOGFILE" rune -0 cscli decisions import -i - --format values <<-EOT 1.2.3.4 diff --git a/test/lib/config/config-local b/test/lib/config/config-local index e5cfaf997be..693b157f531 100755 --- a/test/lib/config/config-local +++ b/test/lib/config/config-local @@ -80,7 +80,6 @@ config_generate() { .common.daemonize=true | del(.common.pid_dir) | .common.log_level="info" | - .common.force_color_logs=true | .common.log_dir=strenv(LOG_DIR) | .config_paths.config_dir=strenv(CONFIG_DIR) | .config_paths.data_dir=strenv(DATA_DIR) | From 14dc26b128042cc802ddaed0e277e27359022670 Mon Sep 17 
00:00:00 2001 From: Blesswin Samuel Date: Fri, 10 May 2024 18:30:34 +0530 Subject: [PATCH 120/318] Add ability to configure channel, username, icon emoji/url in slack notification (#2975) * Add ability to configure channel, username, icon emoji/url in slack cfg * Update slack.yaml --------- Co-authored-by: Thibault "bui" Koechlin Co-authored-by: Thibault "bui" Koechlin --- cmd/notification-slack/main.go | 18 +++++++++++++----- cmd/notification-slack/slack.yaml | 6 ++++++ 2 files changed, 19 insertions(+), 5 deletions(-) diff --git a/cmd/notification-slack/main.go b/cmd/notification-slack/main.go index 1e73d005fd8..fba1b33e334 100644 --- a/cmd/notification-slack/main.go +++ b/cmd/notification-slack/main.go @@ -14,9 +14,13 @@ import ( ) type PluginConfig struct { - Name string `yaml:"name"` - Webhook string `yaml:"webhook"` - LogLevel *string `yaml:"log_level"` + Name string `yaml:"name"` + Webhook string `yaml:"webhook"` + Channel string `yaml:"channel"` + Username string `yaml:"username"` + IconEmoji string `yaml:"icon_emoji"` + IconURL string `yaml:"icon_url"` + LogLevel *string `yaml:"log_level"` } type Notify struct { ConfigByName map[string]PluginConfig @@ -43,8 +47,12 @@ func (n *Notify) Notify(ctx context.Context, notification *protobufs.Notificatio logger.Info(fmt.Sprintf("found notify signal for %s config", notification.Name)) logger.Debug(fmt.Sprintf("posting to %s webhook, message %s", cfg.Webhook, notification.Text)) - err := slack.PostWebhookContext(ctx, n.ConfigByName[notification.Name].Webhook, &slack.WebhookMessage{ - Text: notification.Text, + err := slack.PostWebhookContext(ctx, cfg.Webhook, &slack.WebhookMessage{ + Text: notification.Text, + Channel: cfg.Channel, + Username: cfg.Username, + IconEmoji: cfg.IconEmoji, + IconURL: cfg.IconURL, }) if err != nil { logger.Error(err.Error()) diff --git a/cmd/notification-slack/slack.yaml b/cmd/notification-slack/slack.yaml index 4768e869780..677d4b757c1 100644 --- a/cmd/notification-slack/slack.yaml +++ 
b/cmd/notification-slack/slack.yaml @@ -28,6 +28,12 @@ format: | webhook: +# API request data as defined by the Slack webhook API. +#channel: +#username: +#icon_emoji: +#icon_url: + --- # type: slack From 26b3410ef1c807a9cd4bde4c66a46d73c38f60ed Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 13 May 2024 09:48:08 +0200 Subject: [PATCH 121/318] CI: improve test "cscli machines prune" (#2992) --- test/bats/30_machines.bats | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/test/bats/30_machines.bats b/test/bats/30_machines.bats index 2a04cc9bc20..415e5f8693f 100644 --- a/test/bats/30_machines.bats +++ b/test/bats/30_machines.bats @@ -94,7 +94,10 @@ teardown() { @test "cscli machines prune" { rune -0 cscli metrics - rune -0 cscli machines prune + # if the fixture has been created some time ago, + # the machines may be old enough to trigger a user prompt. + # make sure the prune duration is high enough. + rune -0 cscli machines prune --duration 1000000h assert_output 'No machines to prune.' 
rune -0 cscli machines list -o json From b4e79aaf0ad86361cdcd03776061bf8d6ff7b3a9 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 13 May 2024 10:01:48 +0200 Subject: [PATCH 122/318] use go 1.22.3 (#2994) * use go 1.22.3; alpine 3.19 * revert to alpine 3.18 --- .github/workflows/bats-hub.yml | 2 +- .github/workflows/bats-mysql.yml | 2 +- .github/workflows/bats-postgres.yml | 2 +- .github/workflows/bats-sqlite-coverage.yml | 2 +- .github/workflows/ci-windows-build-msi.yml | 2 +- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/go-tests-windows.yml | 2 +- .github/workflows/go-tests.yml | 2 +- .github/workflows/publish-tarball-release.yml | 2 +- Dockerfile | 2 +- Dockerfile.debian | 2 +- azure-pipelines.yml | 2 +- 12 files changed, 12 insertions(+), 12 deletions(-) diff --git a/.github/workflows/bats-hub.yml b/.github/workflows/bats-hub.yml index 72694276ac1..941d4d15f13 100644 --- a/.github/workflows/bats-hub.yml +++ b/.github/workflows/bats-hub.yml @@ -33,7 +33,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.2" + go-version: "1.22.3" - name: "Install bats dependencies" env: diff --git a/.github/workflows/bats-mysql.yml b/.github/workflows/bats-mysql.yml index e52c4759254..8cc544523ef 100644 --- a/.github/workflows/bats-mysql.yml +++ b/.github/workflows/bats-mysql.yml @@ -36,7 +36,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.2" + go-version: "1.22.3" - name: "Install bats dependencies" env: diff --git a/.github/workflows/bats-postgres.yml b/.github/workflows/bats-postgres.yml index c603e468a7b..87101be0159 100644 --- a/.github/workflows/bats-postgres.yml +++ b/.github/workflows/bats-postgres.yml @@ -45,7 +45,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.2" + go-version: "1.22.3" - name: "Install bats dependencies" env: diff --git a/.github/workflows/bats-sqlite-coverage.yml 
b/.github/workflows/bats-sqlite-coverage.yml index 345f8761868..4f724656ed6 100644 --- a/.github/workflows/bats-sqlite-coverage.yml +++ b/.github/workflows/bats-sqlite-coverage.yml @@ -28,7 +28,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.2" + go-version: "1.22.3" - name: "Install bats dependencies" env: diff --git a/.github/workflows/ci-windows-build-msi.yml b/.github/workflows/ci-windows-build-msi.yml index 0ea60a31dd5..9bca2122513 100644 --- a/.github/workflows/ci-windows-build-msi.yml +++ b/.github/workflows/ci-windows-build-msi.yml @@ -35,7 +35,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.2" + go-version: "1.22.3" - name: Build run: make windows_installer BUILD_RE2_WASM=1 diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index e4c078f3762..cee77161a8e 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -52,7 +52,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.2" + go-version: "1.22.3" cache-dependency-path: "**/go.sum" # Initializes the CodeQL tools for scanning. 
diff --git a/.github/workflows/go-tests-windows.yml b/.github/workflows/go-tests-windows.yml index d8ec22e6a02..e707062b5ad 100644 --- a/.github/workflows/go-tests-windows.yml +++ b/.github/workflows/go-tests-windows.yml @@ -34,7 +34,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.2" + go-version: "1.22.3" - name: Build run: | diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index 3674358ff28..3496674e7b9 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -126,7 +126,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.2" + go-version: "1.22.3" - name: Create localstack streams run: | diff --git a/.github/workflows/publish-tarball-release.yml b/.github/workflows/publish-tarball-release.yml index d2a5df0535c..bf8d5fe5f1a 100644 --- a/.github/workflows/publish-tarball-release.yml +++ b/.github/workflows/publish-tarball-release.yml @@ -25,7 +25,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.2" + go-version: "1.22.3" - name: Build the binaries run: | diff --git a/Dockerfile b/Dockerfile index d67d534fe3d..aabb0a24fc5 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # vim: set ft=dockerfile: -FROM golang:1.22.2-alpine3.18 AS build +FROM golang:1.22.3-alpine3.18 AS build ARG BUILD_VERSION diff --git a/Dockerfile.debian b/Dockerfile.debian index 4fc3a923823..655eeb0225d 100644 --- a/Dockerfile.debian +++ b/Dockerfile.debian @@ -1,5 +1,5 @@ # vim: set ft=dockerfile: -FROM golang:1.22.2-bookworm AS build +FROM golang:1.22.3-bookworm AS build ARG BUILD_VERSION diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 99909bbb467..0fc53242a9f 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -21,7 +21,7 @@ stages: - task: GoTool@0 displayName: "Install Go" inputs: - version: '1.22.2' + version: '1.22.3' - pwsh: | choco install -y make From e4a8d3b99e20b113b947156cadc3f41ecdfa7f59 Mon Sep 17 
00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 13 May 2024 12:01:17 +0200 Subject: [PATCH 123/318] deb,rpm: include empty directory etc/crowdsec/acquis.d (#2997) --- debian/rules | 1 + rpm/SPECS/crowdsec.spec | 1 + wizard.sh | 2 ++ 3 files changed, 4 insertions(+) diff --git a/debian/rules b/debian/rules index 655af3dfeea..50daae08754 100755 --- a/debian/rules +++ b/debian/rules @@ -17,6 +17,7 @@ override_dh_auto_install: mkdir -p debian/crowdsec/usr/bin mkdir -p debian/crowdsec/etc/crowdsec + mkdir -p debian/crowdsec/etc/crowdsec/acquis.d mkdir -p debian/crowdsec/usr/share/crowdsec mkdir -p debian/crowdsec/etc/crowdsec/hub/ mkdir -p debian/crowdsec/usr/share/crowdsec/config diff --git a/rpm/SPECS/crowdsec.spec b/rpm/SPECS/crowdsec.spec index 60884dfd4e6..0a20dc97deb 100644 --- a/rpm/SPECS/crowdsec.spec +++ b/rpm/SPECS/crowdsec.spec @@ -37,6 +37,7 @@ sed -i "s#/usr/local/lib/crowdsec/plugins/#%{_libdir}/%{name}/plugins/#g" config %install rm -rf %{buildroot} +mkdir -p %{buildroot}/etc/crowdsec/acquis.d mkdir -p %{buildroot}/etc/crowdsec/hub mkdir -p %{buildroot}/etc/crowdsec/patterns mkdir -p %{buildroot}/etc/crowdsec/console/ diff --git a/wizard.sh b/wizard.sh index 598f0c765f0..a3afc789bf5 100755 --- a/wizard.sh +++ b/wizard.sh @@ -409,12 +409,14 @@ check_cs_version () { install_crowdsec() { mkdir -p "${CROWDSEC_DATA_DIR}" (cd config && find patterns -type f -exec install -Dm 644 "{}" "${CROWDSEC_CONFIG_PATH}/{}" \; && cd ../) || exit + mkdir -p "${CROWDSEC_CONFIG_PATH}/acquis.d" || exit mkdir -p "${CROWDSEC_CONFIG_PATH}/scenarios" || exit mkdir -p "${CROWDSEC_CONFIG_PATH}/postoverflows" || exit mkdir -p "${CROWDSEC_CONFIG_PATH}/collections" || exit mkdir -p "${CROWDSEC_CONFIG_PATH}/patterns" || exit mkdir -p "${CROWDSEC_CONFIG_PATH}/appsec-configs" || exit mkdir -p "${CROWDSEC_CONFIG_PATH}/appsec-rules" || exit + mkdir -p "${CROWDSEC_CONFIG_PATH}/contexts" || exit mkdir -p "${CROWDSEC_CONSOLE_DIR}" || exit # tmp From 
e3c6a5ba70df511c212d8927ed24aaade56271f0 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 13 May 2024 17:09:30 +0200 Subject: [PATCH 124/318] LAPI: support CRL files with multiple PEM blocks (#3002) --- pkg/apiserver/middlewares/v1/tls_auth.go | 63 +++++++++++----------- test/bats/11_bouncers_tls.bats | 50 ++++++++++++------ test/bats/30_machines_tls.bats | 67 +++++++++++++++--------- 3 files changed, 108 insertions(+), 72 deletions(-) diff --git a/pkg/apiserver/middlewares/v1/tls_auth.go b/pkg/apiserver/middlewares/v1/tls_auth.go index bd2c4bb30e7..c2fcc9c7264 100644 --- a/pkg/apiserver/middlewares/v1/tls_auth.go +++ b/pkg/apiserver/middlewares/v1/tls_auth.go @@ -5,6 +5,7 @@ import ( "crypto" "crypto/x509" "encoding/pem" + "errors" "fmt" "io" "net/http" @@ -135,31 +136,35 @@ func (ta *TLSAuth) isCRLRevoked(cert *x509.Certificate) (bool, bool) { return false, false } - crlBinary, rest := pem.Decode(crlContent) - if len(rest) > 0 { - ta.logger.Warn("CRL file contains more than one PEM block, ignoring the rest") - } + var crlBlock *pem.Block - crl, err := x509.ParseRevocationList(crlBinary.Bytes) - if err != nil { - ta.logger.Errorf("could not parse CRL file, skipping check: %s", err) - return false, false - } + for { + crlBlock, crlContent = pem.Decode(crlContent) + if crlBlock == nil { + break // no more PEM blocks + } - now := time.Now().UTC() + crl, err := x509.ParseRevocationList(crlBlock.Bytes) + if err != nil { + ta.logger.Errorf("could not parse a PEM block in CRL file, skipping: %s", err) + continue + } - if now.After(crl.NextUpdate) { - ta.logger.Warn("CRL has expired, will still validate the cert against it.") - } + now := time.Now().UTC() - if now.Before(crl.ThisUpdate) { - ta.logger.Warn("CRL is not yet valid, will still validate the cert against it.") - } + if now.After(crl.NextUpdate) { + ta.logger.Warn("CRL has expired, will still validate the cert against it.") + } - for _, revoked := range 
crl.RevokedCertificateEntries { - if revoked.SerialNumber.Cmp(cert.SerialNumber) == 0 { - ta.logger.Warn("client certificate is revoked by CRL") - return true, true + if now.Before(crl.ThisUpdate) { + ta.logger.Warn("CRL is not yet valid, will still validate the cert against it.") + } + + for _, revoked := range crl.RevokedCertificateEntries { + if revoked.SerialNumber.Cmp(cert.SerialNumber) == 0 { + ta.logger.Warn("client certificate is revoked by CRL") + return true, true + } } } @@ -181,9 +186,7 @@ func (ta *TLSAuth) isRevoked(cert *x509.Certificate, issuer *x509.Certificate) ( } revokedByOCSP, cacheOCSP := ta.isOCSPRevoked(cert, issuer) - revokedByCRL, cacheCRL := ta.isCRLRevoked(cert) - revoked := revokedByOCSP || revokedByCRL if cacheOCSP && cacheCRL { @@ -203,8 +206,8 @@ func (ta *TLSAuth) isInvalid(cert *x509.Certificate, issuer *x509.Certificate) ( revoked, err := ta.isRevoked(cert, issuer) if err != nil { - //Fail securely, if we can't check the revocation status, let's consider the cert invalid - //We may change this in the future based on users feedback, but this seems the most sensible thing to do + // Fail securely, if we can't check the revocation status, let's consider the cert invalid + // We may change this in the future based on users feedback, but this seems the most sensible thing to do return true, fmt.Errorf("could not check for client certification revocation status: %w", err) } @@ -213,12 +216,12 @@ func (ta *TLSAuth) isInvalid(cert *x509.Certificate, issuer *x509.Certificate) ( func (ta *TLSAuth) SetAllowedOu(allowedOus []string) error { for _, ou := range allowedOus { - //disallow empty ou + // disallow empty ou if ou == "" { - return fmt.Errorf("empty ou isn't allowed") + return errors.New("empty ou isn't allowed") } - //drop & warn on duplicate ou + // drop & warn on duplicate ou ok := true for _, validOu := range ta.AllowedOUs { @@ -238,11 +241,11 @@ func (ta *TLSAuth) SetAllowedOu(allowedOus []string) error { } func (ta *TLSAuth) 
ValidateCert(c *gin.Context) (bool, string, error) { - //Checks cert validity, Returns true + CN if client cert matches requested OU + // Checks cert validity, Returns true + CN if client cert matches requested OU var clientCert *x509.Certificate if c.Request.TLS == nil || len(c.Request.TLS.PeerCertificates) == 0 { - //do not error if it's not TLS or there are no peer certs + // do not error if it's not TLS or there are no peer certs return false, "", nil } @@ -279,7 +282,7 @@ func (ta *TLSAuth) ValidateCert(c *gin.Context) (bool, string, error) { return true, clientCert.Subject.CommonName, nil } - return false, "", fmt.Errorf("no verified cert in request") + return false, "", errors.New("no verified cert in request") } func NewTLSAuth(allowedOus []string, crlPath string, cacheExpiration time.Duration, logger *log.Entry) (*TLSAuth, error) { diff --git a/test/bats/11_bouncers_tls.bats b/test/bats/11_bouncers_tls.bats index 438bec40242..84de3d2f488 100644 --- a/test/bats/11_bouncers_tls.bats +++ b/test/bats/11_bouncers_tls.bats @@ -13,24 +13,37 @@ setup_file() { CFDIR="${BATS_TEST_DIRNAME}/testdata/cfssl" export CFDIR - #gen the CA + # Generate the CA cfssl gencert --initca "${CFDIR}/ca.json" 2>/dev/null | cfssljson --bare "${tmpdir}/ca" - #gen an intermediate + + # Generate an intermediate cfssl gencert --initca "${CFDIR}/intermediate.json" 2>/dev/null | cfssljson --bare "${tmpdir}/inter" cfssl sign -ca "${tmpdir}/ca.pem" -ca-key "${tmpdir}/ca-key.pem" -config "${CFDIR}/profiles.json" -profile intermediate_ca "${tmpdir}/inter.csr" 2>/dev/null | cfssljson --bare "${tmpdir}/inter" - #gen server cert for crowdsec with the intermediate + + # Generate server cert for crowdsec with the intermediate cfssl gencert -ca "${tmpdir}/inter.pem" -ca-key "${tmpdir}/inter-key.pem" -config "${CFDIR}/profiles.json" -profile=server "${CFDIR}/server.json" 2>/dev/null | cfssljson --bare "${tmpdir}/server" - #gen client cert for the bouncer + + # Generate client cert for the bouncer 
cfssl gencert -ca "${tmpdir}/inter.pem" -ca-key "${tmpdir}/inter-key.pem" -config "${CFDIR}/profiles.json" -profile=client "${CFDIR}/bouncer.json" 2>/dev/null | cfssljson --bare "${tmpdir}/bouncer" - #gen client cert for the bouncer with an invalid OU + + # Genearte client cert for the bouncer with an invalid OU cfssl gencert -ca "${tmpdir}/inter.pem" -ca-key "${tmpdir}/inter-key.pem" -config "${CFDIR}/profiles.json" -profile=client "${CFDIR}/bouncer_invalid.json" 2>/dev/null | cfssljson --bare "${tmpdir}/bouncer_bad_ou" - #gen client cert for the bouncer directly signed by the CA, it should be refused by crowdsec as uses the intermediate + + # Generate client cert for the bouncer directly signed by the CA, it should be refused by crowdsec as uses the intermediate cfssl gencert -ca "${tmpdir}/ca.pem" -ca-key "${tmpdir}/ca-key.pem" -config "${CFDIR}/profiles.json" -profile=client "${CFDIR}/bouncer.json" 2>/dev/null | cfssljson --bare "${tmpdir}/bouncer_invalid" - cfssl gencert -ca "${tmpdir}/inter.pem" -ca-key "${tmpdir}/inter-key.pem" -config "${CFDIR}/profiles.json" -profile=client "${CFDIR}/bouncer.json" 2>/dev/null | cfssljson --bare "${tmpdir}/bouncer_revoked" - serial="$(openssl x509 -noout -serial -in "${tmpdir}/bouncer_revoked.pem" | cut -d '=' -f2)" - echo "ibase=16; ${serial}" | bc >"${tmpdir}/serials.txt" - cfssl gencrl "${tmpdir}/serials.txt" "${tmpdir}/ca.pem" "${tmpdir}/ca-key.pem" | base64 -d | openssl crl -inform DER -out "${tmpdir}/crl.pem" + # Generate revoked client certs + for cert_name in "revoked_1" "revoked_2"; do + cfssl gencert -ca "${tmpdir}/inter.pem" -ca-key "${tmpdir}/inter-key.pem" -config "${CFDIR}/profiles.json" -profile=client "${CFDIR}/bouncer.json" 2>/dev/null | cfssljson --bare "${tmpdir}/${cert_name}" + serial="$(openssl x509 -noout -serial -in "${tmpdir}/${cert_name}.pem" | cut -d '=' -f2)" + echo "ibase=16; ${serial}" | bc >"${tmpdir}/serials_${cert_name}.txt" + done + + # Generate separate CRL blocks and concatenate them + for 
cert_name in "revoked_1" "revoked_2"; do + cfssl gencrl "${tmpdir}/serials_${cert_name}.txt" "${tmpdir}/ca.pem" "${tmpdir}/ca-key.pem" | base64 -d | openssl crl -inform DER -out "${tmpdir}/crl_${cert_name}.pem" + done + cat "${tmpdir}/crl_revoked_1.pem" "${tmpdir}/crl_revoked_2.pem" >"${tmpdir}/crl.pem" cat "${tmpdir}/ca.pem" "${tmpdir}/inter.pem" > "${tmpdir}/bundle.pem" @@ -90,11 +103,14 @@ teardown() { } @test "simulate one bouncer request with a revoked certificate" { - truncate_log - rune -0 curl -i -s --cert "${tmpdir}/bouncer_revoked.pem" --key "${tmpdir}/bouncer_revoked-key.pem" --cacert "${tmpdir}/bundle.pem" https://localhost:8080/v1/decisions\?ip=42.42.42.42 - assert_log --partial "client certificate is revoked by CRL" - assert_log --partial "client certificate for CN=localhost OU=[bouncer-ou] is revoked" - assert_output --partial "access forbidden" - rune -0 cscli bouncers list -o json - assert_output "[]" + # we have two certificates revoked by different CRL blocks + for cert_name in "revoked_1" "revoked_2"; do + truncate_log + rune -0 curl -i -s --cert "${tmpdir}/${cert_name}.pem" --key "${tmpdir}/${cert_name}-key.pem" --cacert "${tmpdir}/bundle.pem" https://localhost:8080/v1/decisions\?ip=42.42.42.42 + assert_log --partial "client certificate is revoked by CRL" + assert_log --partial "client certificate for CN=localhost OU=[bouncer-ou] is revoked" + assert_output --partial "access forbidden" + rune -0 cscli bouncers list -o json + assert_output "[]" + done } diff --git a/test/bats/30_machines_tls.bats b/test/bats/30_machines_tls.bats index 877f8672b24..b6d089d68e7 100644 --- a/test/bats/30_machines_tls.bats +++ b/test/bats/30_machines_tls.bats @@ -16,24 +16,37 @@ setup_file() { CFDIR="${BATS_TEST_DIRNAME}/testdata/cfssl" export CFDIR - #gen the CA + # Generate the CA cfssl gencert --initca "${CFDIR}/ca.json" 2>/dev/null | cfssljson --bare "${tmpdir}/ca" - #gen an intermediate + + # Generate an intermediate cfssl gencert --initca 
"${CFDIR}/intermediate.json" 2>/dev/null | cfssljson --bare "${tmpdir}/inter" cfssl sign -ca "${tmpdir}/ca.pem" -ca-key "${tmpdir}/ca-key.pem" -config "${CFDIR}/profiles.json" -profile intermediate_ca "${tmpdir}/inter.csr" 2>/dev/null | cfssljson --bare "${tmpdir}/inter" - #gen server cert for crowdsec with the intermediate + + # Generate server cert for crowdsec with the intermediate cfssl gencert -ca "${tmpdir}/inter.pem" -ca-key "${tmpdir}/inter-key.pem" -config "${CFDIR}/profiles.json" -profile=server "${CFDIR}/server.json" 2>/dev/null | cfssljson --bare "${tmpdir}/server" - #gen client cert for the agent + + # Generate client cert for the agent cfssl gencert -ca "${tmpdir}/inter.pem" -ca-key "${tmpdir}/inter-key.pem" -config "${CFDIR}/profiles.json" -profile=client "${CFDIR}/agent.json" 2>/dev/null | cfssljson --bare "${tmpdir}/agent" - #gen client cert for the agent with an invalid OU + + # Genearte client cert for the agent with an invalid OU cfssl gencert -ca "${tmpdir}/inter.pem" -ca-key "${tmpdir}/inter-key.pem" -config "${CFDIR}/profiles.json" -profile=client "${CFDIR}/agent_invalid.json" 2>/dev/null | cfssljson --bare "${tmpdir}/agent_bad_ou" - #gen client cert for the agent directly signed by the CA, it should be refused by crowdsec as uses the intermediate + + # Generate client cert for the bouncer directly signed by the CA, it should be refused by crowdsec as uses the intermediate cfssl gencert -ca "${tmpdir}/ca.pem" -ca-key "${tmpdir}/ca-key.pem" -config "${CFDIR}/profiles.json" -profile=client "${CFDIR}/agent.json" 2>/dev/null | cfssljson --bare "${tmpdir}/agent_invalid" - cfssl gencert -ca "${tmpdir}/inter.pem" -ca-key "${tmpdir}/inter-key.pem" -config "${CFDIR}/profiles.json" -profile=client "${CFDIR}/agent.json" 2>/dev/null | cfssljson --bare "${tmpdir}/agent_revoked" - serial="$(openssl x509 -noout -serial -in "${tmpdir}/agent_revoked.pem" | cut -d '=' -f2)" - echo "ibase=16; ${serial}" | bc >"${tmpdir}/serials.txt" - cfssl gencrl 
"${tmpdir}/serials.txt" "${tmpdir}/ca.pem" "${tmpdir}/ca-key.pem" | base64 -d | openssl crl -inform DER -out "${tmpdir}/crl.pem" + # Generate revoked client cert + for cert_name in "revoked_1" "revoked_2"; do + cfssl gencert -ca "${tmpdir}/inter.pem" -ca-key "${tmpdir}/inter-key.pem" -config "${CFDIR}/profiles.json" -profile=client "${CFDIR}/agent.json" 2>/dev/null | cfssljson --bare "${tmpdir}/${cert_name}" + serial="$(openssl x509 -noout -serial -in "${tmpdir}/${cert_name}.pem" | cut -d '=' -f2)" + echo "ibase=16; ${serial}" | bc >"${tmpdir}/serials_${cert_name}.txt" + done + + # Generate separate CRL blocks and concatenate them + for cert_name in "revoked_1" "revoked_2"; do + cfssl gencrl "${tmpdir}/serials_${cert_name}.txt" "${tmpdir}/ca.pem" "${tmpdir}/ca-key.pem" | base64 -d | openssl crl -inform DER -out "${tmpdir}/crl_${cert_name}.pem" + done + cat "${tmpdir}/crl_revoked_1.pem" "${tmpdir}/crl_revoked_2.pem" >"${tmpdir}/crl.pem" cat "${tmpdir}/ca.pem" "${tmpdir}/inter.pem" > "${tmpdir}/bundle.pem" @@ -181,19 +194,23 @@ teardown() { } @test "revoked cert for agent" { - truncate_log - config_set "${CONFIG_DIR}/local_api_credentials.yaml" ' - .ca_cert_path=strenv(tmpdir) + "/bundle.pem" | - .key_path=strenv(tmpdir) + "/agent_revoked-key.pem" | - .cert_path=strenv(tmpdir) + "/agent_revoked.pem" | - .url="https://127.0.0.1:8080" - ' - - config_set "${CONFIG_DIR}/local_api_credentials.yaml" 'del(.login,.password)' - ./instance-crowdsec start - rune -1 cscli lapi status - assert_log --partial "client certificate is revoked by CRL" - assert_log --partial "client certificate for CN=localhost OU=[agent-ou] is revoked" - rune -0 cscli machines list -o json - assert_output '[]' + # we have two certificates revoked by different CRL blocks + for cert_name in "revoked_1" "revoked_2"; do + truncate_log + cert_name="$cert_name" config_set "${CONFIG_DIR}/local_api_credentials.yaml" ' + .ca_cert_path=strenv(tmpdir) + "/bundle.pem" | + .key_path=strenv(tmpdir) + "/" + 
strenv(cert_name) + "-key.pem" | + .cert_path=strenv(tmpdir) + "/" + strenv(cert_name) + ".pem" | + .url="https://127.0.0.1:8080" + ' + + config_set "${CONFIG_DIR}/local_api_credentials.yaml" 'del(.login,.password)' + ./instance-crowdsec start + rune -1 cscli lapi status + assert_log --partial "client certificate is revoked by CRL" + assert_log --partial "client certificate for CN=localhost OU=[agent-ou] is revoked" + rune -0 cscli machines list -o json + assert_output '[]' + ./instance-crowdsec stop + done } From 11893b2915c9ae9d153cc6a20355e356b08599db Mon Sep 17 00:00:00 2001 From: Laurence Jones Date: Tue, 14 May 2024 14:55:08 +0100 Subject: [PATCH 125/318] [cscli] Add tab completion to notifications [test, inspect] (#2765) * Add cscli notifications test ValidArgsFunction for tab completion * Split function to reusable and pass to inspect since both rely on a valid plugin name, fix short/long of list command * Short should be short * Removed redundant prerune since Args with validate it for us * Make english more english * fix: bad merge now fixed --------- Co-authored-by: Thibault "bui" Koechlin --- cmd/crowdsec-cli/notifications.go | 29 +++++++++++++++++++++++------ 1 file changed, 23 insertions(+), 6 deletions(-) diff --git a/cmd/crowdsec-cli/notifications.go b/cmd/crowdsec-cli/notifications.go index 0b5ee537806..cb102df6928 100644 --- a/cmd/crowdsec-cli/notifications.go +++ b/cmd/crowdsec-cli/notifications.go @@ -10,6 +10,7 @@ import ( "net/url" "os" "path/filepath" + "slices" "strconv" "strings" "time" @@ -156,8 +157,8 @@ func (cli *cliNotifications) getProfilesConfigs() (map[string]NotificationsCfg, func (cli *cliNotifications) NewListCmd() *cobra.Command { cmd := &cobra.Command{ Use: "list", - Short: "list active notifications plugins", - Long: `list active notifications plugins`, + Short: "list notifications plugins", + Long: `list notifications plugins and their status (active or not)`, Example: `cscli notifications list`, Args: cobra.ExactArgs(0), 
DisableAutoGenTag: true, @@ -205,10 +206,11 @@ func (cli *cliNotifications) NewListCmd() *cobra.Command { func (cli *cliNotifications) NewInspectCmd() *cobra.Command { cmd := &cobra.Command{ Use: "inspect", - Short: "Inspect active notifications plugin configuration", - Long: `Inspect active notifications plugin and show configuration`, + Short: "Inspect notifications plugin", + Long: `Inspect notifications plugin and show configuration`, Example: `cscli notifications inspect `, Args: cobra.ExactArgs(1), + ValidArgsFunction: cli.notificationConfigFilter, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, args []string) error { cfg := cli.cfg() @@ -243,7 +245,21 @@ func (cli *cliNotifications) NewInspectCmd() *cobra.Command { return cmd } -func (cli *cliNotifications) NewTestCmd() *cobra.Command { +func (cli *cliNotifications) notificationConfigFilter(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + ncfgs, err := cli.getProfilesConfigs() + if err != nil { + return nil, cobra.ShellCompDirectiveError + } + var ret []string + for k := range ncfgs { + if strings.Contains(k, toComplete) && !slices.Contains(args, k) { + ret = append(ret, k) + } + } + return ret, cobra.ShellCompDirectiveNoFileComp +} + +func (cli cliNotifications) NewTestCmd() *cobra.Command { var ( pluginBroker csplugin.PluginBroker pluginTomb tomb.Tomb @@ -253,10 +269,11 @@ func (cli *cliNotifications) NewTestCmd() *cobra.Command { cmd := &cobra.Command{ Use: "test [plugin name]", Short: "send a generic test alert to notification plugin", - Long: `send a generic test alert to a notification plugin to test configuration even if is not active`, + Long: `send a generic test alert to a notification plugin even if it is not active in profiles`, Example: `cscli notifications test [plugin_name]`, Args: cobra.ExactArgs(1), DisableAutoGenTag: true, + ValidArgsFunction: cli.notificationConfigFilter, PreRunE: func(_ *cobra.Command, args []string) error { cfg := 
cli.cfg() pconfigs, err := cli.getPluginConfigs() From b5e5078fc72c32bd94e86b90ecf27698eceb6fb5 Mon Sep 17 00:00:00 2001 From: Laurence Jones Date: Tue, 14 May 2024 17:31:15 +0100 Subject: [PATCH 126/318] fix: add sslmode check and apend flag to conn string (#3009) --- pkg/csconfig/database.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pkg/csconfig/database.go b/pkg/csconfig/database.go index 9a9ed9a9f7f..2fe610eba68 100644 --- a/pkg/csconfig/database.go +++ b/pkg/csconfig/database.go @@ -48,7 +48,7 @@ type AuthGCCfg struct { } type FlushDBCfg struct { - MaxItems *int `yaml:"max_items,omitempty"` + MaxItems *int `yaml:"max_items,omitempty"` // We could unmarshal as time.Duration, but alert filters right now are a map of strings MaxAge *string `yaml:"max_age,omitempty"` BouncersGC *AuthGCCfg `yaml:"bouncers_autodelete,omitempty"` @@ -131,6 +131,9 @@ func (d *DatabaseCfg) ConnectionString() string { } else { connString = fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?parseTime=True", d.User, d.Password, d.Host, d.Port, d.DbName) } + if d.Sslmode != "" { + connString = fmt.Sprintf("%s&tls=%s", connString, d.Sslmode) + } case "postgres", "postgresql", "pgx": if d.isSocketConfig() { connString = fmt.Sprintf("host=%s user=%s dbname=%s password=%s", d.DbPath, d.User, d.DbName, d.Password) From 6b978b09b3a6fe1fc6001c2c40bb824a78fbbe06 Mon Sep 17 00:00:00 2001 From: blotus Date: Wed, 15 May 2024 10:04:42 +0200 Subject: [PATCH 127/318] docker: conditionally update hub (#2948) --- .github/workflows/docker-tests.yml | 14 +++--- Dockerfile | 1 + Dockerfile.debian | 1 + docker/docker_start.sh | 69 ++++++++++++++++++++++++++---- docker/preload-hub-items | 22 ++++++++++ test/bin/preload-hub-items | 21 ++------- 6 files changed, 94 insertions(+), 34 deletions(-) create mode 100755 docker/preload-hub-items diff --git a/.github/workflows/docker-tests.yml b/.github/workflows/docker-tests.yml index 3e87d3ba4f1..d3ae4f90d79 100644 --- a/.github/workflows/docker-tests.yml 
+++ b/.github/workflows/docker-tests.yml @@ -59,15 +59,15 @@ jobs: cd docker/test python -m pip install --upgrade pipenv wheel - #- name: "Cache virtualenvs" - # id: cache-pipenv - # uses: actions/cache@v4 - # with: - # path: ~/.local/share/virtualenvs - # key: ${{ runner.os }}-pipenv-${{ hashFiles('**/Pipfile.lock') }} + - name: "Cache virtualenvs" + id: cache-pipenv + uses: actions/cache@v4 + with: + path: ~/.local/share/virtualenvs + key: ${{ runner.os }}-pipenv-${{ hashFiles('**/Pipfile.lock') }} - name: "Install dependencies" - #if: steps.cache-pipenv.outputs.cache-hit != 'true' + if: steps.cache-pipenv.outputs.cache-hit != 'true' run: | cd docker/test pipenv install --deploy diff --git a/Dockerfile b/Dockerfile index aabb0a24fc5..faa50f3f79a 100644 --- a/Dockerfile +++ b/Dockerfile @@ -25,6 +25,7 @@ RUN make clean release DOCKER_BUILD=1 BUILD_STATIC=1 && \ ./wizard.sh --docker-mode && \ cd - >/dev/null && \ cscli hub update && \ + ./docker/preload-hub-items && \ cscli collections install crowdsecurity/linux && \ cscli parsers install crowdsecurity/whitelists diff --git a/Dockerfile.debian b/Dockerfile.debian index 655eeb0225d..0ef1727f3e6 100644 --- a/Dockerfile.debian +++ b/Dockerfile.debian @@ -30,6 +30,7 @@ RUN make clean release DOCKER_BUILD=1 BUILD_STATIC=1 && \ ./wizard.sh --docker-mode && \ cd - >/dev/null && \ cscli hub update && \ + ./docker/preload-hub-items && \ cscli collections install crowdsecurity/linux && \ cscli parsers install crowdsecurity/whitelists diff --git a/docker/docker_start.sh b/docker/docker_start.sh index dd96184ccbc..954dbd9fc79 100755 --- a/docker/docker_start.sh +++ b/docker/docker_start.sh @@ -6,6 +6,9 @@ set -e shopt -s inherit_errexit +# Note that "if function_name" in bash matches when the function returns 0, +# meaning successful execution. + # match true, TRUE, True, tRuE, etc. 
istrue() { case "$(echo "$1" | tr '[:upper:]' '[:lower:]')" in @@ -50,6 +53,52 @@ cscli() { command cscli -c "$CONFIG_FILE" "$@" } +run_hub_update() { + index_modification_time=$(stat -c %Y /etc/crowdsec/hub/.index.json 2>/dev/null) + # Run cscli hub update if no date or if the index file is older than 24h + if [ -z "$index_modification_time" ] || [ $(( $(date +%s) - index_modification_time )) -gt 86400 ]; then + cscli hub update + else + echo "Skipping hub update, index file is recent" + fi +} + +is_mounted() { + path=$(readlink -f "$1") + mounts=$(awk '{print $2}' /proc/mounts) + while true; do + if grep -qE ^"$path"$ <<< "$mounts"; then + echo "$path was found in a volume" + return 0 + fi + path=$(dirname "$path") + if [ "$path" = "/" ]; then + return 1 + fi + done + return 1 #unreachable +} + +run_hub_update_if_from_volume() { + if is_mounted "/etc/crowdsec/hub/.index.json"; then + echo "Running hub update" + run_hub_update + else + echo "Skipping hub update, index file is not in a volume" + fi +} + +run_hub_upgrade_if_from_volume() { + isfalse "$NO_HUB_UPGRADE" || return 0 + if is_mounted "/var/lib/crowdsec/data"; then + echo "Running hub upgrade" + cscli hub upgrade + else + echo "Skipping hub upgrade, data directory is not in a volume" + fi + +} + # conf_get [file_path] # retrieve a value from a file (by default $CONFIG_FILE) conf_get() { @@ -119,7 +168,12 @@ cscli_if_clean() { error_only="" echo "Running: cscli $error_only $itemtype $action \"$obj\" $*" # shellcheck disable=SC2086 - cscli $error_only "$itemtype" "$action" "$obj" "$@" + if ! 
cscli $error_only "$itemtype" "$action" "$obj" "$@"; then + echo "Failed to $action $itemtype/$obj, running hub update before retrying" + run_hub_update + # shellcheck disable=SC2086 + cscli $error_only "$itemtype" "$action" "$obj" "$@" + fi fi done } @@ -280,9 +334,9 @@ fi if [ "$GID" != "" ]; then if istrue "$(conf_get '.db_config.type == "sqlite"')"; then # don't fail if the db is not there yet - chown -f ":$GID" "$(conf_get '.db_config.db_path')" 2>/dev/null \ - && echo "sqlite database permissions updated" \ - || true + if chown -f ":$GID" "$(conf_get '.db_config.db_path')" 2>/dev/null; then + echo "sqlite database permissions updated" + fi fi fi @@ -304,11 +358,8 @@ conf_set_if "$PLUGIN_DIR" '.config_paths.plugin_dir = strenv(PLUGIN_DIR)' ## Install hub items -cscli hub update || true - -if isfalse "$NO_HUB_UPGRADE"; then - cscli hub upgrade || true -fi +run_hub_update_if_from_volume || true +run_hub_upgrade_if_from_volume || true cscli_if_clean parsers install crowdsecurity/docker-logs cscli_if_clean parsers install crowdsecurity/cri-logs diff --git a/docker/preload-hub-items b/docker/preload-hub-items new file mode 100755 index 00000000000..45155d17af9 --- /dev/null +++ b/docker/preload-hub-items @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +set -eu + +# pre-download everything but don't install anything + +echo "Pre-downloading Hub content..." + +types=$(cscli hub types -o raw) + +for itemtype in $types; do + ALL_ITEMS=$(cscli "$itemtype" list -a -o json | itemtype="$itemtype" yq '.[env(itemtype)][] | .name') + if [[ -n "${ALL_ITEMS}" ]]; then + #shellcheck disable=SC2086 + cscli "$itemtype" install \ + $ALL_ITEMS \ + --download-only \ + --error + fi +done + +echo " done." 
\ No newline at end of file diff --git a/test/bin/preload-hub-items b/test/bin/preload-hub-items index 14e9cff998c..319544d843d 100755 --- a/test/bin/preload-hub-items +++ b/test/bin/preload-hub-items @@ -9,20 +9,12 @@ THIS_DIR=$(CDPATH= cd -- "$(dirname -- "$0")" && pwd) # pre-download everything but don't install anything -echo -n "Purging existing hub..." +echo "Pre-downloading Hub content..." types=$("$CSCLI" hub types -o raw) for itemtype in $types; do - "$CSCLI" "${itemtype}" delete --all --error --purge --force -done - -echo " done." - -echo -n "Pre-downloading Hub content..." - -for itemtype in $types; do - ALL_ITEMS=$("$CSCLI" "$itemtype" list -a -o json | jq --arg itemtype "$itemtype" -r '.[$itemtype][].name') + ALL_ITEMS=$("$CSCLI" "$itemtype" list -a -o json | itemtype="$itemtype" yq '.[env(itemtype)][] | .name') if [[ -n "${ALL_ITEMS}" ]]; then #shellcheck disable=SC2086 "$CSCLI" "$itemtype" install \ @@ -32,11 +24,4 @@ for itemtype in $types; do fi done -# XXX: download-only works only for collections, not for parsers, scenarios, postoverflows. -# so we have to delete the links manually, and leave the downloaded files in place - -for itemtype in $types; do - "$CSCLI" "$itemtype" delete --all --error -done - -echo " done." +echo " done." 
\ No newline at end of file From ecd82ecfbd7dc09514029ef1cd99267b4c84ad0b Mon Sep 17 00:00:00 2001 From: Laurence Jones Date: Wed, 15 May 2024 09:21:45 +0100 Subject: [PATCH 128/318] feat: File notification plugin (#2932) * wip: basic impl of file notification no log rotate but might now do it :shrug: * wip: ticker to 2 seconds and lower some log levels * wip: remove redundant logrus formatter * wip: the plugin should not handle it own data queue since the plugin process may timeout, so instead have a function that uses said context and loop whilst locking the filewriter this may not be the best way :shrug:, however, I dont want multiple notifications to attempt to reopen the file if it has been rotated outside of the plugin context * wip: impl log rotation which checks on check append, however, this may cause some issues in slow systems as the mutex lock doesnt give up until the file is rotated, however, the plugin looks for context and will give up if the plugin broker decides its timeout and will retry once the plugin has pushed again * wip: update yaml dep * wip: me no english great * wip: even if the file has been rotated outside our control we should still compute the file size * wip: improve context handling with creating a custom io writer struct which checks the context before attempting to write * wip: used return byte count instead of calling a conversion again * wip: actually check the enabled flag on log rotate * wip: changed my mind, we check when we check file size * wip: use io copy instead for memory alloc * fix: add notification file to deb/rpm build --- cmd/notification-file/Makefile | 17 +++ cmd/notification-file/file.yaml | 23 +++ cmd/notification-file/main.go | 250 ++++++++++++++++++++++++++++++++ debian/install | 1 + debian/rules | 1 + rpm/SPECS/crowdsec.spec | 5 +- wizard.sh | 4 + 7 files changed, 300 insertions(+), 1 deletion(-) create mode 100644 cmd/notification-file/Makefile create mode 100644 cmd/notification-file/file.yaml create mode 
100644 cmd/notification-file/main.go diff --git a/cmd/notification-file/Makefile b/cmd/notification-file/Makefile new file mode 100644 index 00000000000..4504328c49a --- /dev/null +++ b/cmd/notification-file/Makefile @@ -0,0 +1,17 @@ +ifeq ($(OS), Windows_NT) + SHELL := pwsh.exe + .SHELLFLAGS := -NoProfile -Command + EXT = .exe +endif + +GO = go +GOBUILD = $(GO) build + +BINARY_NAME = notification-file$(EXT) + +build: clean + $(GOBUILD) $(LD_OPTS) -o $(BINARY_NAME) + +.PHONY: clean +clean: + @$(RM) $(BINARY_NAME) $(WIN_IGNORE_ERR) diff --git a/cmd/notification-file/file.yaml b/cmd/notification-file/file.yaml new file mode 100644 index 00000000000..61c77b9eb49 --- /dev/null +++ b/cmd/notification-file/file.yaml @@ -0,0 +1,23 @@ +# Don't change this +type: file + +name: file_default # this must match with the registered plugin in the profile +log_level: info # Options include: trace, debug, info, warn, error, off + +# This template render all events as ndjson +format: | + {{range . -}} + { "time": "{{.StopAt}}", "program": "crowdsec", "alert": {{. | toJson }} } + {{ end -}} + +# group_wait: # duration to wait collecting alerts before sending to this plugin, eg "30s" +# group_threshold: # if alerts exceed this, then the plugin will be sent the message. 
eg "10" + +#Use full path EG /tmp/crowdsec_alerts.json or %TEMP%\crowdsec_alerts.json +log_path: "/tmp/crowdsec_alerts.json" +rotate: + enabled: true # Change to false if you want to handle log rotate on system basis + max_size: 500 # in MB + max_files: 5 + max_age: 5 + compress: true diff --git a/cmd/notification-file/main.go b/cmd/notification-file/main.go new file mode 100644 index 00000000000..467bdd4a4ff --- /dev/null +++ b/cmd/notification-file/main.go @@ -0,0 +1,250 @@ +package main + +import ( + "compress/gzip" + "context" + "fmt" + "io" + "os" + "path/filepath" + "sort" + "sync" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/protobufs" + "github.com/hashicorp/go-hclog" + plugin "github.com/hashicorp/go-plugin" + "gopkg.in/yaml.v3" +) + +var ( + FileWriter *os.File + FileWriteMutex *sync.Mutex + FileSize int64 +) + +type FileWriteCtx struct { + Ctx context.Context + Writer io.Writer +} + +func (w *FileWriteCtx) Write(p []byte) (n int, err error) { + if err := w.Ctx.Err(); err != nil { + return 0, err + } + return w.Writer.Write(p) +} + +type PluginConfig struct { + Name string `yaml:"name"` + LogLevel string `yaml:"log_level"` + LogPath string `yaml:"log_path"` + LogRotate LogRotate `yaml:"rotate"` +} + +type LogRotate struct { + MaxSize int `yaml:"max_size"` + MaxAge int `yaml:"max_age"` + MaxFiles int `yaml:"max_files"` + Enabled bool `yaml:"enabled"` + Compress bool `yaml:"compress"` +} + +type FilePlugin struct { + PluginConfigByName map[string]PluginConfig +} + +var logger hclog.Logger = hclog.New(&hclog.LoggerOptions{ + Name: "file-plugin", + Level: hclog.LevelFromString("INFO"), + Output: os.Stderr, + JSONFormat: true, +}) + +func (r *LogRotate) rotateLogs(cfg PluginConfig) { + // Rotate the log file + err := r.rotateLogFile(cfg.LogPath, r.MaxFiles) + if err != nil { + logger.Error("Failed to rotate log file", "error", err) + } + // Reopen the FileWriter + FileWriter.Close() + FileWriter, err = os.OpenFile(cfg.LogPath, 
os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + logger.Error("Failed to reopen log file", "error", err) + } + // Reset the file size + FileInfo, err := FileWriter.Stat() + if err != nil { + logger.Error("Failed to get file info", "error", err) + } + FileSize = FileInfo.Size() +} + +func (r *LogRotate) rotateLogFile(logPath string, maxBackups int) error { + // Rename the current log file + backupPath := logPath + "." + time.Now().Format("20060102-150405") + err := os.Rename(logPath, backupPath) + if err != nil { + return err + } + glob := logPath + ".*" + if r.Compress { + glob = logPath + ".*.gz" + err = compressFile(backupPath) + if err != nil { + return err + } + } + + // Remove old backups + files, err := filepath.Glob(glob) + if err != nil { + return err + } + + sort.Sort(sort.Reverse(sort.StringSlice(files))) + + for i, file := range files { + logger.Trace("Checking file", "file", file, "index", i, "maxBackups", maxBackups) + if i >= maxBackups { + logger.Trace("Removing file as over max backup count", "file", file) + os.Remove(file) + } else { + // Check the age of the file + fileInfo, err := os.Stat(file) + if err != nil { + return err + } + age := time.Since(fileInfo.ModTime()).Hours() + if age > float64(r.MaxAge*24) { + logger.Trace("Removing file as age was over configured amount", "file", file, "age", age) + os.Remove(file) + } + } + } + + return nil +} + +func compressFile(src string) error { + // Open the source file for reading + srcFile, err := os.Open(src) + if err != nil { + return err + } + defer srcFile.Close() + + // Create the destination file + dstFile, err := os.Create(src + ".gz") + if err != nil { + return err + } + defer dstFile.Close() + + // Create a gzip writer + gw := gzip.NewWriter(dstFile) + defer gw.Close() + + // Read the source file and write its contents to the gzip writer + _, err = io.Copy(gw, srcFile) + if err != nil { + return err + } + + // Delete the original (uncompressed) backup file + err = os.Remove(src) 
+ if err != nil { + return err + } + + return nil +} + +func WriteToFileWithCtx(ctx context.Context, cfg PluginConfig, log string) error { + FileWriteMutex.Lock() + defer FileWriteMutex.Unlock() + originalFileInfo, err := FileWriter.Stat() + if err != nil { + logger.Error("Failed to get file info", "error", err) + } + currentFileInfo, _ := os.Stat(cfg.LogPath) + if !os.SameFile(originalFileInfo, currentFileInfo) { + // The file has been rotated outside our control + logger.Info("Log file has been rotated or missing attempting to reopen it") + FileWriter.Close() + FileWriter, err = os.OpenFile(cfg.LogPath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + return err + } + FileInfo, err := FileWriter.Stat() + if err != nil { + return err + } + FileSize = FileInfo.Size() + logger.Info("Log file has been reopened successfully") + } + n, err := io.WriteString(&FileWriteCtx{Ctx: ctx, Writer: FileWriter}, log) + if err == nil { + FileSize += int64(n) + if FileSize > int64(cfg.LogRotate.MaxSize)*1024*1024 && cfg.LogRotate.Enabled { + logger.Debug("Rotating log file", "file", cfg.LogPath) + // Rotate the log file + cfg.LogRotate.rotateLogs(cfg) + } + } + return err +} + +func (s *FilePlugin) Notify(ctx context.Context, notification *protobufs.Notification) (*protobufs.Empty, error) { + if _, ok := s.PluginConfigByName[notification.Name]; !ok { + return nil, fmt.Errorf("invalid plugin config name %s", notification.Name) + } + cfg := s.PluginConfigByName[notification.Name] + + return &protobufs.Empty{}, WriteToFileWithCtx(ctx, cfg, notification.Text) +} + +func (s *FilePlugin) Configure(ctx context.Context, config *protobufs.Config) (*protobufs.Empty, error) { + d := PluginConfig{} + err := yaml.Unmarshal(config.Config, &d) + if err != nil { + logger.Error("Failed to unmarshal config", "error", err) + return &protobufs.Empty{}, err + } + FileWriteMutex = &sync.Mutex{} + FileWriter, err = os.OpenFile(d.LogPath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err 
!= nil { + logger.Error("Failed to open log file", "error", err) + return &protobufs.Empty{}, err + } + FileInfo, err := FileWriter.Stat() + if err != nil { + logger.Error("Failed to get file info", "error", err) + return &protobufs.Empty{}, err + } + FileSize = FileInfo.Size() + s.PluginConfigByName[d.Name] = d + logger.SetLevel(hclog.LevelFromString(d.LogLevel)) + return &protobufs.Empty{}, err +} + +func main() { + var handshake = plugin.HandshakeConfig{ + ProtocolVersion: 1, + MagicCookieKey: "CROWDSEC_PLUGIN_KEY", + MagicCookieValue: os.Getenv("CROWDSEC_PLUGIN_KEY"), + } + + sp := &FilePlugin{PluginConfigByName: make(map[string]PluginConfig)} + plugin.Serve(&plugin.ServeConfig{ + HandshakeConfig: handshake, + Plugins: map[string]plugin.Plugin{ + "file": &protobufs.NotifierPlugin{ + Impl: sp, + }, + }, + GRPCServer: plugin.DefaultGRPCServer, + Logger: logger, + }) +} diff --git a/debian/install b/debian/install index 3153244b8e9..fa422cac8d9 100644 --- a/debian/install +++ b/debian/install @@ -11,3 +11,4 @@ cmd/notification-http/http.yaml etc/crowdsec/notifications/ cmd/notification-splunk/splunk.yaml etc/crowdsec/notifications/ cmd/notification-email/email.yaml etc/crowdsec/notifications/ cmd/notification-sentinel/sentinel.yaml etc/crowdsec/notifications/ +cmd/notification-file/file.yaml etc/crowdsec/notifications/ diff --git a/debian/rules b/debian/rules index 50daae08754..c11771282ea 100755 --- a/debian/rules +++ b/debian/rules @@ -31,6 +31,7 @@ override_dh_auto_install: install -m 551 cmd/notification-splunk/notification-splunk debian/crowdsec/usr/lib/crowdsec/plugins/ install -m 551 cmd/notification-email/notification-email debian/crowdsec/usr/lib/crowdsec/plugins/ install -m 551 cmd/notification-sentinel/notification-sentinel debian/crowdsec/usr/lib/crowdsec/plugins/ + install -m 551 cmd/notification-file/notification-file debian/crowdsec/usr/lib/crowdsec/plugins/ cp cmd/crowdsec/crowdsec debian/crowdsec/usr/bin cp cmd/crowdsec-cli/cscli 
debian/crowdsec/usr/bin diff --git a/rpm/SPECS/crowdsec.spec b/rpm/SPECS/crowdsec.spec index 0a20dc97deb..ab71b650d11 100644 --- a/rpm/SPECS/crowdsec.spec +++ b/rpm/SPECS/crowdsec.spec @@ -67,13 +67,14 @@ install -m 551 cmd/notification-http/notification-http %{buildroot}%{_libdir}/%{ install -m 551 cmd/notification-splunk/notification-splunk %{buildroot}%{_libdir}/%{name}/plugins/ install -m 551 cmd/notification-email/notification-email %{buildroot}%{_libdir}/%{name}/plugins/ install -m 551 cmd/notification-sentinel/notification-sentinel %{buildroot}%{_libdir}/%{name}/plugins/ +install -m 551 cmd/notification-file/notification-file %{buildroot}%{_libdir}/%{name}/plugins/ install -m 600 cmd/notification-slack/slack.yaml %{buildroot}%{_sysconfdir}/crowdsec/notifications/ install -m 600 cmd/notification-http/http.yaml %{buildroot}%{_sysconfdir}/crowdsec/notifications/ install -m 600 cmd/notification-splunk/splunk.yaml %{buildroot}%{_sysconfdir}/crowdsec/notifications/ install -m 600 cmd/notification-email/email.yaml %{buildroot}%{_sysconfdir}/crowdsec/notifications/ install -m 600 cmd/notification-sentinel/sentinel.yaml %{buildroot}%{_sysconfdir}/crowdsec/notifications/ - +install -m 600 cmd/notification-file/file.yaml %{buildroot}%{_sysconfdir}/crowdsec/notifications/ %clean rm -rf %{buildroot} @@ -88,6 +89,7 @@ rm -rf %{buildroot} %{_libdir}/%{name}/plugins/notification-splunk %{_libdir}/%{name}/plugins/notification-email %{_libdir}/%{name}/plugins/notification-sentinel +%{_libdir}/%{name}/plugins/notification-file %{_sysconfdir}/%{name}/patterns/linux-syslog %{_sysconfdir}/%{name}/patterns/ruby %{_sysconfdir}/%{name}/patterns/nginx @@ -123,6 +125,7 @@ rm -rf %{buildroot} %config(noreplace) %{_sysconfdir}/%{name}/notifications/splunk.yaml %config(noreplace) %{_sysconfdir}/%{name}/notifications/email.yaml %config(noreplace) %{_sysconfdir}/%{name}/notifications/sentinel.yaml +%config(noreplace) %{_sysconfdir}/%{name}/notifications/file.yaml %config(noreplace) 
%{_sysconfdir}/cron.daily/%{name} %{_unitdir}/%{name}.service diff --git a/wizard.sh b/wizard.sh index a3afc789bf5..226b4e0609b 100755 --- a/wizard.sh +++ b/wizard.sh @@ -82,12 +82,14 @@ SLACK_PLUGIN_BINARY="./cmd/notification-slack/notification-slack" SPLUNK_PLUGIN_BINARY="./cmd/notification-splunk/notification-splunk" EMAIL_PLUGIN_BINARY="./cmd/notification-email/notification-email" SENTINEL_PLUGIN_BINARY="./cmd/notification-sentinel/notification-sentinel" +FILE_PLUGIN_BINARY="./cmd/notification-file/notification-file" HTTP_PLUGIN_CONFIG="./cmd/notification-http/http.yaml" SLACK_PLUGIN_CONFIG="./cmd/notification-slack/slack.yaml" SPLUNK_PLUGIN_CONFIG="./cmd/notification-splunk/splunk.yaml" EMAIL_PLUGIN_CONFIG="./cmd/notification-email/email.yaml" SENTINEL_PLUGIN_CONFIG="./cmd/notification-sentinel/sentinel.yaml" +FILE_PLUGIN_CONFIG="./cmd/notification-file/file.yaml" BACKUP_DIR=$(mktemp -d) @@ -525,6 +527,7 @@ install_plugins(){ cp ${HTTP_PLUGIN_BINARY} ${CROWDSEC_PLUGIN_DIR} cp ${EMAIL_PLUGIN_BINARY} ${CROWDSEC_PLUGIN_DIR} cp ${SENTINEL_PLUGIN_BINARY} ${CROWDSEC_PLUGIN_DIR} + cp ${FILE_PLUGIN_BINARY} ${CROWDSEC_PLUGIN_DIR} if [[ ${DOCKER_MODE} == "false" ]]; then cp -n ${SLACK_PLUGIN_CONFIG} /etc/crowdsec/notifications/ @@ -532,6 +535,7 @@ install_plugins(){ cp -n ${HTTP_PLUGIN_CONFIG} /etc/crowdsec/notifications/ cp -n ${EMAIL_PLUGIN_CONFIG} /etc/crowdsec/notifications/ cp -n ${SENTINEL_PLUGIN_CONFIG} /etc/crowdsec/notifications/ + cp -n ${FILE_PLUGIN_CONFIG} /etc/crowdsec/notifications/ fi } From 41ec90ae8c6f38a11a99b2c0a07138717b4659af Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 15 May 2024 10:37:25 +0200 Subject: [PATCH 129/318] make: remove unused targets (#2998) --- cmd/crowdsec-cli/Makefile | 14 ------------ cmd/crowdsec/Makefile | 45 --------------------------------------- wizard.sh | 1 - 3 files changed, 60 deletions(-) diff --git a/cmd/crowdsec-cli/Makefile b/cmd/crowdsec-cli/Makefile index 
392361ef82e..6d6e4da8dbd 100644 --- a/cmd/crowdsec-cli/Makefile +++ b/cmd/crowdsec-cli/Makefile @@ -8,8 +8,6 @@ GO = go GOBUILD = $(GO) build BINARY_NAME = cscli$(EXT) -PREFIX ?= "/" -BIN_PREFIX = $(PREFIX)"/usr/local/bin/" .PHONY: all all: clean build @@ -17,17 +15,5 @@ all: clean build build: clean $(GOBUILD) $(LD_OPTS) -o $(BINARY_NAME) -.PHONY: install -install: install-conf install-bin - -install-conf: - -install-bin: - @install -v -m 755 -D "$(BINARY_NAME)" "$(BIN_PREFIX)/$(BINARY_NAME)" || exit - -uninstall: - @$(RM) $(CSCLI_CONFIG) $(WIN_IGNORE_ERR) - @$(RM) $(BIN_PREFIX)$(BINARY_NAME) $(WIN_IGNORE_ERR) - clean: @$(RM) $(BINARY_NAME) $(WIN_IGNORE_ERR) diff --git a/cmd/crowdsec/Makefile b/cmd/crowdsec/Makefile index 7425d970ad1..39f807cab88 100644 --- a/cmd/crowdsec/Makefile +++ b/cmd/crowdsec/Makefile @@ -10,13 +10,6 @@ GOTEST = $(GO) test CROWDSEC_BIN = crowdsec$(EXT) # names longer than 15 chars break 'pgrep' -PREFIX ?= "/" -CFG_PREFIX = $(PREFIX)"/etc/crowdsec/config/" -BIN_PREFIX = $(PREFIX)"/usr/local/bin/" -DATA_PREFIX = $(PREFIX)"/var/run/crowdsec/" -PID_DIR = $(PREFIX)"/var/run/" - -SYSTEMD_PATH_FILE = "/etc/systemd/system/crowdsec.service" .PHONY: all all: clean test build @@ -29,41 +22,3 @@ test: clean: @$(RM) $(CROWDSEC_BIN) $(WIN_IGNORE_ERR) - -.PHONY: install -install: install-conf install-bin - -.PHONY: install-conf -install-conf: - mkdir -p $(DATA_PREFIX) || exit - (cd ../.. 
/ && find ./data -type f -exec install -Dm 755 "{}" "$(DATA_PREFIX){}" \; && cd ./cmd/crowdsec) || exit - (cd ../../config && find ./patterns -type f -exec install -Dm 755 "{}" "$(CFG_PREFIX){}" \; && cd ../cmd/crowdsec) || exit - mkdir -p "$(CFG_PREFIX)" || exit - mkdir -p "$(CFG_PREFIX)/parsers" || exit - mkdir -p "$(CFG_PREFIX)/scenarios" || exit - mkdir -p "$(CFG_PREFIX)/postoverflows" || exit - mkdir -p "$(CFG_PREFIX)/collections" || exit - mkdir -p "$(CFG_PREFIX)/patterns" || exit - install -v -m 755 -D ../../config/prod.yaml "$(CFG_PREFIX)" || exit - install -v -m 755 -D ../../config/dev.yaml "$(CFG_PREFIX)" || exit - install -v -m 755 -D ../../config/acquis.yaml "$(CFG_PREFIX)" || exit - install -v -m 755 -D ../../config/profiles.yaml "$(CFG_PREFIX)" || exit - install -v -m 755 -D ../../config/api.yaml "$(CFG_PREFIX)" || exit - mkdir -p $(PID_DIR) || exit - PID=$(PID_DIR) DATA=$(DATA_PREFIX)"/data/" CFG=$(CFG_PREFIX) envsubst < ../../config/prod.yaml > $(CFG_PREFIX)"/default.yaml" - -.PHONY: install-bin -install-bin: - install -v -m 755 -D "$(CROWDSEC_BIN)" "$(BIN_PREFIX)/$(CROWDSEC_BIN)" || exit - -.PHONY: systemd -systemd: install - CFG=$(CFG_PREFIX) PID=$(PID_DIR) BIN=$(BIN_PREFIX)"/"$(CROWDSEC_BIN) envsubst < ../../config/crowdsec.service > "$(SYSTEMD_PATH_FILE)" - systemctl daemon-reload - -.PHONY: uninstall -uninstall: - $(RM) $(CFG_PREFIX) $(WIN_IGNORE_ERR) - $(RM) $(DATA_PREFIX) $(WIN_IGNORE_ERR) - $(RM) "$(BIN_PREFIX)/$(CROWDSEC_BIN)" $(WIN_IGNORE_ERR) - $(RM) "$(SYSTEMD_PATH_FILE)" $(WIN_IGNORE_ERR) diff --git a/wizard.sh b/wizard.sh index 226b4e0609b..6e215365f6c 100755 --- a/wizard.sh +++ b/wizard.sh @@ -18,7 +18,6 @@ NC='\033[0m' SILENT="false" DOCKER_MODE="false" -CROWDSEC_RUN_DIR="/var/run" CROWDSEC_LIB_DIR="/var/lib/crowdsec" CROWDSEC_USR_DIR="/usr/local/lib/crowdsec" CROWDSEC_DATA_DIR="${CROWDSEC_LIB_DIR}/data" From ccab6e991001bbd3ac850ae30fd03d9237ae34df Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: 
Wed, 15 May 2024 10:38:36 +0200 Subject: [PATCH 130/318] bats: clean up extra output (#3008) --- test/bats/40_cold-logs.bats | 6 +++--- test/bats/40_live-ban.bats | 7 +++---- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/test/bats/40_cold-logs.bats b/test/bats/40_cold-logs.bats index 0e167d3d077..070a9eac5f1 100644 --- a/test/bats/40_cold-logs.bats +++ b/test/bats/40_cold-logs.bats @@ -14,9 +14,9 @@ setup_file() { # we reset config and data, and only run the daemon once for all the tests in this file ./instance-data load - cscli collections install crowdsecurity/sshd --error - cscli parsers install crowdsecurity/syslog-logs --error - cscli parsers install crowdsecurity/dateparse-enrich --error + cscli collections install crowdsecurity/sshd --error >/dev/null + cscli parsers install crowdsecurity/syslog-logs --error >/dev/null + cscli parsers install crowdsecurity/dateparse-enrich --error >/dev/null ./instance-crowdsec start } diff --git a/test/bats/40_live-ban.bats b/test/bats/40_live-ban.bats index 122ea05e41a..fb5fd1fd435 100644 --- a/test/bats/40_live-ban.bats +++ b/test/bats/40_live-ban.bats @@ -14,10 +14,9 @@ setup_file() { # we reset config and data, but run the daemon only in the tests that need it ./instance-data load - cscli collections install crowdsecurity/sshd --error - cscli parsers install crowdsecurity/syslog-logs --error - cscli parsers install crowdsecurity/dateparse-enrich --error - + cscli collections install crowdsecurity/sshd --error >/dev/null + cscli parsers install crowdsecurity/syslog-logs --error >/dev/null + cscli parsers install crowdsecurity/dateparse-enrich --error >/dev/null } teardown_file() { From cc63729b2cae9cda94786d8374097a7f5863167b Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 15 May 2024 10:52:43 +0200 Subject: [PATCH 131/318] version: write to stdout, add missing newline (#3012) * pkg/cwversion cleanup - add missing newline between version and codename - add more 
information to "support dump" - write "cscli version" and "crowdsec -version" to stdout, not stderr * fix func test * lint --- cmd/crowdsec-cli/support.go | 2 +- cmd/crowdsec-cli/version.go | 8 +++++--- cmd/crowdsec/main.go | 2 +- pkg/cwversion/version.go | 33 ++++++++++++--------------------- test/bats/01_crowdsec.bats | 5 +++++ test/bats/01_cscli.bats | 20 ++++++++++---------- 6 files changed, 34 insertions(+), 36 deletions(-) diff --git a/cmd/crowdsec-cli/support.go b/cmd/crowdsec-cli/support.go index 54b2e7ad9ad..61fa2b55817 100644 --- a/cmd/crowdsec-cli/support.go +++ b/cmd/crowdsec-cli/support.go @@ -123,7 +123,7 @@ func (cli *cliSupport) dumpMetrics(ctx context.Context, zw *zip.Writer) error { func (cli *cliSupport) dumpVersion(zw *zip.Writer) { log.Info("Collecting version") - cli.writeToZip(zw, SUPPORT_VERSION_PATH, time.Now(), strings.NewReader(cwversion.ShowStr())) + cli.writeToZip(zw, SUPPORT_VERSION_PATH, time.Now(), strings.NewReader(cwversion.FullString())) } func (cli *cliSupport) dumpFeatures(zw *zip.Writer) { diff --git a/cmd/crowdsec-cli/version.go b/cmd/crowdsec-cli/version.go index de36c9be28f..7ec5c459968 100644 --- a/cmd/crowdsec-cli/version.go +++ b/cmd/crowdsec-cli/version.go @@ -1,6 +1,8 @@ package main import ( + "os" + "github.com/spf13/cobra" "github.com/crowdsecurity/crowdsec/pkg/cwversion" @@ -12,14 +14,14 @@ func NewCLIVersion() *cliVersion { return &cliVersion{} } -func (cli cliVersion) NewCommand() *cobra.Command { +func (cliVersion) NewCommand() *cobra.Command { cmd := &cobra.Command{ Use: "version", Short: "Display version", - Args: cobra.ExactArgs(0), + Args: cobra.NoArgs, DisableAutoGenTag: true, Run: func(_ *cobra.Command, _ []string) { - cwversion.Show() + _, _ = os.Stdout.WriteString(cwversion.FullString()) }, } diff --git a/cmd/crowdsec/main.go b/cmd/crowdsec/main.go index 0d96692ba5f..26e39eb069c 100644 --- a/cmd/crowdsec/main.go +++ b/cmd/crowdsec/main.go @@ -369,7 +369,7 @@ func main() { } if flags.PrintVersion { - 
cwversion.Show() + os.Stdout.WriteString(cwversion.FullString()) os.Exit(0) } diff --git a/pkg/cwversion/version.go b/pkg/cwversion/version.go index 6f85704d8e5..ac51567c2fc 100644 --- a/pkg/cwversion/version.go +++ b/pkg/cwversion/version.go @@ -3,7 +3,6 @@ package cwversion import ( "encoding/json" "fmt" - "log" "net/http" "runtime" "strings" @@ -30,44 +29,36 @@ func versionWithTag() string { ret := version.Version if !strings.HasSuffix(ret, version.Tag) { - ret += fmt.Sprintf("-%s", version.Tag) + ret += "-" + version.Tag } return ret } -func ShowStr() string { - ret := fmt.Sprintf("version: %s", versionWithTag()) +func FullString() string { + ret := fmt.Sprintf("version: %s\n", versionWithTag()) ret += fmt.Sprintf("Codename: %s\n", Codename) ret += fmt.Sprintf("BuildDate: %s\n", version.BuildDate) ret += fmt.Sprintf("GoVersion: %s\n", version.GoVersion) ret += fmt.Sprintf("Platform: %s\n", System) + ret += fmt.Sprintf("libre2: %s\n", Libre2) + ret += fmt.Sprintf("Constraint_parser: %s\n", Constraint_parser) + ret += fmt.Sprintf("Constraint_scenario: %s\n", Constraint_scenario) + ret += fmt.Sprintf("Constraint_api: %s\n", Constraint_api) + ret += fmt.Sprintf("Constraint_acquis: %s\n", Constraint_acquis) return ret } -func Show() { - log.Printf("version: %s", versionWithTag()) - log.Printf("Codename: %s", Codename) - log.Printf("BuildDate: %s", version.BuildDate) - log.Printf("GoVersion: %s", version.GoVersion) - log.Printf("Platform: %s\n", System) - log.Printf("libre2: %s\n", Libre2) - log.Printf("Constraint_parser: %s", Constraint_parser) - log.Printf("Constraint_scenario: %s", Constraint_scenario) - log.Printf("Constraint_api: %s", Constraint_api) - log.Printf("Constraint_acquis: %s", Constraint_acquis) -} - func VersionStr() string { return fmt.Sprintf("%s-%s-%s", version.Version, System, version.Tag) } func VersionStrip() string { - version := strings.Split(version.Version, "~") - version = strings.Split(version[0], "-") + ret := 
strings.Split(version.Version, "~") + ret = strings.Split(ret[0], "-") - return version[0] + return ret[0] } func Satisfies(strvers string, constraint string) (bool, error) { @@ -90,7 +81,7 @@ func Satisfies(strvers string, constraint string) (bool, error) { // Latest return latest crowdsec version based on github func Latest() (string, error) { - latest := make(map[string]interface{}) + latest := make(map[string]any) resp, err := http.Get("https://version.crowdsec.net/latest") if err != nil { diff --git a/test/bats/01_crowdsec.bats b/test/bats/01_crowdsec.bats index d8d369eedf4..7a449ebd047 100644 --- a/test/bats/01_crowdsec.bats +++ b/test/bats/01_crowdsec.bats @@ -36,6 +36,11 @@ teardown() { rune -0 wait-for --err "argument provided but not defined: trololo" "$CROWDSEC" trololo } +@test "crowdsec -version" { + rune -0 "$CROWDSEC" -version + assert_output --partial "version:" +} + @test "crowdsec (no api and no agent)" { rune -0 wait-for \ --err "you must run at least the API Server or crowdsec" \ diff --git a/test/bats/01_cscli.bats b/test/bats/01_cscli.bats index 33dd2e12ec2..8dfdf701a95 100644 --- a/test/bats/01_cscli.bats +++ b/test/bats/01_cscli.bats @@ -40,20 +40,20 @@ teardown() { @test "cscli version" { rune -0 cscli version - assert_stderr --partial "version:" - assert_stderr --partial "Codename:" - assert_stderr --partial "BuildDate:" - assert_stderr --partial "GoVersion:" - assert_stderr --partial "Platform:" - assert_stderr --partial "Constraint_parser:" - assert_stderr --partial "Constraint_scenario:" - assert_stderr --partial "Constraint_api:" - assert_stderr --partial "Constraint_acquis:" + assert_output --partial "version:" + assert_output --partial "Codename:" + assert_output --partial "BuildDate:" + assert_output --partial "GoVersion:" + assert_output --partial "Platform:" + assert_output --partial "Constraint_parser:" + assert_output --partial "Constraint_scenario:" + assert_output --partial "Constraint_api:" + assert_output --partial 
"Constraint_acquis:" # should work without configuration file rm "$CONFIG_YAML" rune -0 cscli version - assert_stderr --partial "version:" + assert_output --partial "version:" } @test "cscli help" { From 1b894a292ba837cac09eb8e5fde6d58231a69daf Mon Sep 17 00:00:00 2001 From: blotus Date: Wed, 15 May 2024 15:33:43 +0200 Subject: [PATCH 132/318] `GetActiveDecisionsCount()`and `GetActiveDecisionsTimeLeft()`expr helpers (#3013) --- pkg/database/decisions.go | 62 ++++++++ pkg/exprhelpers/expr_lib.go | 14 ++ pkg/exprhelpers/exprlib_test.go | 262 ++++++++++++++++++++++++++++++++ pkg/exprhelpers/helpers.go | 30 +++- 4 files changed, 367 insertions(+), 1 deletion(-) diff --git a/pkg/database/decisions.go b/pkg/database/decisions.go index 20a49c79143..941fc5c7791 100644 --- a/pkg/database/decisions.go +++ b/pkg/database/decisions.go @@ -640,6 +640,68 @@ func (c *Client) CountDecisionsByValue(decisionValue string) (int, error) { return count, nil } +func (c *Client) CountActiveDecisionsByValue(decisionValue string) (int, error) { + var err error + var start_ip, start_sfx, end_ip, end_sfx int64 + var ip_sz, count int + ip_sz, start_ip, start_sfx, end_ip, end_sfx, err = types.Addr2Ints(decisionValue) + + if err != nil { + return 0, fmt.Errorf("unable to convert '%s' to int: %s", decisionValue, err) + } + + contains := true + decisions := c.Ent.Decision.Query() + + decisions, err = applyStartIpEndIpFilter(decisions, contains, ip_sz, start_ip, start_sfx, end_ip, end_sfx) + if err != nil { + return 0, fmt.Errorf("fail to apply StartIpEndIpFilter: %w", err) + } + + decisions = decisions.Where(decision.UntilGT(time.Now().UTC())) + + count, err = decisions.Count(c.CTX) + if err != nil { + return 0, fmt.Errorf("fail to count decisions: %w", err) + } + + return count, nil +} + +func (c *Client) GetActiveDecisionsTimeLeftByValue(decisionValue string) (time.Duration, error) { + var err error + var start_ip, start_sfx, end_ip, end_sfx int64 + var ip_sz int + ip_sz, start_ip, start_sfx, 
end_ip, end_sfx, err = types.Addr2Ints(decisionValue) + + if err != nil { + return 0, fmt.Errorf("unable to convert '%s' to int: %s", decisionValue, err) + } + + contains := true + decisions := c.Ent.Decision.Query().Where( + decision.UntilGT(time.Now().UTC()), + ) + + decisions, err = applyStartIpEndIpFilter(decisions, contains, ip_sz, start_ip, start_sfx, end_ip, end_sfx) + if err != nil { + return 0, fmt.Errorf("fail to apply StartIpEndIpFilter: %w", err) + } + + decisions = decisions.Order(ent.Desc(decision.FieldUntil)) + + decision, err := decisions.First(c.CTX) + if err != nil && !ent.IsNotFound(err) { + return 0, fmt.Errorf("fail to get decision: %w", err) + } + + if decision == nil { + return 0, nil + } + + return decision.Until.Sub(time.Now().UTC()), nil +} + func (c *Client) CountDecisionsSinceByValue(decisionValue string, since time.Time) (int, error) { ip_sz, start_ip, start_sfx, end_ip, end_sfx, err := types.Addr2Ints(decisionValue) diff --git a/pkg/exprhelpers/expr_lib.go b/pkg/exprhelpers/expr_lib.go index 520799287eb..5041b234db1 100644 --- a/pkg/exprhelpers/expr_lib.go +++ b/pkg/exprhelpers/expr_lib.go @@ -231,6 +231,20 @@ var exprFuncs = []exprCustomFunc{ new(func(string) int), }, }, + { + name: "GetActiveDecisionsCount", + function: GetActiveDecisionsCount, + signature: []interface{}{ + new(func(string) int), + }, + }, + { + name: "GetActiveDecisionsTimeLeft", + function: GetActiveDecisionsTimeLeft, + signature: []interface{}{ + new(func(string) time.Duration), + }, + }, { name: "GetDecisionsSinceCount", function: GetDecisionsSinceCount, diff --git a/pkg/exprhelpers/exprlib_test.go b/pkg/exprhelpers/exprlib_test.go index 9d5a6556b25..38528083272 100644 --- a/pkg/exprhelpers/exprlib_test.go +++ b/pkg/exprhelpers/exprlib_test.go @@ -1118,6 +1118,268 @@ func TestGetDecisionsSinceCount(t *testing.T) { } } +func TestGetActiveDecisionsCount(t *testing.T) { + existingIP := "1.2.3.4" + unknownIP := "1.2.3.5" + + ip_sz, start_ip, start_sfx, end_ip, 
end_sfx, err := types.Addr2Ints(existingIP) + if err != nil { + t.Errorf("unable to convert '%s' to int: %s", existingIP, err) + } + + // Add sample data to DB + dbClient = getDBClient(t) + + decision := dbClient.Ent.Decision.Create(). + SetUntil(time.Now().UTC().Add(time.Hour)). + SetScenario("crowdsec/test"). + SetStartIP(start_ip). + SetStartSuffix(start_sfx). + SetEndIP(end_ip). + SetEndSuffix(end_sfx). + SetIPSize(int64(ip_sz)). + SetType("ban"). + SetScope("IP"). + SetValue(existingIP). + SetOrigin("CAPI"). + SaveX(context.Background()) + + if decision == nil { + require.Error(t, errors.Errorf("Failed to create sample decision")) + } + + expiredDecision := dbClient.Ent.Decision.Create(). + SetUntil(time.Now().UTC().Add(-time.Hour)). + SetScenario("crowdsec/test"). + SetStartIP(start_ip). + SetStartSuffix(start_sfx). + SetEndIP(end_ip). + SetEndSuffix(end_sfx). + SetIPSize(int64(ip_sz)). + SetType("ban"). + SetScope("IP"). + SetValue(existingIP). + SetOrigin("CAPI"). + SaveX(context.Background()) + + if expiredDecision == nil { + require.Error(t, errors.Errorf("Failed to create sample decision")) + } + + err = Init(dbClient) + require.NoError(t, err) + + tests := []struct { + name string + env map[string]interface{} + code string + result string + err string + }{ + { + name: "GetActiveDecisionsCount() test: existing IP count", + env: map[string]interface{}{ + "Alert": &models.Alert{ + Source: &models.Source{ + Value: &existingIP, + }, + Decisions: []*models.Decision{ + { + Value: &existingIP, + }, + }, + }, + }, + code: "Sprintf('%d', GetActiveDecisionsCount(Alert.GetValue()))", + result: "1", + err: "", + }, + { + name: "GetActiveDecisionsCount() test: unknown IP count", + env: map[string]interface{}{ + "Alert": &models.Alert{ + Source: &models.Source{ + Value: &unknownIP, + }, + Decisions: []*models.Decision{ + { + Value: &unknownIP, + }, + }, + }, + }, + code: "Sprintf('%d', GetActiveDecisionsCount(Alert.GetValue()))", + result: "0", + err: "", + }, + } + + 
for _, test := range tests { + program, err := expr.Compile(test.code, GetExprOptions(test.env)...) + require.NoError(t, err) + output, err := expr.Run(program, test.env) + require.NoError(t, err) + require.Equal(t, test.result, output) + log.Printf("test '%s' : OK", test.name) + } +} + +func TestGetActiveDecisionsTimeLeft(t *testing.T) { + existingIP := "1.2.3.4" + unknownIP := "1.2.3.5" + + ip_sz, start_ip, start_sfx, end_ip, end_sfx, err := types.Addr2Ints(existingIP) + if err != nil { + t.Errorf("unable to convert '%s' to int: %s", existingIP, err) + } + + // Add sample data to DB + dbClient = getDBClient(t) + + decision := dbClient.Ent.Decision.Create(). + SetUntil(time.Now().UTC().Add(time.Hour)). + SetScenario("crowdsec/test"). + SetStartIP(start_ip). + SetStartSuffix(start_sfx). + SetEndIP(end_ip). + SetEndSuffix(end_sfx). + SetIPSize(int64(ip_sz)). + SetType("ban"). + SetScope("IP"). + SetValue(existingIP). + SetOrigin("CAPI"). + SaveX(context.Background()) + + if decision == nil { + require.Error(t, errors.Errorf("Failed to create sample decision")) + } + + longerDecision := dbClient.Ent.Decision.Create(). + SetUntil(time.Now().UTC().Add(2 * time.Hour)). + SetScenario("crowdsec/test"). + SetStartIP(start_ip). + SetStartSuffix(start_sfx). + SetEndIP(end_ip). + SetEndSuffix(end_sfx). + SetIPSize(int64(ip_sz)). + SetType("ban"). + SetScope("IP"). + SetValue(existingIP). + SetOrigin("CAPI"). 
+ SaveX(context.Background()) + + if longerDecision == nil { + require.Error(t, errors.Errorf("Failed to create sample decision")) + } + + err = Init(dbClient) + require.NoError(t, err) + + tests := []struct { + name string + env map[string]interface{} + code string + min float64 + max float64 + err string + }{ + { + name: "GetActiveDecisionsTimeLeft() test: existing IP time left", + env: map[string]interface{}{ + "Alert": &models.Alert{ + Source: &models.Source{ + Value: &existingIP, + }, + Decisions: []*models.Decision{ + { + Value: &existingIP, + }, + }, + }, + }, + code: "GetActiveDecisionsTimeLeft(Alert.GetValue())", + min: 7195, // 5 seconds margin to make sure the test doesn't fail randomly in the CI + max: 7200, + err: "", + }, + { + name: "GetActiveDecisionsTimeLeft() test: unknown IP time left", + env: map[string]interface{}{ + "Alert": &models.Alert{ + Source: &models.Source{ + Value: &unknownIP, + }, + Decisions: []*models.Decision{ + { + Value: &unknownIP, + }, + }, + }, + }, + code: "GetActiveDecisionsTimeLeft(Alert.GetValue())", + min: 0, + max: 0, + err: "", + }, + { + name: "GetActiveDecisionsTimeLeft() test: existing IP and call time.Duration method", + env: map[string]interface{}{ + "Alert": &models.Alert{ + Source: &models.Source{ + Value: &existingIP, + }, + Decisions: []*models.Decision{ + { + Value: &existingIP, + }, + }, + }, + }, + code: "GetActiveDecisionsTimeLeft(Alert.GetValue()).Hours()", + min: 2, + max: 2, + }, + { + name: "GetActiveDecisionsTimeLeft() test: unknown IP and call time.Duration method", + env: map[string]interface{}{ + "Alert": &models.Alert{ + Source: &models.Source{ + Value: &unknownIP, + }, + Decisions: []*models.Decision{ + { + Value: &unknownIP, + }, + }, + }, + }, + code: "GetActiveDecisionsTimeLeft(Alert.GetValue()).Hours()", + min: 0, + max: 0, + }, + } + + delta := 0.0001 + + for _, test := range tests { + program, err := expr.Compile(test.code, GetExprOptions(test.env)...) 
+ require.NoError(t, err) + output, err := expr.Run(program, test.env) + require.NoError(t, err) + switch o := output.(type) { + case time.Duration: + require.LessOrEqual(t, int(o.Seconds()), int(test.max)) + require.GreaterOrEqual(t, int(o.Seconds()), int(test.min)) + case float64: + require.LessOrEqual(t, o, test.max+delta) + require.GreaterOrEqual(t, o, test.min-delta) + default: + t.Fatalf("GetActiveDecisionsTimeLeft() should return a time.Duration or a float64") + } + } + +} + func TestParseUnixTime(t *testing.T) { tests := []struct { name string diff --git a/pkg/exprhelpers/helpers.go b/pkg/exprhelpers/helpers.go index 79a621c7d35..e4e38e48474 100644 --- a/pkg/exprhelpers/helpers.go +++ b/pkg/exprhelpers/helpers.go @@ -550,7 +550,7 @@ func GetDecisionsSinceCount(params ...any) (any, error) { value := params[0].(string) since := params[1].(string) if dbClient == nil { - log.Error("No database config to call GetDecisionsCount()") + log.Error("No database config to call GetDecisionsSinceCount()") return 0, nil } sinceDuration, err := time.ParseDuration(since) @@ -567,6 +567,34 @@ func GetDecisionsSinceCount(params ...any) (any, error) { return count, nil } +func GetActiveDecisionsCount(params ...any) (any, error) { + value := params[0].(string) + if dbClient == nil { + log.Error("No database config to call GetActiveDecisionsCount()") + return 0, nil + } + count, err := dbClient.CountActiveDecisionsByValue(value) + if err != nil { + log.Errorf("Failed to get active decisions count from value '%s'", value) + return 0, err + } + return count, nil +} + +func GetActiveDecisionsTimeLeft(params ...any) (any, error) { + value := params[0].(string) + if dbClient == nil { + log.Error("No database config to call GetActiveDecisionsTimeLeft()") + return 0, nil + } + timeLeft, err := dbClient.GetActiveDecisionsTimeLeftByValue(value) + if err != nil { + log.Errorf("Failed to get active decisions time left from value '%s'", value) + return 0, err + } + return timeLeft, nil +} + 
// func LookupHost(value string) []string { func LookupHost(params ...any) (any, error) { value := params[0].(string) From ccf08e56d9be2e0a621803d6b203230a1747aea5 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 16 May 2024 10:58:00 +0200 Subject: [PATCH 133/318] bats: no need for openssl, netcat; allow to prevent tests from running with a lock (#3016) * bats: lock/unlock instance data; replace openssl w/ cfssl; update dep list * concat pem without openssl :-/ * unused/unreachable code * lint * redundant {} (shellharden) --- test/README.md | 7 +++++-- test/bats/11_bouncers_tls.bats | 7 ++++--- test/bats/30_machines_tls.bats | 7 ++++--- test/bin/mock-http.py | 6 ++++-- test/bin/wait-for | 6 +++--- test/bin/wait-for-port | 4 ---- test/disable-capi | 2 +- test/enable-capi | 4 ++-- test/instance-crowdsec | 6 +++--- test/instance-data | 6 +++--- test/instance-db | 6 +++--- test/instance-mock-http | 10 +++++----- test/lib/config/config-local | 17 ++++++++++++++++- test/run-tests | 6 ++++-- 14 files changed, 57 insertions(+), 37 deletions(-) diff --git a/test/README.md b/test/README.md index 723ee5d3e9b..4ed132b2cfa 100644 --- a/test/README.md +++ b/test/README.md @@ -61,8 +61,6 @@ architectures. - `curl` - `daemonize` - `jq` - - `nc` - - `openssl` - `python3` ## Running all tests @@ -241,6 +239,11 @@ according to the specific needs of the group of tests in the file. crowdsec instance. Crowdsec must not be running while this operation is performed. + - instance-data lock/unlock + +When playing around with a local crowdsec installation, you can run "instance-data lock" +to prevent the bats suite from running, so it won't overwrite your configuration or data. + - `instance-crowdsec [ start | stop ]` Runs (or stops) crowdsec as a background process. 
PID and lockfiles are diff --git a/test/bats/11_bouncers_tls.bats b/test/bats/11_bouncers_tls.bats index 84de3d2f488..6b4986d45d7 100644 --- a/test/bats/11_bouncers_tls.bats +++ b/test/bats/11_bouncers_tls.bats @@ -35,13 +35,14 @@ setup_file() { # Generate revoked client certs for cert_name in "revoked_1" "revoked_2"; do cfssl gencert -ca "${tmpdir}/inter.pem" -ca-key "${tmpdir}/inter-key.pem" -config "${CFDIR}/profiles.json" -profile=client "${CFDIR}/bouncer.json" 2>/dev/null | cfssljson --bare "${tmpdir}/${cert_name}" - serial="$(openssl x509 -noout -serial -in "${tmpdir}/${cert_name}.pem" | cut -d '=' -f2)" - echo "ibase=16; ${serial}" | bc >"${tmpdir}/serials_${cert_name}.txt" + cfssl certinfo -cert "${tmpdir}/${cert_name}.pem" | jq -r '.serial_number' > "${tmpdir}/serials_${cert_name}.txt" done # Generate separate CRL blocks and concatenate them for cert_name in "revoked_1" "revoked_2"; do - cfssl gencrl "${tmpdir}/serials_${cert_name}.txt" "${tmpdir}/ca.pem" "${tmpdir}/ca-key.pem" | base64 -d | openssl crl -inform DER -out "${tmpdir}/crl_${cert_name}.pem" + echo '-----BEGIN X509 CRL-----' > "${tmpdir}/crl_${cert_name}.pem" + cfssl gencrl "${tmpdir}/serials_${cert_name}.txt" "${tmpdir}/ca.pem" "${tmpdir}/ca-key.pem" >> "${tmpdir}/crl_${cert_name}.pem" + echo '-----END X509 CRL-----' >> "${tmpdir}/crl_${cert_name}.pem" done cat "${tmpdir}/crl_revoked_1.pem" "${tmpdir}/crl_revoked_2.pem" >"${tmpdir}/crl.pem" diff --git a/test/bats/30_machines_tls.bats b/test/bats/30_machines_tls.bats index b6d089d68e7..52231704558 100644 --- a/test/bats/30_machines_tls.bats +++ b/test/bats/30_machines_tls.bats @@ -38,13 +38,14 @@ setup_file() { # Generate revoked client cert for cert_name in "revoked_1" "revoked_2"; do cfssl gencert -ca "${tmpdir}/inter.pem" -ca-key "${tmpdir}/inter-key.pem" -config "${CFDIR}/profiles.json" -profile=client "${CFDIR}/agent.json" 2>/dev/null | cfssljson --bare "${tmpdir}/${cert_name}" - serial="$(openssl x509 -noout -serial -in 
"${tmpdir}/${cert_name}.pem" | cut -d '=' -f2)" - echo "ibase=16; ${serial}" | bc >"${tmpdir}/serials_${cert_name}.txt" + cfssl certinfo -cert "${tmpdir}/${cert_name}.pem" | jq -r '.serial_number' > "${tmpdir}/serials_${cert_name}.txt" done # Generate separate CRL blocks and concatenate them for cert_name in "revoked_1" "revoked_2"; do - cfssl gencrl "${tmpdir}/serials_${cert_name}.txt" "${tmpdir}/ca.pem" "${tmpdir}/ca-key.pem" | base64 -d | openssl crl -inform DER -out "${tmpdir}/crl_${cert_name}.pem" + echo '-----BEGIN X509 CRL-----' > "${tmpdir}/crl_${cert_name}.pem" + cfssl gencrl "${tmpdir}/serials_${cert_name}.txt" "${tmpdir}/ca.pem" "${tmpdir}/ca-key.pem" >> "${tmpdir}/crl_${cert_name}.pem" + echo '-----END X509 CRL-----' >> "${tmpdir}/crl_${cert_name}.pem" done cat "${tmpdir}/crl_revoked_1.pem" "${tmpdir}/crl_revoked_2.pem" >"${tmpdir}/crl.pem" diff --git a/test/bin/mock-http.py b/test/bin/mock-http.py index 3f26271b400..d11a4ebf717 100644 --- a/test/bin/mock-http.py +++ b/test/bin/mock-http.py @@ -6,6 +6,7 @@ from http.server import HTTPServer, BaseHTTPRequestHandler + class RequestHandler(BaseHTTPRequestHandler): def do_POST(self): request_path = self.path @@ -18,7 +19,7 @@ def do_POST(self): } print(json.dumps(log)) self.send_response(200) - self.send_header('Content-type','application/json') + self.send_header('Content-type', 'application/json') self.end_headers() self.wfile.write(json.dumps({}).encode()) self.wfile.flush() @@ -27,6 +28,7 @@ def do_POST(self): def log_message(self, format, *args): return + def main(argv): try: port = int(argv[1]) @@ -42,6 +44,6 @@ def main(argv): return 0 -if __name__ == "__main__" : +if __name__ == "__main__": logging.basicConfig(level=logging.INFO) sys.exit(main(sys.argv)) diff --git a/test/bin/wait-for b/test/bin/wait-for index 6c6fdd5ce2b..b226783d44b 100755 --- a/test/bin/wait-for +++ b/test/bin/wait-for @@ -39,7 +39,7 @@ async def monitor(cmd, args, want_out, want_err, timeout): status = None - async def 
read_stream(p, stream, outstream, pattern): + async def read_stream(stream, outstream, pattern): nonlocal status if stream is None: return @@ -84,8 +84,8 @@ async def monitor(cmd, args, want_out, want_err, timeout): await asyncio.wait_for( asyncio.wait([ asyncio.create_task(process.wait()), - asyncio.create_task(read_stream(process, process.stdout, sys.stdout, out_regex)), - asyncio.create_task(read_stream(process, process.stderr, sys.stderr, err_regex)) + asyncio.create_task(read_stream(process.stdout, sys.stdout, out_regex)), + asyncio.create_task(read_stream(process.stderr, sys.stderr, err_regex)) ]), timeout) if status is None: status = process.returncode diff --git a/test/bin/wait-for-port b/test/bin/wait-for-port index 15408b8e5a0..72f26bf409c 100755 --- a/test/bin/wait-for-port +++ b/test/bin/wait-for-port @@ -54,10 +54,6 @@ def main(argv): if not args.quiet: write_error(ex) sys.exit(1) - else: - sys.exit(0) - - sys.exit(1) if __name__ == "__main__": diff --git a/test/disable-capi b/test/disable-capi index f19bef5314c..b847accae48 100755 --- a/test/disable-capi +++ b/test/disable-capi @@ -5,4 +5,4 @@ THIS_DIR=$(CDPATH= cd -- "$(dirname -- "$0")" && pwd) # shellcheck disable=SC1091 . "${THIS_DIR}/.environment.sh" -yq e 'del(.api.server.online_client)' -i "${CONFIG_YAML}" +yq e 'del(.api.server.online_client)' -i "$CONFIG_YAML" diff --git a/test/enable-capi b/test/enable-capi index ddbf8764c44..59980e6a059 100755 --- a/test/enable-capi +++ b/test/enable-capi @@ -5,7 +5,7 @@ THIS_DIR=$(CDPATH= cd -- "$(dirname -- "$0")" && pwd) # shellcheck disable=SC1091 . 
"${THIS_DIR}/.environment.sh" -online_api_credentials="$(dirname "${CONFIG_YAML}")/online_api_credentials.yaml" +online_api_credentials="$(dirname "$CONFIG_YAML")/online_api_credentials.yaml" export online_api_credentials -yq e '.api.server.online_client.credentials_path=strenv(online_api_credentials)' -i "${CONFIG_YAML}" +yq e '.api.server.online_client.credentials_path=strenv(online_api_credentials)' -i "$CONFIG_YAML" diff --git a/test/instance-crowdsec b/test/instance-crowdsec index d87145c3881..f0cef729693 100755 --- a/test/instance-crowdsec +++ b/test/instance-crowdsec @@ -2,15 +2,15 @@ #shellcheck disable=SC1007 THIS_DIR=$(CDPATH= cd -- "$(dirname -- "$0")" && pwd) -cd "${THIS_DIR}" || exit 1 +cd "$THIS_DIR" || exit 1 # shellcheck disable=SC1091 . ./.environment.sh backend_script="./lib/init/crowdsec-${INIT_BACKEND}" -if [[ ! -x "${backend_script}" ]]; then +if [[ ! -x "$backend_script" ]]; then echo "unknown init system '${INIT_BACKEND}'" >&2 exit 1 fi -exec "${backend_script}" "$@" +exec "$backend_script" "$@" diff --git a/test/instance-data b/test/instance-data index 02742b4ec85..e4e76d3980a 100755 --- a/test/instance-data +++ b/test/instance-data @@ -2,15 +2,15 @@ #shellcheck disable=SC1007 THIS_DIR=$(CDPATH= cd -- "$(dirname -- "$0")" && pwd) -cd "${THIS_DIR}" || exit 1 +cd "$THIS_DIR" || exit 1 # shellcheck disable=SC1091 . ./.environment.sh backend_script="./lib/config/config-${CONFIG_BACKEND}" -if [[ ! -x "${backend_script}" ]]; then +if [[ ! -x "$backend_script" ]]; then echo "unknown config backend '${CONFIG_BACKEND}'" >&2 exit 1 fi -exec "${backend_script}" "$@" +exec "$backend_script" "$@" diff --git a/test/instance-db b/test/instance-db index fbbc18dc433..de09465bc32 100755 --- a/test/instance-db +++ b/test/instance-db @@ -2,7 +2,7 @@ #shellcheck disable=SC1007 THIS_DIR=$(CDPATH= cd -- "$(dirname -- "$0")" && pwd) -cd "${THIS_DIR}" || exit 1 +cd "$THIS_DIR" || exit 1 # shellcheck disable=SC1091 . 
./.environment.sh @@ -10,9 +10,9 @@ cd "${THIS_DIR}" || exit 1 backend_script="./lib/db/instance-${DB_BACKEND}" -if [[ ! -x "${backend_script}" ]]; then +if [[ ! -x "$backend_script" ]]; then echo "unknown database '${DB_BACKEND}'" >&2 exit 1 fi -exec "${backend_script}" "$@" +exec "$backend_script" "$@" diff --git a/test/instance-mock-http b/test/instance-mock-http index cca19b79e3e..b5a56d3489d 100755 --- a/test/instance-mock-http +++ b/test/instance-mock-http @@ -13,7 +13,7 @@ about() { #shellcheck disable=SC1007 THIS_DIR=$(CDPATH= cd -- "$(dirname -- "$0")" && pwd) -cd "${THIS_DIR}" +cd "$THIS_DIR" # shellcheck disable=SC1091 . ./.environment.sh @@ -31,7 +31,7 @@ DAEMON_PID=${PID_DIR}/mock-http.pid start_instance() { [[ $# -lt 1 ]] && about daemonize \ - -p "${DAEMON_PID}" \ + -p "$DAEMON_PID" \ -e "${LOG_DIR}/mock-http.err" \ -o "${LOG_DIR}/mock-http.out" \ /usr/bin/env python3 -u "${THIS_DIR}/bin/mock-http.py" "$1" @@ -40,10 +40,10 @@ start_instance() { } stop_instance() { - if [[ -f "${DAEMON_PID}" ]]; then + if [[ -f "$DAEMON_PID" ]]; then # terminate with extreme prejudice, all the application data will be thrown away anyway - kill -9 "$(cat "${DAEMON_PID}")" > /dev/null 2>&1 - rm -f -- "${DAEMON_PID}" + kill -9 "$(cat "$DAEMON_PID")" > /dev/null 2>&1 + rm -f -- "$DAEMON_PID" fi } diff --git a/test/lib/config/config-local b/test/lib/config/config-local index 693b157f531..76bd4c3fbce 100755 --- a/test/lib/config/config-local +++ b/test/lib/config/config-local @@ -9,7 +9,7 @@ die() { } about() { - die "usage: ${script_name} [make | load | clean]" + die "usage: ${script_name} [make | load | lock | unlock | clean]" } #shellcheck disable=SC1007 @@ -134,7 +134,16 @@ make_init_data() { remove_init_data } +lock_init_data() { + touch "${LOCAL_INIT_DIR}/.lock" +} + +unlock_init_data() { + rm -f "${LOCAL_INIT_DIR}/.lock" +} + load_init_data() { + [[ -f "${LOCAL_INIT_DIR}/.lock" ]] && die "init data is locked" ./bin/assert-crowdsec-not-running || die "Cannot load 
fixture data." if [[ ! -f "${LOCAL_INIT_DIR}/init-config-data.tar" ]]; then @@ -164,6 +173,12 @@ case "$1" in load) load_init_data ;; + lock) + lock_init_data + ;; + unlock) + unlock_init_data + ;; clean) remove_init_data ;; diff --git a/test/run-tests b/test/run-tests index 21b7a7320c5..6fe3bd004e2 100755 --- a/test/run-tests +++ b/test/run-tests @@ -16,14 +16,16 @@ TEST_DIR=$(CDPATH= cd -- "$(dirname -- "$0")" && pwd) echo "Running tests..." echo "DB_BACKEND: ${DB_BACKEND}" -if [[ -z "${TEST_COVERAGE}" ]]; then +if [[ -z "$TEST_COVERAGE" ]]; then echo "Coverage report: no" else echo "Coverage report: yes" fi +[[ -f "$LOCAL_INIT_DIR/.lock" ]] && die "init data is locked: are you doing some manual test? if so, please finish what you are doing, run 'instance-data unlock' and retry" + dump_backend="$(cat "${LOCAL_INIT_DIR}/.backend")" -if [[ "${DB_BACKEND}" != "${dump_backend}" ]]; then +if [[ "$DB_BACKEND" != "$dump_backend" ]]; then die "Can't run with backend '${DB_BACKEND}' because the test data was build with '${dump_backend}'" fi From b6253d567bbe50e6aa146e148a333c0f2e104e51 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 16 May 2024 11:13:39 +0200 Subject: [PATCH 134/318] simulation for local scenarios (#3010) --- pkg/cwhub/hub.go | 26 ++++++++++------ pkg/cwhub/leakybucket.go | 53 --------------------------------- pkg/cwhub/sync.go | 5 +++- pkg/leakybucket/manager_load.go | 16 ++++------ test/bats/50_simulation.bats | 13 ++++++++ 5 files changed, 40 insertions(+), 73 deletions(-) delete mode 100644 pkg/cwhub/leakybucket.go diff --git a/pkg/cwhub/hub.go b/pkg/cwhub/hub.go index 87a6644bc72..e7d927f54b1 100644 --- a/pkg/cwhub/hub.go +++ b/pkg/cwhub/hub.go @@ -17,11 +17,12 @@ import ( // Hub is the main structure for the package. 
type Hub struct { - items HubItems // Items read from HubDir and InstallDir - local *csconfig.LocalHubCfg - remote *RemoteHubCfg - logger *logrus.Logger - Warnings []string // Warnings encountered during sync + items HubItems // Items read from HubDir and InstallDir + pathIndex map[string]*Item + local *csconfig.LocalHubCfg + remote *RemoteHubCfg + logger *logrus.Logger + Warnings []string // Warnings encountered during sync } // GetDataDir returns the data directory, where data sets are installed. @@ -43,9 +44,10 @@ func NewHub(local *csconfig.LocalHubCfg, remote *RemoteHubCfg, updateIndex bool, } hub := &Hub{ - local: local, - remote: remote, - logger: logger, + local: local, + remote: remote, + logger: logger, + pathIndex: make(map[string]*Item, 0), } if updateIndex { @@ -137,7 +139,7 @@ func (h *Hub) ItemStats() []string { } ret := []string{ - fmt.Sprintf("Loaded: %s", loaded), + "Loaded: " + loaded, } if local > 0 || tainted > 0 { @@ -169,6 +171,7 @@ func (h *Hub) addItem(item *Item) { } h.items[item.Type][item.Name] = item + h.pathIndex[item.State.LocalPath] = item } // GetItemMap returns the map of items for a given type. @@ -181,6 +184,11 @@ func (h *Hub) GetItem(itemType string, itemName string) *Item { return h.GetItemMap(itemType)[itemName] } +// GetItemByPath returns an item from hub based on its (absolute) local path. +func (h *Hub) GetItemByPath(itemPath string) *Item { + return h.pathIndex[itemPath] +} + // GetItemFQ returns an item from hub based on its type and name (type:author/name). func (h *Hub) GetItemFQ(itemFQName string) (*Item, error) { // type and name are separated by a colon diff --git a/pkg/cwhub/leakybucket.go b/pkg/cwhub/leakybucket.go deleted file mode 100644 index 8143e9433ee..00000000000 --- a/pkg/cwhub/leakybucket.go +++ /dev/null @@ -1,53 +0,0 @@ -package cwhub - -// Resolve a symlink to find the hub item it points to. 
-// This file is used only by pkg/leakybucket - -import ( - "fmt" - "os" - "path/filepath" - "strings" -) - -// itemKey extracts the map key of an item (i.e. author/name) from its pathname. Follows a symlink if necessary. -func itemKey(itemPath string) (string, error) { - f, err := os.Lstat(itemPath) - if err != nil { - return "", fmt.Errorf("while performing lstat on %s: %w", itemPath, err) - } - - if f.Mode()&os.ModeSymlink == 0 { - // it's not a symlink, so the filename itsef should be the key - return filepath.Base(itemPath), nil - } - - // resolve the symlink to hub file - pathInHub, err := os.Readlink(itemPath) - if err != nil { - return "", fmt.Errorf("while reading symlink of %s: %w", itemPath, err) - } - - author := filepath.Base(filepath.Dir(pathInHub)) - - fname := filepath.Base(pathInHub) - fname = strings.TrimSuffix(fname, ".yaml") - fname = strings.TrimSuffix(fname, ".yml") - - return fmt.Sprintf("%s/%s", author, fname), nil -} - -// GetItemByPath retrieves an item from the hub index based on its local path. 
-func (h *Hub) GetItemByPath(itemType string, itemPath string) (*Item, error) { - itemKey, err := itemKey(itemPath) - if err != nil { - return nil, err - } - - item := h.GetItem(itemType, itemKey) - if item == nil { - return nil, fmt.Errorf("%s not found in %s", itemKey, itemType) - } - - return item, nil -} diff --git a/pkg/cwhub/sync.go b/pkg/cwhub/sync.go index 42db255c883..fd5d6b81220 100644 --- a/pkg/cwhub/sync.go +++ b/pkg/cwhub/sync.go @@ -9,9 +9,10 @@ import ( "strings" "github.com/Masterminds/semver/v3" - "github.com/crowdsecurity/go-cs-lib/downloader" "github.com/sirupsen/logrus" "gopkg.in/yaml.v3" + + "github.com/crowdsecurity/go-cs-lib/downloader" ) func isYAMLFileName(path string) bool { @@ -271,6 +272,8 @@ func (h *Hub) itemVisit(path string, f os.DirEntry, err error) error { return err } + h.pathIndex[path] = item + return nil } diff --git a/pkg/leakybucket/manager_load.go b/pkg/leakybucket/manager_load.go index bc259c18319..c94291100a4 100644 --- a/pkg/leakybucket/manager_load.go +++ b/pkg/leakybucket/manager_load.go @@ -253,7 +253,7 @@ func LoadBuckets(cscfg *csconfig.CrowdsecServiceCfg, hub *cwhub.Hub, files []str ok, err := cwversion.Satisfies(bucketFactory.FormatVersion, cwversion.Constraint_scenario) if err != nil { - return nil, nil, fmt.Errorf("failed to check version : %s", err) + return nil, nil, fmt.Errorf("failed to check version: %w", err) } if !ok { @@ -265,20 +265,16 @@ func LoadBuckets(cscfg *csconfig.CrowdsecServiceCfg, hub *cwhub.Hub, files []str bucketFactory.BucketName = seed.Generate() bucketFactory.ret = response - hubItem, err := hub.GetItemByPath(cwhub.SCENARIOS, bucketFactory.Filename) - if err != nil { - log.Errorf("scenario %s (%s) couldn't be find in hub (ignore if in unit tests)", bucketFactory.Name, bucketFactory.Filename) + hubItem := hub.GetItemByPath(bucketFactory.Filename) + if hubItem == nil { + log.Errorf("scenario %s (%s) could not be found in hub (ignore if in unit tests)", bucketFactory.Name, 
bucketFactory.Filename) } else { if cscfg.SimulationConfig != nil { bucketFactory.Simulated = cscfg.SimulationConfig.IsSimulated(hubItem.Name) } - if hubItem != nil { - bucketFactory.ScenarioVersion = hubItem.State.LocalVersion - bucketFactory.hash = hubItem.State.LocalHash - } else { - log.Errorf("scenario %s (%s) couldn't be find in hub (ignore if in unit tests)", bucketFactory.Name, bucketFactory.Filename) - } + bucketFactory.ScenarioVersion = hubItem.State.LocalVersion + bucketFactory.hash = hubItem.State.LocalHash } bucketFactory.wgDumpState = buckets.wgDumpState diff --git a/test/bats/50_simulation.bats b/test/bats/50_simulation.bats index ab4145551ff..2dc93e62d06 100644 --- a/test/bats/50_simulation.bats +++ b/test/bats/50_simulation.bats @@ -62,6 +62,19 @@ setup() { assert_json '[]' } +@test "simulated local scenario: expect no decision" { + CONFIG_DIR=$(dirname "$CONFIG_YAML") + HUB_DIR=$(config_get '.config_paths.hub_dir') + rune -0 mkdir -p "$CONFIG_DIR"/scenarios + # replace an installed scenario with a local version + rune -0 cp -r "$HUB_DIR"/scenarios/crowdsecurity/ssh-bf.yaml "$CONFIG_DIR"/scenarios/ssh-bf2.yaml + rune -0 cscli scenarios remove crowdsecurity/ssh-bf --force --purge + rune -0 cscli simulation enable crowdsecurity/ssh-bf + fake_log | "$CROWDSEC" -dsn file:///dev/fd/0 -type syslog -no-api + rune -0 cscli decisions list --no-simu -o json + assert_json '[]' +} + @test "global simulation, listing non-simulated: expect no decision" { rune -0 cscli simulation disable crowdsecurity/ssh-bf rune -0 cscli simulation enable --global From 0ba05acc03b6a177fbd14acda6b97216bcbdb13d Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 17 May 2024 10:07:28 +0200 Subject: [PATCH 135/318] tests: replace "docker-compose" -> "docker compose" (#3000) * replace "docker-compose" -> "docker compose" * obsolete doc note * bats: clean up extra output --- Makefile | 15 ++++++++++----- test/README.md | 7 ------- 
test/bats/50_simulation.bats | 6 +++--- 3 files changed, 13 insertions(+), 15 deletions(-) diff --git a/Makefile b/Makefile index 3f271c54ca4..9e21abee02a 100644 --- a/Makefile +++ b/Makefile @@ -220,7 +220,7 @@ export AWS_ACCESS_KEY_ID=test export AWS_SECRET_ACCESS_KEY=test testenv: - @echo 'NOTE: You need Docker, docker-compose and run "make localstack" in a separate shell ("make localstack-stop" to terminate it)' + @echo 'NOTE: You need to run "make localstack" in a separate shell, "make localstack-stop" to terminate it' .PHONY: test test: testenv goversion ## Run unit tests with localstack @@ -230,14 +230,19 @@ test: testenv goversion ## Run unit tests with localstack go-acc: testenv goversion ## Run unit tests with localstack + coverage go-acc ./... -o coverage.out --ignore database,notifications,protobufs,cwversion,cstest,models -- $(LD_OPTS) +check_docker: + @if ! docker info > /dev/null 2>&1; then \ + echo "Could not run 'docker info': check that docker is running, and if you need to run this command with sudo."; \ + fi + # mock AWS services .PHONY: localstack -localstack: ## Run localstack containers (required for unit testing) - docker-compose -f test/localstack/docker-compose.yml up +localstack: check_docker ## Run localstack containers (required for unit testing) + docker compose -f test/localstack/docker-compose.yml up .PHONY: localstack-stop -localstack-stop: ## Stop localstack containers - docker-compose -f test/localstack/docker-compose.yml down +localstack-stop: check_docker ## Stop localstack containers + docker compose -f test/localstack/docker-compose.yml down # build vendor.tgz to be distributed with the release .PHONY: vendor diff --git a/test/README.md b/test/README.md index 4ed132b2cfa..f7b036e7905 100644 --- a/test/README.md +++ b/test/README.md @@ -415,10 +415,3 @@ different syntax. 
Check the heredocs (the </dev/null + cscli parsers install crowdsecurity/syslog-logs --error >/dev/null + cscli parsers install crowdsecurity/dateparse-enrich --error >/dev/null ./instance-crowdsec start } From 1a4ac9d2391c5c53f4038d8fc3cfb2550d126702 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 17 May 2024 14:26:10 +0200 Subject: [PATCH 136/318] replace log.Fatal with error return (#2996) * log.Fatal -> fmt.Errorf * lint --- pkg/apiserver/papi.go | 10 +++++----- pkg/cache/cache.go | 31 +++++++++++++++++++++---------- pkg/csprofiles/csprofiles.go | 3 ++- pkg/hubtest/hubtest_item.go | 5 +++-- pkg/leakybucket/manager_load.go | 6 +++--- pkg/parser/node.go | 2 +- 6 files changed, 35 insertions(+), 22 deletions(-) diff --git a/pkg/apiserver/papi.go b/pkg/apiserver/papi.go index 8dbd1bb9641..169f1441df4 100644 --- a/pkg/apiserver/papi.go +++ b/pkg/apiserver/papi.go @@ -130,7 +130,7 @@ func (p *Papi) handleEvent(event longpollclient.Event, sync bool) error { message := &Message{} if err := json.Unmarshal([]byte(event.Data), message); err != nil { - return fmt.Errorf("polling papi message format is not compatible: %+v: %s", event.Data, err) + return fmt.Errorf("polling papi message format is not compatible: %+v: %w", event.Data, err) } if message.Header == nil { @@ -161,12 +161,12 @@ func (p *Papi) GetPermissions() (PapiPermCheckSuccess, error) { req, err := http.NewRequest(http.MethodGet, papiCheckUrl, nil) if err != nil { - return PapiPermCheckSuccess{}, fmt.Errorf("failed to create request : %s", err) + return PapiPermCheckSuccess{}, fmt.Errorf("failed to create request: %w", err) } resp, err := httpClient.Do(req) if err != nil { - log.Fatalf("failed to get response : %s", err) + return PapiPermCheckSuccess{}, fmt.Errorf("failed to get response: %w", err) } defer resp.Body.Close() @@ -176,7 +176,7 @@ func (p *Papi) GetPermissions() (PapiPermCheckSuccess, error) { err = json.NewDecoder(resp.Body).Decode(&errResp) if err != nil 
{ - return PapiPermCheckSuccess{}, fmt.Errorf("failed to decode response : %s", err) + return PapiPermCheckSuccess{}, fmt.Errorf("failed to decode response: %w", err) } return PapiPermCheckSuccess{}, fmt.Errorf("unable to query PAPI : %s (%d)", errResp.Error, resp.StatusCode) @@ -186,7 +186,7 @@ func (p *Papi) GetPermissions() (PapiPermCheckSuccess, error) { err = json.NewDecoder(resp.Body).Decode(&respBody) if err != nil { - return PapiPermCheckSuccess{}, fmt.Errorf("failed to decode response : %s", err) + return PapiPermCheckSuccess{}, fmt.Errorf("failed to decode response: %w", err) } return respBody, nil diff --git a/pkg/cache/cache.go b/pkg/cache/cache.go index 1fd65dc38c3..5b0dcbdc251 100644 --- a/pkg/cache/cache.go +++ b/pkg/cache/cache.go @@ -2,6 +2,7 @@ package cache import ( "errors" + "fmt" "time" "github.com/bluele/gcache" @@ -11,9 +12,11 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) -var Caches []gcache.Cache -var CacheNames []string -var CacheConfig []CacheCfg +var ( + Caches []gcache.Cache + CacheNames []string + CacheConfig []CacheCfg +) /*prometheus*/ var CacheMetrics = prometheus.NewGaugeVec( @@ -27,6 +30,7 @@ var CacheMetrics = prometheus.NewGaugeVec( // UpdateCacheMetrics is called directly by the prom handler func UpdateCacheMetrics() { CacheMetrics.Reset() + for i, name := range CacheNames { CacheMetrics.With(prometheus.Labels{"name": name, "type": CacheConfig[i].Strategy}).Set(float64(Caches[i].Len(false))) } @@ -42,27 +46,30 @@ type CacheCfg struct { } func CacheInit(cfg CacheCfg) error { - for _, name := range CacheNames { if name == cfg.Name { log.Infof("Cache %s already exists", cfg.Name) } } - //get a default logger + // get a default logger if cfg.LogLevel == nil { cfg.LogLevel = new(log.Level) *cfg.LogLevel = log.InfoLevel } - var clog = log.New() + + clog := log.New() + if err := types.ConfigureLogger(clog); err != nil { - log.Fatalf("While creating cache logger : %s", err) + return fmt.Errorf("while creating cache 
logger: %w", err) } + clog.SetLevel(*cfg.LogLevel) cfg.Logger = clog.WithFields(log.Fields{ "cache": cfg.Name, }) tmpCache := gcache.New(cfg.Size) + switch cfg.Strategy { case "LRU": tmpCache = tmpCache.LRU() @@ -73,7 +80,6 @@ func CacheInit(cfg CacheCfg) error { default: cfg.Strategy = "LRU" tmpCache = tmpCache.LRU() - } CTICache := tmpCache.Build() @@ -85,18 +91,20 @@ func CacheInit(cfg CacheCfg) error { } func SetKey(cacheName string, key string, value string, expiration *time.Duration) error { - for i, name := range CacheNames { if name == cacheName { if expiration == nil { expiration = &CacheConfig[i].TTL } + CacheConfig[i].Logger.Debugf("Setting key %s to %s with expiration %v", key, value, *expiration) + if err := Caches[i].SetWithExpire(key, value, *expiration); err != nil { CacheConfig[i].Logger.Warningf("While setting key %s in cache %s: %s", key, cacheName, err) } } } + return nil } @@ -104,17 +112,20 @@ func GetKey(cacheName string, key string) (string, error) { for i, name := range CacheNames { if name == cacheName { if value, err := Caches[i].Get(key); err != nil { - //do not warn or log if key not found + // do not warn or log if key not found if errors.Is(err, gcache.KeyNotFoundError) { return "", nil } CacheConfig[i].Logger.Warningf("While getting key %s in cache %s: %s", key, cacheName, err) + return "", err } else { return value.(string), nil } } } + log.Warningf("Cache %s not found", cacheName) + return "", nil } diff --git a/pkg/csprofiles/csprofiles.go b/pkg/csprofiles/csprofiles.go index 95fbb356f3d..42509eaceae 100644 --- a/pkg/csprofiles/csprofiles.go +++ b/pkg/csprofiles/csprofiles.go @@ -35,7 +35,7 @@ func NewProfile(profilesCfg []*csconfig.ProfileCfg) ([]*Runtime, error) { xlog := log.New() if err := types.ConfigureLogger(xlog); err != nil { - log.Fatalf("While creating profiles-specific logger : %s", err) + return nil, fmt.Errorf("while configuring profiles-specific logger: %w", err) } xlog.SetLevel(log.InfoLevel) @@ -196,6 +196,7 @@ 
func (Profile *Runtime) EvaluateProfile(Alert *models.Alert) ([]*models.Decision decisions = append(decisions, subdecisions...) } else { Profile.Logger.Debugf("Profile %s filter is unsuccessful", Profile.Cfg.Name) + if Profile.Cfg.OnFailure == "break" { break } diff --git a/pkg/hubtest/hubtest_item.go b/pkg/hubtest/hubtest_item.go index 1a2b4863105..918a10f62e1 100644 --- a/pkg/hubtest/hubtest_item.go +++ b/pkg/hubtest/hubtest_item.go @@ -212,7 +212,7 @@ func (t *HubTestItem) InstallHub() error { // load installed hub hub, err := cwhub.NewHub(t.RuntimeHubConfig, nil, false, nil) if err != nil { - log.Fatal(err) + return err } // install data for parsers if needed @@ -327,7 +327,8 @@ func (t *HubTestItem) RunWithNucleiTemplate() error { nucleiConfig := NucleiConfig{ Path: "nuclei", OutputDir: t.RuntimePath, - CmdLineOptions: []string{"-ev", // allow variables from environment + CmdLineOptions: []string{ + "-ev", // allow variables from environment "-nc", // no colors in output "-dresp", // dump response "-j", // json output diff --git a/pkg/leakybucket/manager_load.go b/pkg/leakybucket/manager_load.go index c94291100a4..3e47f1fc16f 100644 --- a/pkg/leakybucket/manager_load.go +++ b/pkg/leakybucket/manager_load.go @@ -308,7 +308,7 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error { if bucketFactory.Debug { clog := log.New() if err := types.ConfigureLogger(clog); err != nil { - log.Fatalf("While creating bucket-specific logger : %s", err) + return fmt.Errorf("while creating bucket-specific logger: %w", err) } clog.SetLevel(log.DebugLevel) @@ -462,7 +462,7 @@ func LoadBucketsState(file string, buckets *Buckets, bucketFactories []BucketFac val, ok := buckets.Bucket_map.Load(k) if ok { - log.Fatalf("key %s already exists : %+v", k, val) + return fmt.Errorf("key %s already exists: %+v", k, val) } // find back our holder found := false @@ -502,7 +502,7 @@ func LoadBucketsState(file string, buckets *Buckets, bucketFactories []BucketFac } if !found { - 
log.Fatalf("Unable to find holder for bucket %s : %s", k, spew.Sdump(v)) + return fmt.Errorf("unable to find holder for bucket %s: %s", k, spew.Sdump(v)) } } diff --git a/pkg/parser/node.go b/pkg/parser/node.go index 0906f0b5143..244f361d6b8 100644 --- a/pkg/parser/node.go +++ b/pkg/parser/node.go @@ -447,7 +447,7 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error { if n.Debug { clog := log.New() if err = types.ConfigureLogger(clog); err != nil { - log.Fatalf("While creating bucket-specific logger : %s", err) + return fmt.Errorf("while creating bucket-specific logger: %w", err) } clog.SetLevel(log.DebugLevel) From 20e44cd18ab0ec408108b83616303ff64218da34 Mon Sep 17 00:00:00 2001 From: blotus Date: Fri, 17 May 2024 16:16:04 +0200 Subject: [PATCH 137/318] appsec: add files and cookies related zones (#2956) --- .../modules/appsec/appsec_rules_test.go | 128 ++++++++++++++++++ pkg/appsec/appsec_rule/modsecurity.go | 29 ++-- 2 files changed, 145 insertions(+), 12 deletions(-) diff --git a/pkg/acquisition/modules/appsec/appsec_rules_test.go b/pkg/acquisition/modules/appsec/appsec_rules_test.go index 3c48c50fabb..b25e4465f0f 100644 --- a/pkg/acquisition/modules/appsec/appsec_rules_test.go +++ b/pkg/acquisition/modules/appsec/appsec_rules_test.go @@ -230,6 +230,134 @@ func TestAppsecRuleMatches(t *testing.T) { require.Equal(t, appsec.AllowRemediation, appsecResponse.Action) }, }, + { + name: "Basic matching in cookies", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"COOKIES"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Headers: http.Header{"Cookie": []string{"foo=toto"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode 
int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + + require.Equal(t, types.LOG, events[1].Type) + require.True(t, events[1].Appsec.HasInBandMatches) + require.Len(t, events[1].Appsec.MatchedRules, 1) + require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + + require.Len(t, responses, 1) + require.True(t, responses[0].InBandInterrupt) + }, + }, + { + name: "Basic matching in all cookies", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"COOKIES"}, + Match: appsec_rule.Match{Type: "regex", Value: "^tutu"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Headers: http.Header{"Cookie": []string{"foo=toto; bar=tutu"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + + require.Equal(t, types.LOG, events[1].Type) + require.True(t, events[1].Appsec.HasInBandMatches) + require.Len(t, events[1].Appsec.MatchedRules, 1) + require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + + require.Len(t, responses, 1) + require.True(t, responses[0].InBandInterrupt) + }, + }, + { + name: "Basic matching in cookie name", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"COOKIES_NAMES"}, + Match: appsec_rule.Match{Type: "regex", Value: "^tutu"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Headers: http.Header{"Cookie": []string{"bar=tutu; tututata=toto"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, 
types.APPSEC, events[0].Type) + + require.Equal(t, types.LOG, events[1].Type) + require.True(t, events[1].Appsec.HasInBandMatches) + require.Len(t, events[1].Appsec.MatchedRules, 1) + require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + + require.Len(t, responses, 1) + require.True(t, responses[0].InBandInterrupt) + }, + }, + { + name: "Basic matching in multipart file name", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"FILES"}, + Match: appsec_rule.Match{Type: "regex", Value: "\\.php$"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Headers: http.Header{"Content-Type": []string{"multipart/form-data; boundary=boundary"}}, + Body: []byte(` +--boundary +Content-Disposition: form-data; name="foo"; filename="bar.php" +Content-Type: application/octet-stream + +toto +--boundary--`), + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + + require.Equal(t, types.LOG, events[1].Type) + require.True(t, events[1].Appsec.HasInBandMatches) + require.Len(t, events[1].Appsec.MatchedRules, 1) + require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + + require.Len(t, responses, 1) + require.True(t, responses[0].InBandInterrupt) + }, + }, } for _, test := range tests { diff --git a/pkg/appsec/appsec_rule/modsecurity.go b/pkg/appsec/appsec_rule/modsecurity.go index a269384ccb9..03a840cf436 100644 --- a/pkg/appsec/appsec_rule/modsecurity.go +++ b/pkg/appsec/appsec_rule/modsecurity.go @@ -11,18 +11,23 @@ type ModsecurityRule struct { } var zonesMap map[string]string = map[string]string{ - "ARGS": "ARGS_GET", - "ARGS_NAMES": "ARGS_GET_NAMES", - "BODY_ARGS": "ARGS_POST", - "BODY_ARGS_NAMES": "ARGS_POST_NAMES", - "HEADERS_NAMES": 
"REQUEST_HEADERS_NAMES", - "HEADERS": "REQUEST_HEADERS", - "METHOD": "REQUEST_METHOD", - "PROTOCOL": "REQUEST_PROTOCOL", - "URI": "REQUEST_FILENAME", - "URI_FULL": "REQUEST_URI", - "RAW_BODY": "REQUEST_BODY", - "FILENAMES": "FILES", + "ARGS": "ARGS_GET", + "ARGS_NAMES": "ARGS_GET_NAMES", + "BODY_ARGS": "ARGS_POST", + "BODY_ARGS_NAMES": "ARGS_POST_NAMES", + "COOKIES": "REQUEST_COOKIES", + "COOKIES_NAMES": "REQUEST_COOKIES_NAMES", + "FILES": "FILES", + "FILES_NAMES": "FILES_NAMES", + "FILES_TOTAL_SIZE": "FILES_COMBINED_SIZE", + "HEADERS_NAMES": "REQUEST_HEADERS_NAMES", + "HEADERS": "REQUEST_HEADERS", + "METHOD": "REQUEST_METHOD", + "PROTOCOL": "REQUEST_PROTOCOL", + "URI": "REQUEST_FILENAME", + "URI_FULL": "REQUEST_URI", + "RAW_BODY": "REQUEST_BODY", + "FILENAMES": "FILES", } var transformMap map[string]string = map[string]string{ From 572b387fce5ad9f11e56e6e104a576e1b612bc57 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 24 May 2024 10:20:47 +0200 Subject: [PATCH 138/318] CI: improve code coverage report (#3029) --- .github/codecov-ignore-generated.sh | 11 ++ .github/codecov.yml | 129 +++++++++++++++++++++ .github/workflows/bats-sqlite-coverage.yml | 5 +- .github/workflows/bats.yml | 2 + 4 files changed, 146 insertions(+), 1 deletion(-) create mode 100755 .github/codecov-ignore-generated.sh diff --git a/.github/codecov-ignore-generated.sh b/.github/codecov-ignore-generated.sh new file mode 100755 index 00000000000..3c896d47be7 --- /dev/null +++ b/.github/codecov-ignore-generated.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +# Run this from the repository root: +# +# .github/codecov-ignore-generated.sh >> .github/codecov.yml + +find . 
-name "*.go" | while read -r file; do + if head -n 1 "$file" | grep -q "Code generated by"; then + echo " - \"$file\"" + fi +done diff --git a/.github/codecov.yml b/.github/codecov.yml index 5f721427d7a..82598c15511 100644 --- a/.github/codecov.yml +++ b/.github/codecov.yml @@ -8,3 +8,132 @@ coverage: project: default: target: 0% + +# if a directory is ignored, there is no way to un-ignore files like pkg/models/helpers.go +# so we make a full list, manually updated - but it could be generated right before running codecov +ignore: + - "./pkg/modelscapi/success_response.go" + - "./pkg/modelscapi/get_decisions_stream_response_deleted.go" + - "./pkg/modelscapi/login_request.go" + - "./pkg/modelscapi/get_decisions_stream_response_links.go" + - "./pkg/modelscapi/login_response.go" + - "./pkg/modelscapi/add_signals_request_item.go" + - "./pkg/modelscapi/blocklist_link.go" + - "./pkg/modelscapi/get_decisions_stream_response_deleted_item.go" + - "./pkg/modelscapi/decisions_sync_request.go" + - "./pkg/modelscapi/get_decisions_stream_response.go" + - "./pkg/modelscapi/metrics_request_machines_item.go" + - "./pkg/modelscapi/metrics_request.go" + - "./pkg/modelscapi/get_decisions_stream_response_new.go" + - "./pkg/modelscapi/add_signals_request_item_decisions_item.go" + - "./pkg/modelscapi/metrics_request_bouncers_item.go" + - "./pkg/modelscapi/decisions_sync_request_item_decisions_item.go" + - "./pkg/modelscapi/decisions_delete_request_item.go" + - "./pkg/modelscapi/get_decisions_stream_response_new_item.go" + - "./pkg/modelscapi/decisions_sync_request_item.go" + - "./pkg/modelscapi/add_signals_request.go" + - "./pkg/modelscapi/reset_password_request.go" + - "./pkg/modelscapi/add_signals_request_item_decisions.go" + - "./pkg/modelscapi/decisions_sync_request_item_source.go" + - "./pkg/modelscapi/error_response.go" + - "./pkg/modelscapi/decisions_delete_request.go" + - "./pkg/modelscapi/decisions_sync_request_item_decisions.go" + - "./pkg/modelscapi/enroll_request.go" + - 
"./pkg/modelscapi/register_request.go" + - "./pkg/modelscapi/add_signals_request_item_source.go" + - "./pkg/models/alert.go" + - "./pkg/models/metrics_bouncer_info.go" + - "./pkg/models/add_signals_request_item.go" + - "./pkg/models/add_signals_request_item_decisions_item.go" + - "./pkg/models/get_alerts_response.go" + - "./pkg/models/watcher_auth_request.go" + - "./pkg/models/add_alerts_request.go" + - "./pkg/models/event.go" + - "./pkg/models/decisions_delete_request_item.go" + - "./pkg/models/meta.go" + - "./pkg/models/delete_alerts_response.go" + - "./pkg/models/topx_response.go" + - "./pkg/models/add_signals_request.go" + - "./pkg/models/delete_decision_response.go" + - "./pkg/models/get_decisions_response.go" + - "./pkg/models/add_signals_request_item_decisions.go" + - "./pkg/models/source.go" + - "./pkg/models/decisions_stream_response.go" + - "./pkg/models/error_response.go" + - "./pkg/models/decision.go" + - "./pkg/models/decisions_delete_request.go" + - "./pkg/models/flush_decision_response.go" + - "./pkg/models/watcher_auth_response.go" + - "./pkg/models/watcher_registration_request.go" + - "./pkg/models/metrics_agent_info.go" + - "./pkg/models/add_signals_request_item_source.go" + - "./pkg/models/add_alerts_response.go" + - "./pkg/models/metrics.go" + - "./pkg/protobufs/notifier.pb.go" + - "./pkg/database/ent/machine_delete.go" + - "./pkg/database/ent/decision_query.go" + - "./pkg/database/ent/meta_query.go" + - "./pkg/database/ent/machine_create.go" + - "./pkg/database/ent/alert.go" + - "./pkg/database/ent/event_update.go" + - "./pkg/database/ent/alert_create.go" + - "./pkg/database/ent/alert_query.go" + - "./pkg/database/ent/lock_create.go" + - "./pkg/database/ent/bouncer_update.go" + - "./pkg/database/ent/meta_update.go" + - "./pkg/database/ent/decision_create.go" + - "./pkg/database/ent/configitem_update.go" + - "./pkg/database/ent/machine_query.go" + - "./pkg/database/ent/client.go" + - "./pkg/database/ent/predicate/predicate.go" + - 
"./pkg/database/ent/lock/where.go" + - "./pkg/database/ent/lock/lock.go" + - "./pkg/database/ent/mutation.go" + - "./pkg/database/ent/migrate/migrate.go" + - "./pkg/database/ent/migrate/schema.go" + - "./pkg/database/ent/configitem.go" + - "./pkg/database/ent/event.go" + - "./pkg/database/ent/event_query.go" + - "./pkg/database/ent/lock_update.go" + - "./pkg/database/ent/meta.go" + - "./pkg/database/ent/configitem_query.go" + - "./pkg/database/ent/bouncer.go" + - "./pkg/database/ent/alert_update.go" + - "./pkg/database/ent/meta/meta.go" + - "./pkg/database/ent/meta/where.go" + - "./pkg/database/ent/decision_update.go" + - "./pkg/database/ent/alert_delete.go" + - "./pkg/database/ent/lock.go" + - "./pkg/database/ent/runtime/runtime.go" + - "./pkg/database/ent/alert/alert.go" + - "./pkg/database/ent/alert/where.go" + - "./pkg/database/ent/runtime.go" + - "./pkg/database/ent/bouncer/bouncer.go" + - "./pkg/database/ent/bouncer/where.go" + - "./pkg/database/ent/hook/hook.go" + - "./pkg/database/ent/configitem_create.go" + - "./pkg/database/ent/configitem_delete.go" + - "./pkg/database/ent/tx.go" + - "./pkg/database/ent/decision.go" + - "./pkg/database/ent/lock_delete.go" + - "./pkg/database/ent/decision_delete.go" + - "./pkg/database/ent/machine/where.go" + - "./pkg/database/ent/machine/machine.go" + - "./pkg/database/ent/event_create.go" + - "./pkg/database/ent/decision/where.go" + - "./pkg/database/ent/decision/decision.go" + - "./pkg/database/ent/enttest/enttest.go" + - "./pkg/database/ent/lock_query.go" + - "./pkg/database/ent/bouncer_create.go" + - "./pkg/database/ent/event_delete.go" + - "./pkg/database/ent/bouncer_delete.go" + - "./pkg/database/ent/event/event.go" + - "./pkg/database/ent/event/where.go" + - "./pkg/database/ent/machine.go" + - "./pkg/database/ent/ent.go" + - "./pkg/database/ent/meta_create.go" + - "./pkg/database/ent/bouncer_query.go" + - "./pkg/database/ent/meta_delete.go" + - "./pkg/database/ent/machine_update.go" + - 
"./pkg/database/ent/configitem/configitem.go" + - "./pkg/database/ent/configitem/where.go" diff --git a/.github/workflows/bats-sqlite-coverage.yml b/.github/workflows/bats-sqlite-coverage.yml index 4f724656ed6..0d9906d11f0 100644 --- a/.github/workflows/bats-sqlite-coverage.yml +++ b/.github/workflows/bats-sqlite-coverage.yml @@ -2,6 +2,9 @@ name: (sub) Bats / sqlite + coverage on: workflow_call: + secrets: + CODECOV_TOKEN: + required: true env: TEST_COVERAGE: true @@ -76,7 +79,7 @@ jobs: run: for file in $(find ./test/local/var/log -type f); do echo ">>>>> $file"; cat $file; echo; done if: ${{ always() }} - - name: Upload crowdsec coverage to codecov + - name: Upload bats coverage to codecov uses: codecov/codecov-action@v4 with: files: ./coverage-bats.out diff --git a/.github/workflows/bats.yml b/.github/workflows/bats.yml index 0ce8cf041ed..59976bad87d 100644 --- a/.github/workflows/bats.yml +++ b/.github/workflows/bats.yml @@ -28,6 +28,8 @@ on: jobs: sqlite: uses: ./.github/workflows/bats-sqlite-coverage.yml + secrets: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} # Jobs for Postgres (and sometimes MySQL) can have failing tests on GitHub # CI, but they pass when run on devs' machines or in the release checks. 
We From 816608daf42d9de8d6447f823599302fb45f328d Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 24 May 2024 11:17:52 +0200 Subject: [PATCH 139/318] cscli: faster table rendering (#3027) * cscli: use go-pretty for tables * lint --- cmd/crowdsec-cli/bouncers_table.go | 3 +- cmd/crowdsec-cli/console_table.go | 3 +- cmd/crowdsec-cli/hubtest_table.go | 3 +- cmd/crowdsec-cli/machines_table.go | 3 +- cmd/crowdsec-cli/metrics_table.go | 9 +- cmd/crowdsec-cli/notifications_table.go | 3 +- cmd/crowdsec-cli/prettytable.go | 147 ++++++++++++++++++++++++ cmd/crowdsec-cli/table/align.go | 12 ++ cmd/crowdsec-cli/tables.go | 77 ++----------- cmd/crowdsec-cli/utils_table.go | 3 +- go.mod | 4 +- go.sum | 8 +- 12 files changed, 183 insertions(+), 92 deletions(-) create mode 100644 cmd/crowdsec-cli/prettytable.go create mode 100644 cmd/crowdsec-cli/table/align.go diff --git a/cmd/crowdsec-cli/bouncers_table.go b/cmd/crowdsec-cli/bouncers_table.go index 5fe48b49047..417eb9e8e0b 100644 --- a/cmd/crowdsec-cli/bouncers_table.go +++ b/cmd/crowdsec-cli/bouncers_table.go @@ -4,8 +4,7 @@ import ( "io" "time" - "github.com/aquasecurity/table" - + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/table" "github.com/crowdsecurity/crowdsec/pkg/database/ent" "github.com/crowdsecurity/crowdsec/pkg/emoji" ) diff --git a/cmd/crowdsec-cli/console_table.go b/cmd/crowdsec-cli/console_table.go index 8f7ebb2100c..4623f3bb62a 100644 --- a/cmd/crowdsec-cli/console_table.go +++ b/cmd/crowdsec-cli/console_table.go @@ -3,8 +3,7 @@ package main import ( "io" - "github.com/aquasecurity/table" - + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/table" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/emoji" ) diff --git a/cmd/crowdsec-cli/hubtest_table.go b/cmd/crowdsec-cli/hubtest_table.go index e6c5ee80abd..1b76e75263e 100644 --- a/cmd/crowdsec-cli/hubtest_table.go +++ b/cmd/crowdsec-cli/hubtest_table.go @@ -4,8 +4,7 @@ 
import ( "fmt" "io" - "github.com/aquasecurity/table" - + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/table" "github.com/crowdsecurity/crowdsec/pkg/emoji" "github.com/crowdsecurity/crowdsec/pkg/hubtest" ) diff --git a/cmd/crowdsec-cli/machines_table.go b/cmd/crowdsec-cli/machines_table.go index 120929ea654..18e16bbde3a 100644 --- a/cmd/crowdsec-cli/machines_table.go +++ b/cmd/crowdsec-cli/machines_table.go @@ -4,8 +4,7 @@ import ( "io" "time" - "github.com/aquasecurity/table" - + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/table" "github.com/crowdsecurity/crowdsec/pkg/database/ent" "github.com/crowdsecurity/crowdsec/pkg/emoji" ) diff --git a/cmd/crowdsec-cli/metrics_table.go b/cmd/crowdsec-cli/metrics_table.go index 689929500ad..f42d5bdaf91 100644 --- a/cmd/crowdsec-cli/metrics_table.go +++ b/cmd/crowdsec-cli/metrics_table.go @@ -7,16 +7,17 @@ import ( "sort" "strconv" - "github.com/aquasecurity/table" log "github.com/sirupsen/logrus" "github.com/crowdsecurity/go-cs-lib/maptools" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/table" ) // ErrNilTable means a nil pointer was passed instead of a table instance. This is a programming error. 
var ErrNilTable = errors.New("nil table") -func lapiMetricsToTable(t *table.Table, stats map[string]map[string]map[string]int) int { +func lapiMetricsToTable(t *Table, stats map[string]map[string]map[string]int) int { // stats: machine -> route -> method -> count // sort keys to keep consistent order when printing machineKeys := []string{} @@ -54,7 +55,7 @@ func lapiMetricsToTable(t *table.Table, stats map[string]map[string]map[string]i return numRows } -func wlMetricsToTable(t *table.Table, stats map[string]map[string]map[string]int, noUnit bool) (int, error) { +func wlMetricsToTable(t *Table, stats map[string]map[string]map[string]int, noUnit bool) (int, error) { if t == nil { return 0, ErrNilTable } @@ -92,7 +93,7 @@ func wlMetricsToTable(t *table.Table, stats map[string]map[string]map[string]int return numRows, nil } -func metricsToTable(t *table.Table, stats map[string]map[string]int, keys []string, noUnit bool) (int, error) { +func metricsToTable(t *Table, stats map[string]map[string]int, keys []string, noUnit bool) (int, error) { if t == nil { return 0, ErrNilTable } diff --git a/cmd/crowdsec-cli/notifications_table.go b/cmd/crowdsec-cli/notifications_table.go index 19d11cea741..b96c8ca4783 100644 --- a/cmd/crowdsec-cli/notifications_table.go +++ b/cmd/crowdsec-cli/notifications_table.go @@ -5,8 +5,7 @@ import ( "sort" "strings" - "github.com/aquasecurity/table" - + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/table" "github.com/crowdsecurity/crowdsec/pkg/emoji" ) diff --git a/cmd/crowdsec-cli/prettytable.go b/cmd/crowdsec-cli/prettytable.go new file mode 100644 index 00000000000..0ce7e9755f9 --- /dev/null +++ b/cmd/crowdsec-cli/prettytable.go @@ -0,0 +1,147 @@ +package main + +// transisional file to keep (minimal) backwards compatibility with the old table +// we can migrate the code to the new dependency later, it can already use the Writer interface + +import ( + "fmt" + "io" + + "github.com/jedib0t/go-pretty/v6/table" + 
"github.com/jedib0t/go-pretty/v6/text" +) + +type Table struct { + Writer table.Writer + output io.Writer + align []text.Align + alignHeader []text.Align +} + +func newTable(out io.Writer) *Table { + if out == nil { + panic("newTable: out is nil") + } + + t := table.NewWriter() + + // colorize output, use unicode box characters + fancy := shouldWeColorize() + + color := table.ColorOptions{} + + if fancy { + color.Header = text.Colors{text.Italic} + color.Border = text.Colors{text.FgHiBlack} + color.Separator = text.Colors{text.FgHiBlack} + } + + // no upper/lower case transformations + format := table.FormatOptions{} + + box := table.StyleBoxDefault + if fancy { + box = table.StyleBoxRounded + } + + style := table.Style{ + Box: box, + Color: color, + Format: format, + HTML: table.DefaultHTMLOptions, + Options: table.OptionsDefault, + Title: table.TitleOptionsDefault, + } + + t.SetStyle(style) + + return &Table{ + Writer: t, + output: out, + align: make([]text.Align, 0), + alignHeader: make([]text.Align, 0), + } +} + +func newLightTable(output io.Writer) *Table { + t := newTable(output) + s := t.Writer.Style() + s.Box.Left = "" + s.Box.LeftSeparator = "" + s.Box.TopLeft = "" + s.Box.BottomLeft = "" + s.Box.Right = "" + s.Box.RightSeparator = "" + s.Box.TopRight = "" + s.Box.BottomRight = "" + s.Options.SeparateRows = false + s.Options.SeparateFooter = false + s.Options.SeparateHeader = true + s.Options.SeparateColumns = false + + return t +} + +// +// wrapper methods for backwards compatibility +// + +// setColumnConfigs must be called right before rendering, +// to allow for setting the alignment like the old API +func (t *Table) setColumnConfigs() { + configs := []table.ColumnConfig{} + // the go-pretty table does not expose the names or number of columns + for i := 0; i < len(t.align); i++ { + configs = append(configs, table.ColumnConfig{ + Number: i + 1, + AlignHeader: t.alignHeader[i], + Align: t.align[i], + WidthMax: 60, + WidthMaxEnforcer: text.WrapSoft, + }) 
+ } + t.Writer.SetColumnConfigs(configs) +} + +func (t *Table) Render() { + // change default options for backwards compatibility. + // we do this late to allow changing the alignment like the old API + t.setColumnConfigs() + fmt.Fprintln(t.output, t.Writer.Render()) +} + +func (t *Table) SetHeaders(str ...string) { + row := table.Row{} + t.align = make([]text.Align, len(str)) + t.alignHeader = make([]text.Align, len(str)) + + for i, v := range str { + row = append(row, v) + t.align[i] = text.AlignLeft + t.alignHeader[i] = text.AlignCenter + } + + t.Writer.AppendHeader(row) +} + +func (t *Table) AddRow(str ...string) { + row := table.Row{} + for _, v := range str { + row = append(row, v) + } + + t.Writer.AppendRow(row) +} + +func (t *Table) SetRowLines(rowLines bool) { + t.Writer.Style().Options.SeparateRows = rowLines +} + +func (t *Table) SetAlignment(align ...text.Align) { + // align can be shorter than t.align, it will leave the default value + copy(t.align, align) +} + +func (t *Table) SetHeaderAlignment(align ...text.Align) { + copy(t.alignHeader, align) +} diff --git a/cmd/crowdsec-cli/table/align.go b/cmd/crowdsec-cli/table/align.go new file mode 100644 index 00000000000..e0582007c57 --- /dev/null +++ b/cmd/crowdsec-cli/table/align.go @@ -0,0 +1,12 @@ +package table + +import ( + "github.com/jedib0t/go-pretty/v6/text" +) + +// temporary, backward compatibility only + +const ( + AlignLeft = text.AlignLeft + AlignRight = text.AlignRight +) diff --git a/cmd/crowdsec-cli/tables.go b/cmd/crowdsec-cli/tables.go index 2c3173d0b0b..e6dba0c2644 100644 --- a/cmd/crowdsec-cli/tables.go +++ b/cmd/crowdsec-cli/tables.go @@ -5,91 +5,28 @@ import ( "io" "os" - "github.com/aquasecurity/table" isatty "github.com/mattn/go-isatty" ) func shouldWeColorize() bool { - if csConfig.Cscli.Color == "yes" { + switch csConfig.Cscli.Color { + case "yes": return true - } - if csConfig.Cscli.Color == "no" { + case "no": return false + default: + return isatty.IsTerminal(os.Stdout.Fd()) 
|| isatty.IsCygwinTerminal(os.Stdout.Fd()) } - return isatty.IsTerminal(os.Stdout.Fd()) || isatty.IsCygwinTerminal(os.Stdout.Fd()) -} - -func newTable(out io.Writer) *table.Table { - if out == nil { - panic("newTable: out is nil") - } - t := table.New(out) - if shouldWeColorize() { - t.SetLineStyle(table.StyleBrightBlack) - t.SetHeaderStyle(table.StyleItalic) - } - - if shouldWeColorize() { - t.SetDividers(table.UnicodeRoundedDividers) - } else { - t.SetDividers(table.ASCIIDividers) - } - - return t -} - -func newLightTable(out io.Writer) *table.Table { - if out == nil { - panic("newTable: out is nil") - } - t := newTable(out) - t.SetRowLines(false) - t.SetBorderLeft(false) - t.SetBorderRight(false) - // This leaves three spaces between columns: - // left padding, invisible border, right padding - // There is no way to make two spaces without - // a SetColumnLines() method, but it's close enough. - t.SetPadding(1) - - if shouldWeColorize() { - t.SetDividers(table.Dividers{ - ALL: "─", - NES: "─", - NSW: "─", - NEW: "─", - ESW: "─", - NE: "─", - NW: "─", - SW: "─", - ES: "─", - EW: "─", - NS: " ", - }) - } else { - t.SetDividers(table.Dividers{ - ALL: "-", - NES: "-", - NSW: "-", - NEW: "-", - ESW: "-", - NE: "-", - NW: "-", - SW: "-", - ES: "-", - EW: "-", - NS: " ", - }) - } - return t } func renderTableTitle(out io.Writer, title string) { if out == nil { panic("renderTableTitle: out is nil") } + if title == "" { return } + fmt.Fprintln(out, title) } diff --git a/cmd/crowdsec-cli/utils_table.go b/cmd/crowdsec-cli/utils_table.go index 23bcff4e5c6..d7d26a65c12 100644 --- a/cmd/crowdsec-cli/utils_table.go +++ b/cmd/crowdsec-cli/utils_table.go @@ -5,8 +5,7 @@ import ( "io" "strconv" - "github.com/aquasecurity/table" - + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/table" "github.com/crowdsecurity/crowdsec/pkg/cwhub" "github.com/crowdsecurity/crowdsec/pkg/emoji" ) diff --git a/go.mod b/go.mod index 49cfe05ad39..93f3e4cd806 100644 --- a/go.mod +++ b/go.mod @@ 
-15,7 +15,6 @@ require ( github.com/alexliesenfeld/health v0.8.0 github.com/antonmedv/expr v1.15.3 github.com/appleboy/gin-jwt/v2 v2.9.2 - github.com/aquasecurity/table v1.8.0 github.com/aws/aws-lambda-go v1.47.0 github.com/aws/aws-sdk-go v1.52.0 github.com/beevik/etree v1.3.0 @@ -58,6 +57,7 @@ require ( github.com/ivanpirog/coloredcobra v1.0.1 github.com/jackc/pgx/v4 v4.18.2 github.com/jarcoal/httpmock v1.1.0 + github.com/jedib0t/go-pretty/v6 v6.5.9 github.com/jszwec/csvutil v1.5.1 github.com/lithammer/dedent v1.1.0 github.com/mattn/go-isatty v0.0.20 @@ -155,7 +155,7 @@ require ( github.com/magefile/mage v1.15.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect - github.com/mattn/go-runewidth v0.0.13 // indirect + github.com/mattn/go-runewidth v0.0.15 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect github.com/mitchellh/copystructure v1.2.0 // indirect diff --git a/go.sum b/go.sum index 05b38f68384..310cbd85ab7 100644 --- a/go.sum +++ b/go.sum @@ -47,8 +47,6 @@ github.com/appleboy/gin-jwt/v2 v2.9.2 h1:GeS3lm9mb9HMmj7+GNjYUtpp3V1DAQ1TkUFa5po github.com/appleboy/gin-jwt/v2 v2.9.2/go.mod h1:mxGjKt9Lrx9Xusy1SrnmsCJMZG6UJwmdHN9bN27/QDw= github.com/appleboy/gofight/v2 v2.1.2 h1:VOy3jow4vIK8BRQJoC/I9muxyYlJ2yb9ht2hZoS3rf4= github.com/appleboy/gofight/v2 v2.1.2/go.mod h1:frW+U1QZEdDgixycTj4CygQ48yLTUhplt43+Wczp3rw= -github.com/aquasecurity/table v1.8.0 h1:9ntpSwrUfjrM6/YviArlx/ZBGd6ix8W+MtojQcM7tv0= -github.com/aquasecurity/table v1.8.0/go.mod h1:eqOmvjjB7AhXFgFqpJUEE/ietg7RrMSJZXyTN8E/wZw= github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod 
h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= @@ -406,6 +404,8 @@ github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0f github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jarcoal/httpmock v1.1.0 h1:F47ChZj1Y2zFsCXxNkBPwNNKnAyOATcdQibk0qEdVCE= github.com/jarcoal/httpmock v1.1.0/go.mod h1:ATjnClrvW/3tijVmpL/va5Z3aAyGvqU3gCT8nX0Txik= +github.com/jedib0t/go-pretty/v6 v6.5.9 h1:ACteMBRrrmm1gMsXe9PSTOClQ63IXDUt03H5U+UV8OU= +github.com/jedib0t/go-pretty/v6 v6.5.9/go.mod h1:zbn98qrYlh95FIhwwsbIip0LYpwSG8SUOScs+v9/t0E= github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE= github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= @@ -491,8 +491,8 @@ github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27k github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU= -github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= +github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y= github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= From 45b403ecd7ff754b8ad45ba904b4e90ab3f941af Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: 
Fri, 24 May 2024 14:37:46 +0200 Subject: [PATCH 140/318] fix nil deref w/ malformed cti config (#3028) --- cmd/crowdsec/serve.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/crowdsec/serve.go b/cmd/crowdsec/serve.go index 9da3d80106a..a27622a641a 100644 --- a/cmd/crowdsec/serve.go +++ b/cmd/crowdsec/serve.go @@ -334,7 +334,7 @@ func Serve(cConfig *csconfig.Config, agentReady chan bool) error { log.Warningln("Exprhelpers loaded without database client.") } - if cConfig.API.CTI != nil && *cConfig.API.CTI.Enabled { + if cConfig.API.CTI != nil && cConfig.API.CTI.Enabled != nil && *cConfig.API.CTI.Enabled { log.Infof("Crowdsec CTI helper enabled") if err := exprhelpers.InitCrowdsecCTI(cConfig.API.CTI.Key, cConfig.API.CTI.CacheTimeout, cConfig.API.CTI.CacheSize, cConfig.API.CTI.LogLevel); err != nil { From 09afcbe93a5a36ce25553f7ebc8c42cee0b98098 Mon Sep 17 00:00:00 2001 From: blotus Date: Fri, 24 May 2024 14:42:17 +0200 Subject: [PATCH 141/318] appsec: respect on_success parameter in hooks (#3017) --- .../modules/appsec/appsec_hooks_test.go | 198 +++++++++++++++++- pkg/appsec/appsec.go | 34 ++- 2 files changed, 221 insertions(+), 11 deletions(-) diff --git a/pkg/acquisition/modules/appsec/appsec_hooks_test.go b/pkg/acquisition/modules/appsec/appsec_hooks_test.go index 3cb2fcfde29..65fba33ae81 100644 --- a/pkg/acquisition/modules/appsec/appsec_hooks_test.go +++ b/pkg/acquisition/modules/appsec/appsec_hooks_test.go @@ -274,6 +274,64 @@ func TestAppsecOnMatchHooks(t *testing.T) { require.Equal(t, appsec.BanRemediation, responses[0].Action) }, }, + { + name: "on_match: on_success break", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + on_match: []appsec.Hook{ + {Filter: "IsInBand == true", Apply: []string{"CancelEvent()"}, OnSuccess: "break"}, + 
{Filter: "IsInBand == true", Apply: []string{"SetRemediation('captcha')"}}, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 1) + require.Equal(t, types.APPSEC, events[0].Type) + require.Len(t, responses, 1) + require.Equal(t, appsec.BanRemediation, responses[0].Action) + }, + }, + { + name: "on_match: on_success continue", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + on_match: []appsec.Hook{ + {Filter: "IsInBand == true", Apply: []string{"CancelEvent()"}, OnSuccess: "continue"}, + {Filter: "IsInBand == true", Apply: []string{"SetRemediation('captcha')"}}, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 1) + require.Equal(t, types.APPSEC, events[0].Type) + require.Len(t, responses, 1) + require.Equal(t, appsec.CaptchaRemediation, responses[0].Action) + }, + }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { @@ -286,7 +344,7 @@ func TestAppsecPreEvalHooks(t *testing.T) { tests := []appsecRuleTest{ { - name: "Basic on_load hook to disable inband rule", + name: "Basic pre_eval hook to disable inband rule", expected_load_ok: true, inband_rules: []appsec_rule.CustomRule{ { @@ -314,7 +372,7 @@ func TestAppsecPreEvalHooks(t *testing.T) { }, }, { - name: "Basic on_load fails to disable rule", + name: "Basic pre_eval 
fails to disable rule", expected_load_ok: true, inband_rules: []appsec_rule.CustomRule{ { @@ -349,7 +407,7 @@ func TestAppsecPreEvalHooks(t *testing.T) { }, }, { - name: "on_load : disable inband by tag", + name: "pre_eval : disable inband by tag", expected_load_ok: true, inband_rules: []appsec_rule.CustomRule{ { @@ -377,7 +435,7 @@ func TestAppsecPreEvalHooks(t *testing.T) { }, }, { - name: "on_load : disable inband by ID", + name: "pre_eval : disable inband by ID", expected_load_ok: true, inband_rules: []appsec_rule.CustomRule{ { @@ -405,7 +463,7 @@ func TestAppsecPreEvalHooks(t *testing.T) { }, }, { - name: "on_load : disable inband by name", + name: "pre_eval : disable inband by name", expected_load_ok: true, inband_rules: []appsec_rule.CustomRule{ { @@ -433,7 +491,7 @@ func TestAppsecPreEvalHooks(t *testing.T) { }, }, { - name: "on_load : outofband default behavior", + name: "pre_eval : outofband default behavior", expected_load_ok: true, outofband_rules: []appsec_rule.CustomRule{ { @@ -464,7 +522,7 @@ func TestAppsecPreEvalHooks(t *testing.T) { }, }, { - name: "on_load : set remediation by tag", + name: "pre_eval : set remediation by tag", expected_load_ok: true, inband_rules: []appsec_rule.CustomRule{ { @@ -491,7 +549,7 @@ func TestAppsecPreEvalHooks(t *testing.T) { }, }, { - name: "on_load : set remediation by name", + name: "pre_eval : set remediation by name", expected_load_ok: true, inband_rules: []appsec_rule.CustomRule{ { @@ -518,7 +576,7 @@ func TestAppsecPreEvalHooks(t *testing.T) { }, }, { - name: "on_load : set remediation by ID", + name: "pre_eval : set remediation by ID", expected_load_ok: true, inband_rules: []appsec_rule.CustomRule{ { @@ -546,6 +604,62 @@ func TestAppsecPreEvalHooks(t *testing.T) { require.Equal(t, http.StatusForbidden, appsecResponse.HTTPStatus) }, }, + { + name: "pre_eval : on_success continue", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rulez", + Zones: []string{"ARGS"}, + Variables: 
[]string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + pre_eval: []appsec.Hook{ + {Filter: "1==1", Apply: []string{"SetRemediationByName('rulez', 'foobar')"}, OnSuccess: "continue"}, + {Filter: "1==1", Apply: []string{"SetRemediationByName('rulez', 'foobar2')"}}, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Len(t, responses, 1) + require.Equal(t, "foobar2", responses[0].Action) + }, + }, + { + name: "pre_eval : on_success break", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rulez", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + pre_eval: []appsec.Hook{ + {Filter: "1==1", Apply: []string{"SetRemediationByName('rulez', 'foobar')"}, OnSuccess: "break"}, + {Filter: "1==1", Apply: []string{"SetRemediationByName('rulez', 'foobar2')"}}, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Len(t, responses, 1) + require.Equal(t, "foobar", responses[0].Action) + }, + }, } for _, test := range tests { @@ -705,6 +819,72 @@ func TestOnMatchRemediationHooks(t *testing.T) { require.Equal(t, http.StatusForbidden, statusCode) }, }, + { + name: "on_match: on_success break", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: 
[]string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + DefaultRemediation: appsec.AllowRemediation, + on_match: []appsec.Hook{ + {Filter: "IsInBand == true", Apply: []string{"SetRemediation('captcha')", "SetReturnCode(418)"}, OnSuccess: "break"}, + {Filter: "IsInBand == true", Apply: []string{"SetRemediation('ban')"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + spew.Dump(responses) + spew.Dump(appsecResponse) + + log.Errorf("http status : %d", statusCode) + require.Equal(t, appsec.CaptchaRemediation, appsecResponse.Action) + require.Equal(t, http.StatusTeapot, appsecResponse.HTTPStatus) + require.Equal(t, http.StatusForbidden, statusCode) + }, + }, + { + name: "on_match: on_success continue", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + DefaultRemediation: appsec.AllowRemediation, + on_match: []appsec.Hook{ + {Filter: "IsInBand == true", Apply: []string{"SetRemediation('captcha')", "SetReturnCode(418)"}, OnSuccess: "continue"}, + {Filter: "IsInBand == true", Apply: []string{"SetRemediation('ban')"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + spew.Dump(responses) + spew.Dump(appsecResponse) + + log.Errorf("http status : %d", statusCode) + require.Equal(t, appsec.BanRemediation, appsecResponse.Action) 
+ require.Equal(t, http.StatusTeapot, appsecResponse.HTTPStatus) + require.Equal(t, http.StatusForbidden, statusCode) + }, + }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { diff --git a/pkg/appsec/appsec.go b/pkg/appsec/appsec.go index 554fc3b7123..2c971fb36c5 100644 --- a/pkg/appsec/appsec.go +++ b/pkg/appsec/appsec.go @@ -259,6 +259,9 @@ func (wc *AppsecConfig) Build() (*AppsecRuntimeConfig, error) { //load hooks for _, hook := range wc.OnLoad { + if hook.OnSuccess != "" && hook.OnSuccess != "continue" && hook.OnSuccess != "break" { + return nil, fmt.Errorf("invalid 'on_success' for on_load hook : %s", hook.OnSuccess) + } err := hook.Build(hookOnLoad) if err != nil { return nil, fmt.Errorf("unable to build on_load hook : %s", err) @@ -267,6 +270,9 @@ func (wc *AppsecConfig) Build() (*AppsecRuntimeConfig, error) { } for _, hook := range wc.PreEval { + if hook.OnSuccess != "" && hook.OnSuccess != "continue" && hook.OnSuccess != "break" { + return nil, fmt.Errorf("invalid 'on_success' for pre_eval hook : %s", hook.OnSuccess) + } err := hook.Build(hookPreEval) if err != nil { return nil, fmt.Errorf("unable to build pre_eval hook : %s", err) @@ -275,6 +281,9 @@ func (wc *AppsecConfig) Build() (*AppsecRuntimeConfig, error) { } for _, hook := range wc.PostEval { + if hook.OnSuccess != "" && hook.OnSuccess != "continue" && hook.OnSuccess != "break" { + return nil, fmt.Errorf("invalid 'on_success' for post_eval hook : %s", hook.OnSuccess) + } err := hook.Build(hookPostEval) if err != nil { return nil, fmt.Errorf("unable to build post_eval hook : %s", err) @@ -283,6 +292,9 @@ func (wc *AppsecConfig) Build() (*AppsecRuntimeConfig, error) { } for _, hook := range wc.OnMatch { + if hook.OnSuccess != "" && hook.OnSuccess != "continue" && hook.OnSuccess != "break" { + return nil, fmt.Errorf("invalid 'on_success' for on_match hook : %s", hook.OnSuccess) + } err := hook.Build(hookOnMatch) if err != nil { return nil, fmt.Errorf("unable to build on_match 
hook : %s", err) @@ -302,6 +314,7 @@ func (wc *AppsecConfig) Build() (*AppsecRuntimeConfig, error) { } func (w *AppsecRuntimeConfig) ProcessOnLoadRules() error { + has_match := false for _, rule := range w.CompiledOnLoad { if rule.FilterExpr != nil { output, err := exprhelpers.Run(rule.FilterExpr, GetOnLoadEnv(w), w.Logger, w.Logger.Level >= log.DebugLevel) @@ -318,6 +331,7 @@ func (w *AppsecRuntimeConfig) ProcessOnLoadRules() error { w.Logger.Errorf("Filter must return a boolean, can't filter") continue } + has_match = true } for _, applyExpr := range rule.ApplyExpr { o, err := exprhelpers.Run(applyExpr, GetOnLoadEnv(w), w.Logger, w.Logger.Level >= log.DebugLevel) @@ -332,12 +346,15 @@ func (w *AppsecRuntimeConfig) ProcessOnLoadRules() error { default: } } + if has_match && rule.OnSuccess == "break" { + break + } } return nil } func (w *AppsecRuntimeConfig) ProcessOnMatchRules(request *ParsedRequest, evt types.Event) error { - + has_match := false for _, rule := range w.CompiledOnMatch { if rule.FilterExpr != nil { output, err := exprhelpers.Run(rule.FilterExpr, GetOnMatchEnv(w, request, evt), w.Logger, w.Logger.Level >= log.DebugLevel) @@ -354,6 +371,7 @@ func (w *AppsecRuntimeConfig) ProcessOnMatchRules(request *ParsedRequest, evt ty w.Logger.Errorf("Filter must return a boolean, can't filter") continue } + has_match = true } for _, applyExpr := range rule.ApplyExpr { o, err := exprhelpers.Run(applyExpr, GetOnMatchEnv(w, request, evt), w.Logger, w.Logger.Level >= log.DebugLevel) @@ -368,12 +386,15 @@ func (w *AppsecRuntimeConfig) ProcessOnMatchRules(request *ParsedRequest, evt ty default: } } + if has_match && rule.OnSuccess == "break" { + break + } } return nil } func (w *AppsecRuntimeConfig) ProcessPreEvalRules(request *ParsedRequest) error { - w.Logger.Debugf("processing %d pre_eval rules", len(w.CompiledPreEval)) + has_match := false for _, rule := range w.CompiledPreEval { if rule.FilterExpr != nil { output, err := exprhelpers.Run(rule.FilterExpr, 
GetPreEvalEnv(w, request), w.Logger, w.Logger.Level >= log.DebugLevel) @@ -390,6 +411,7 @@ func (w *AppsecRuntimeConfig) ProcessPreEvalRules(request *ParsedRequest) error w.Logger.Errorf("Filter must return a boolean, can't filter") continue } + has_match = true } // here means there is no filter or the filter matched for _, applyExpr := range rule.ApplyExpr { @@ -405,12 +427,16 @@ func (w *AppsecRuntimeConfig) ProcessPreEvalRules(request *ParsedRequest) error default: } } + if has_match && rule.OnSuccess == "break" { + break + } } return nil } func (w *AppsecRuntimeConfig) ProcessPostEvalRules(request *ParsedRequest) error { + has_match := false for _, rule := range w.CompiledPostEval { if rule.FilterExpr != nil { output, err := exprhelpers.Run(rule.FilterExpr, GetPostEvalEnv(w, request), w.Logger, w.Logger.Level >= log.DebugLevel) @@ -427,6 +453,7 @@ func (w *AppsecRuntimeConfig) ProcessPostEvalRules(request *ParsedRequest) error w.Logger.Errorf("Filter must return a boolean, can't filter") continue } + has_match = true } // here means there is no filter or the filter matched for _, applyExpr := range rule.ApplyExpr { @@ -444,6 +471,9 @@ func (w *AppsecRuntimeConfig) ProcessPostEvalRules(request *ParsedRequest) error default: } } + if has_match && rule.OnSuccess == "break" { + break + } } return nil From f06e3e78ab48d48dc910d83bbbcd966ff19e5e77 Mon Sep 17 00:00:00 2001 From: blotus Date: Fri, 24 May 2024 14:43:12 +0200 Subject: [PATCH 142/318] jwt transport: fix retry on unauthorized from CAPI(#3006) --- pkg/apiclient/auth_jwt.go | 103 +++++++++++++++++++--------------- pkg/apiclient/client.go | 7 +++ pkg/apiclient/retry_config.go | 33 +++++++++++ 3 files changed, 98 insertions(+), 45 deletions(-) create mode 100644 pkg/apiclient/retry_config.go diff --git a/pkg/apiclient/auth_jwt.go b/pkg/apiclient/auth_jwt.go index 6ee17fa5e72..b202e382842 100644 --- a/pkg/apiclient/auth_jwt.go +++ b/pkg/apiclient/auth_jwt.go @@ -26,6 +26,7 @@ type JWTTransport struct { URL 
*url.URL VersionPrefix string UserAgent string + RetryConfig *RetryConfig // Transport is the underlying HTTP transport to use when making requests. // It will default to http.DefaultTransport if nil. Transport http.RoundTripper @@ -165,36 +166,67 @@ func (t *JWTTransport) prepareRequest(req *http.Request) (*http.Request, error) // RoundTrip implements the RoundTripper interface. func (t *JWTTransport) RoundTrip(req *http.Request) (*http.Response, error) { - req, err := t.prepareRequest(req) - if err != nil { - return nil, err - } - if log.GetLevel() >= log.TraceLevel { - // requestToDump := cloneRequest(req) - dump, _ := httputil.DumpRequest(req, true) - log.Tracef("req-jwt: %s", string(dump)) - } + var resp *http.Response + attemptsCount := make(map[int]int) - // Make the HTTP request. - resp, err := t.transport().RoundTrip(req) - if log.GetLevel() >= log.TraceLevel { - dump, _ := httputil.DumpResponse(resp, true) - log.Tracef("resp-jwt: %s (err:%v)", string(dump), err) - } + for { + if log.GetLevel() >= log.TraceLevel { + // requestToDump := cloneRequest(req) + dump, _ := httputil.DumpRequest(req, true) + log.Tracef("req-jwt: %s", string(dump)) + } + // Make the HTTP request. + clonedReq := cloneRequest(req) - if err != nil { - // we had an error (network error for example, or 401 because token is refused), reset the token? - t.ResetToken() + clonedReq, err := t.prepareRequest(clonedReq) + if err != nil { + return nil, err + } - return resp, fmt.Errorf("performing jwt auth: %w", err) - } + resp, err = t.transport().RoundTrip(clonedReq) + if log.GetLevel() >= log.TraceLevel { + dump, _ := httputil.DumpResponse(resp, true) + log.Tracef("resp-jwt: %s (err:%v)", string(dump), err) + } - if resp != nil { - log.Debugf("resp-jwt: %d", resp.StatusCode) - } + if err != nil { + // we had an error (network error for example), reset the token? 
+ t.ResetToken() + return resp, fmt.Errorf("performing jwt auth: %w", err) + } + + if resp != nil { + log.Debugf("resp-jwt: %d", resp.StatusCode) + } + config, shouldRetry := t.RetryConfig.StatusCodeConfig[resp.StatusCode] + if !shouldRetry { + break + } + + if attemptsCount[resp.StatusCode] >= config.MaxAttempts { + log.Infof("max attempts reached for status code %d", resp.StatusCode) + break + } + + if config.InvalidateToken { + log.Debugf("invalidating token for status code %d", resp.StatusCode) + t.ResetToken() + } + + log.Debugf("retrying request to %s", req.URL.String()) + attemptsCount[resp.StatusCode]++ + log.Infof("attempt %d out of %d", attemptsCount[resp.StatusCode], config.MaxAttempts) + + if config.Backoff { + backoff := 2*attemptsCount[resp.StatusCode] + 5 + log.Infof("retrying in %d seconds (attempt %d of %d)", backoff, attemptsCount[resp.StatusCode], config.MaxAttempts) + time.Sleep(time.Duration(backoff) * time.Second) + } + } return resp, nil + } func (t *JWTTransport) Client() *http.Client { @@ -211,27 +243,8 @@ func (t *JWTTransport) ResetToken() { // transport() returns a round tripper that retries once when the status is unauthorized, // and 5 times when the infrastructure is overloaded. 
func (t *JWTTransport) transport() http.RoundTripper { - transport := t.Transport - if transport == nil { - transport = http.DefaultTransport - } - - return &retryRoundTripper{ - next: &retryRoundTripper{ - next: transport, - maxAttempts: 5, - withBackOff: true, - retryStatusCodes: []int{http.StatusTooManyRequests, http.StatusServiceUnavailable, http.StatusGatewayTimeout}, - }, - maxAttempts: 2, - withBackOff: false, - retryStatusCodes: []int{http.StatusUnauthorized, http.StatusForbidden}, - onBeforeRequest: func(attempt int) { - // reset the token only in the second attempt as this is when we know we had a 401 or 403 - // the second attempt is supposed to refresh the token - if attempt > 0 { - t.ResetToken() - } - }, + if t.Transport != nil { + return t.Transport } + return http.DefaultTransport } diff --git a/pkg/apiclient/client.go b/pkg/apiclient/client.go index e0e521d6a6f..b702829efd3 100644 --- a/pkg/apiclient/client.go +++ b/pkg/apiclient/client.go @@ -72,6 +72,13 @@ func NewClient(config *Config) (*ApiClient, error) { UserAgent: config.UserAgent, VersionPrefix: config.VersionPrefix, UpdateScenario: config.UpdateScenario, + RetryConfig: NewRetryConfig( + WithStatusCodeConfig(http.StatusUnauthorized, 2, false, true), + WithStatusCodeConfig(http.StatusForbidden, 2, false, true), + WithStatusCodeConfig(http.StatusTooManyRequests, 5, true, false), + WithStatusCodeConfig(http.StatusServiceUnavailable, 5, true, false), + WithStatusCodeConfig(http.StatusGatewayTimeout, 5, true, false), + ), } transport, baseURL := createTransport(config.URL) diff --git a/pkg/apiclient/retry_config.go b/pkg/apiclient/retry_config.go new file mode 100644 index 00000000000..8a0d1096f84 --- /dev/null +++ b/pkg/apiclient/retry_config.go @@ -0,0 +1,33 @@ +package apiclient + +type StatusCodeConfig struct { + MaxAttempts int + Backoff bool + InvalidateToken bool +} + +type RetryConfig struct { + StatusCodeConfig map[int]StatusCodeConfig +} + +type RetryConfigOption func(*RetryConfig) + 
+func NewRetryConfig(options ...RetryConfigOption) *RetryConfig { + rc := &RetryConfig{ + StatusCodeConfig: make(map[int]StatusCodeConfig), + } + for _, opt := range options { + opt(rc) + } + return rc +} + +func WithStatusCodeConfig(statusCode int, maxAttempts int, backOff bool, invalidateToken bool) RetryConfigOption { + return func(rc *RetryConfig) { + rc.StatusCodeConfig[statusCode] = StatusCodeConfig{ + MaxAttempts: maxAttempts, + Backoff: backOff, + InvalidateToken: invalidateToken, + } + } +} From 9088f31b7d046be2f5a26d64e21314400f5b28b7 Mon Sep 17 00:00:00 2001 From: Laurence Jones Date: Fri, 24 May 2024 14:27:25 +0100 Subject: [PATCH 143/318] enhance: container discovery via labels (#2959) * wip: attempt to autodiscover via labels * wip: remove labels dep on docker acquistion * wip: remove labels dep on docker acquistion * wip: add debug * wip: try fix parser maps * wip: remove redundant pointer * wip: add debug * wip: cant type assert * wip: reinstate debug * wip: reinstate debug * wip: reinstate debug * wip: oops * wip: add a debug * wip: fix labels * wip: remove redundant paramter * wip: rename config option to be more self declarative * wip: update log wording * wip: the if check was not correct * wip: me lost * fix: add checks to typecast and log useful information * add tests for parseLabels * return nil instead of pointer to empty struct * simplify EvalContainer return value --------- Co-authored-by: Sebastien Blot --- pkg/acquisition/acquisition.go | 5 +- pkg/acquisition/modules/docker/docker.go | 75 ++++++++++++++++--- pkg/acquisition/modules/docker/docker_test.go | 52 +++++++++++++ pkg/acquisition/modules/docker/utils.go | 38 ++++++++++ 4 files changed, 158 insertions(+), 12 deletions(-) create mode 100644 pkg/acquisition/modules/docker/utils.go diff --git a/pkg/acquisition/acquisition.go b/pkg/acquisition/acquisition.go index 677bf664e31..ab7d954cac1 100644 --- a/pkg/acquisition/acquisition.go +++ b/pkg/acquisition/acquisition.go @@ -235,7 
+235,10 @@ func LoadAcquisitionFromFile(config *csconfig.CrowdsecServiceCfg, prom *csconfig log.Debugf("skipping empty item in %s", acquisFile) continue } - return nil, fmt.Errorf("missing labels in %s (position: %d)", acquisFile, idx) + if sub.Source != "docker" { + //docker is the only source that can be empty + return nil, fmt.Errorf("missing labels in %s (position: %d)", acquisFile, idx) + } } if sub.Source == "" { return nil, fmt.Errorf("data source type is empty ('source') in %s (position: %d)", acquisFile, idx) diff --git a/pkg/acquisition/modules/docker/docker.go b/pkg/acquisition/modules/docker/docker.go index 9f1febf2cb7..3a694b99d76 100644 --- a/pkg/acquisition/modules/docker/docker.go +++ b/pkg/acquisition/modules/docker/docker.go @@ -41,7 +41,7 @@ type DockerConfiguration struct { ContainerID []string `yaml:"container_id"` ContainerNameRegexp []string `yaml:"container_name_regexp"` ContainerIDRegexp []string `yaml:"container_id_regexp"` - ForceInotify bool `yaml:"force_inotify"` + UseContainerLabels bool `yaml:"use_container_labels"` configuration.DataSourceCommonCfg `yaml:",inline"` } @@ -87,10 +87,14 @@ func (d *DockerSource) UnmarshalConfig(yamlConfig []byte) error { d.logger.Tracef("DockerAcquisition configuration: %+v", d.Config) } - if len(d.Config.ContainerName) == 0 && len(d.Config.ContainerID) == 0 && len(d.Config.ContainerIDRegexp) == 0 && len(d.Config.ContainerNameRegexp) == 0 { + if len(d.Config.ContainerName) == 0 && len(d.Config.ContainerID) == 0 && len(d.Config.ContainerIDRegexp) == 0 && len(d.Config.ContainerNameRegexp) == 0 && !d.Config.UseContainerLabels { return fmt.Errorf("no containers names or containers ID configuration provided") } + if d.Config.UseContainerLabels && (len(d.Config.ContainerName) > 0 || len(d.Config.ContainerID) > 0 || len(d.Config.ContainerIDRegexp) > 0 || len(d.Config.ContainerNameRegexp) > 0) { + return fmt.Errorf("use_container_labels and container_name, container_id, container_id_regexp, 
container_name_regexp are mutually exclusive") + } + d.CheckIntervalDuration, err = time.ParseDuration(d.Config.CheckInterval) if err != nil { return fmt.Errorf("parsing 'check_interval' parameters: %s", d.CheckIntervalDuration) @@ -293,7 +297,7 @@ func (d *DockerSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) er d.logger.Debugf("container with id %s is already being read from", container.ID) continue } - if containerConfig, ok := d.EvalContainer(container); ok { + if containerConfig := d.EvalContainer(container); containerConfig != nil { d.logger.Infof("reading logs from container %s", containerConfig.Name) d.logger.Debugf("logs options: %+v", *d.containerLogsOptions) dockerReader, err := d.Client.ContainerLogs(context.Background(), containerConfig.ID, *d.containerLogsOptions) @@ -375,10 +379,18 @@ func (d *DockerSource) getContainerTTY(containerId string) bool { return containerDetails.Config.Tty } -func (d *DockerSource) EvalContainer(container dockerTypes.Container) (*ContainerConfig, bool) { +func (d *DockerSource) getContainerLabels(containerId string) map[string]interface{} { + containerDetails, err := d.Client.ContainerInspect(context.Background(), containerId) + if err != nil { + return map[string]interface{}{} + } + return parseLabels(containerDetails.Config.Labels) +} + +func (d *DockerSource) EvalContainer(container dockerTypes.Container) *ContainerConfig { for _, containerID := range d.Config.ContainerID { if containerID == container.ID { - return &ContainerConfig{ID: container.ID, Name: container.Names[0], Labels: d.Config.Labels, Tty: d.getContainerTTY(container.ID)}, true + return &ContainerConfig{ID: container.ID, Name: container.Names[0], Labels: d.Config.Labels, Tty: d.getContainerTTY(container.ID)} } } @@ -388,7 +400,7 @@ func (d *DockerSource) EvalContainer(container dockerTypes.Container) (*Containe name = name[1:] } if name == containerName { - return &ContainerConfig{ID: container.ID, Name: name, Labels: d.Config.Labels, Tty: 
d.getContainerTTY(container.ID)}, true + return &ContainerConfig{ID: container.ID, Name: name, Labels: d.Config.Labels, Tty: d.getContainerTTY(container.ID)} } } @@ -396,20 +408,61 @@ func (d *DockerSource) EvalContainer(container dockerTypes.Container) (*Containe for _, cont := range d.compiledContainerID { if matched := cont.MatchString(container.ID); matched { - return &ContainerConfig{ID: container.ID, Name: container.Names[0], Labels: d.Config.Labels, Tty: d.getContainerTTY(container.ID)}, true + return &ContainerConfig{ID: container.ID, Name: container.Names[0], Labels: d.Config.Labels, Tty: d.getContainerTTY(container.ID)} } } for _, cont := range d.compiledContainerName { for _, name := range container.Names { if matched := cont.MatchString(name); matched { - return &ContainerConfig{ID: container.ID, Name: name, Labels: d.Config.Labels, Tty: d.getContainerTTY(container.ID)}, true + return &ContainerConfig{ID: container.ID, Name: name, Labels: d.Config.Labels, Tty: d.getContainerTTY(container.ID)} } } } - return &ContainerConfig{}, false + if d.Config.UseContainerLabels { + parsedLabels := d.getContainerLabels(container.ID) + if len(parsedLabels) == 0 { + d.logger.Tracef("container has no 'crowdsec' labels set, ignoring container: %s", container.ID) + return nil + } + if _, ok := parsedLabels["enable"]; !ok { + d.logger.Errorf("container has 'crowdsec' labels set but no 'crowdsec.enable' key found") + return nil + } + enable, ok := parsedLabels["enable"].(string) + if !ok { + d.logger.Error("container has 'crowdsec.enable' label set but it's not a string") + return nil + } + if strings.ToLower(enable) != "true" { + d.logger.Debugf("container has 'crowdsec.enable' label not set to true ignoring container: %s", container.ID) + return nil + } + if _, ok = parsedLabels["labels"]; !ok { + d.logger.Error("container has 'crowdsec.enable' label set to true but no 'labels' keys found") + return nil + } + labelsTypeCast, ok := 
parsedLabels["labels"].(map[string]interface{}) + if !ok { + d.logger.Error("container has 'crowdsec.enable' label set to true but 'labels' is not a map") + return nil + } + d.logger.Debugf("container labels %+v", labelsTypeCast) + labels := make(map[string]string) + for k, v := range labelsTypeCast { + if v, ok := v.(string); ok { + log.Debugf("label %s is a string with value %s", k, v) + labels[k] = v + continue + } + d.logger.Errorf("label %s is not a string", k) + } + return &ContainerConfig{ID: container.ID, Name: container.Names[0], Labels: labels, Tty: d.getContainerTTY(container.ID)} + } + + return nil } func (d *DockerSource) WatchContainer(monitChan chan *ContainerConfig, deleteChan chan *ContainerConfig) error { @@ -449,7 +502,7 @@ func (d *DockerSource) WatchContainer(monitChan chan *ContainerConfig, deleteCha if _, ok := d.runningContainerState[container.ID]; ok { continue } - if containerConfig, ok := d.EvalContainer(container); ok { + if containerConfig := d.EvalContainer(container); containerConfig != nil { monitChan <- containerConfig } } @@ -522,7 +575,7 @@ func (d *DockerSource) TailDocker(container *ContainerConfig, outChan chan types } l := types.Line{} l.Raw = line - l.Labels = d.Config.Labels + l.Labels = container.Labels l.Time = time.Now().UTC() l.Src = container.Name l.Process = true diff --git a/pkg/acquisition/modules/docker/docker_test.go b/pkg/acquisition/modules/docker/docker_test.go index 6c010f895d3..c2624229afd 100644 --- a/pkg/acquisition/modules/docker/docker_test.go +++ b/pkg/acquisition/modules/docker/docker_test.go @@ -341,3 +341,55 @@ func TestOneShot(t *testing.T) { } } } + +func TestParseLabels(t *testing.T) { + tests := []struct { + name string + labels map[string]string + expected map[string]interface{} + }{ + { + name: "bad label", + labels: map[string]string{"crowdsecfoo": "bar"}, + expected: map[string]interface{}{}, + }, + { + name: "simple label", + labels: map[string]string{"crowdsec.bar": "baz"}, + expected: 
map[string]interface{}{"bar": "baz"}, + }, + { + name: "multiple simple labels", + labels: map[string]string{"crowdsec.bar": "baz", "crowdsec.foo": "bar"}, + expected: map[string]interface{}{"bar": "baz", "foo": "bar"}, + }, + { + name: "multiple simple labels 2", + labels: map[string]string{"crowdsec.bar": "baz", "bla": "foo"}, + expected: map[string]interface{}{"bar": "baz"}, + }, + { + name: "end with dot", + labels: map[string]string{"crowdsec.bar.": "baz"}, + expected: map[string]interface{}{}, + }, + { + name: "consecutive dots", + labels: map[string]string{"crowdsec......bar": "baz"}, + expected: map[string]interface{}{}, + }, + { + name: "crowdsec labels", + labels: map[string]string{"crowdsec.labels.type": "nginx"}, + expected: map[string]interface{}{"labels": map[string]interface{}{"type": "nginx"}}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + labels := parseLabels(test.labels) + assert.Equal(t, test.expected, labels) + }) + } + +} diff --git a/pkg/acquisition/modules/docker/utils.go b/pkg/acquisition/modules/docker/utils.go new file mode 100644 index 00000000000..5439163e5b9 --- /dev/null +++ b/pkg/acquisition/modules/docker/utils.go @@ -0,0 +1,38 @@ +package dockeracquisition + +import ( + "strings" +) + +func parseLabels(labels map[string]string) map[string]interface{} { + result := make(map[string]interface{}) + for key, value := range labels { + parseKeyToMap(result, key, value) + } + return result +} + +func parseKeyToMap(m map[string]interface{}, key string, value string) { + if !strings.HasPrefix(key, "crowdsec") { + return + } + parts := strings.Split(key, ".") + + if len(parts) < 2 || parts[0] != "crowdsec" { + return + } + + for i := 0; i < len(parts); i++ { + if parts[i] == "" { + return + } + } + + for i := 1; i < len(parts)-1; i++ { + if _, ok := m[parts[i]]; !ok { + m[parts[i]] = make(map[string]interface{}) + } + m = m[parts[i]].(map[string]interface{}) + } + m[parts[len(parts)-1]] = value +} From 
f3341c13823ce7ab0c6b9123f4e9ed3d4af0d226 Mon Sep 17 00:00:00 2001 From: blotus Date: Mon, 27 May 2024 10:15:38 +0200 Subject: [PATCH 144/318] Appsec: properly populate event (#2943) --- cmd/crowdsec-cli/alerts.go | 1 + cmd/crowdsec/crowdsec.go | 8 ++ cmd/crowdsec/serve.go | 3 + pkg/acquisition/modules/appsec/utils.go | 161 +++++++++++++++++++----- pkg/alertcontext/alertcontext.go | 16 +-- pkg/exprhelpers/expr_lib.go | 23 ++++ pkg/exprhelpers/geoip.go | 63 ++++++++++ pkg/exprhelpers/helpers.go | 42 +++++++ pkg/parser/enrich.go | 21 +--- pkg/parser/enrich_date.go | 6 +- pkg/parser/enrich_date_test.go | 2 +- pkg/parser/enrich_dns.go | 6 +- pkg/parser/enrich_geoip.go | 98 +++++---------- pkg/parser/enrich_unmarshal.go | 6 +- pkg/parser/node.go | 7 +- pkg/parser/node_test.go | 2 +- pkg/parser/parsing_test.go | 6 +- pkg/parser/runtime.go | 2 +- pkg/parser/unix_parser.go | 2 +- 19 files changed, 333 insertions(+), 142 deletions(-) create mode 100644 pkg/exprhelpers/geoip.go diff --git a/cmd/crowdsec-cli/alerts.go b/cmd/crowdsec-cli/alerts.go index d31c99e1b12..4cc4a992c43 100644 --- a/cmd/crowdsec-cli/alerts.go +++ b/cmd/crowdsec-cli/alerts.go @@ -493,6 +493,7 @@ func (cli *cliAlerts) NewInspectCmd() *cobra.Command { switch cfg.Cscli.Output { case "human": if err := cli.displayOneAlert(alert, details); err != nil { + log.Warnf("unable to display alert with id %s: %s", alertID, err) continue } case "json": diff --git a/cmd/crowdsec/crowdsec.go b/cmd/crowdsec/crowdsec.go index 8f07d165f6b..d226e3a5796 100644 --- a/cmd/crowdsec/crowdsec.go +++ b/cmd/crowdsec/crowdsec.go @@ -19,6 +19,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/appsec" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" leaky "github.com/crowdsecurity/crowdsec/pkg/leakybucket" "github.com/crowdsecurity/crowdsec/pkg/parser" "github.com/crowdsecurity/crowdsec/pkg/types" @@ -32,6 +33,13 @@ func 
initCrowdsec(cConfig *csconfig.Config, hub *cwhub.Hub) (*parser.Parsers, [] return nil, nil, fmt.Errorf("while loading context: %w", err) } + err = exprhelpers.GeoIPInit(hub.GetDataDir()) + + if err != nil { + //GeoIP databases are not mandatory, do not make crowdsec fail if they are not present + log.Warnf("unable to initialize GeoIP: %s", err) + } + // Start loading configs csParsers := parser.NewParsers(hub) if csParsers, err = parser.LoadParsers(cConfig, csParsers); err != nil { diff --git a/cmd/crowdsec/serve.go b/cmd/crowdsec/serve.go index a27622a641a..497215d74a1 100644 --- a/cmd/crowdsec/serve.go +++ b/cmd/crowdsec/serve.go @@ -177,6 +177,9 @@ func ShutdownCrowdsecRoutines() error { // He's dead, Jim. crowdsecTomb.Kill(nil) + // close the potential geoips reader we have to avoid leaking ressources on reload + exprhelpers.GeoIPClose() + return reterr } diff --git a/pkg/acquisition/modules/appsec/utils.go b/pkg/acquisition/modules/appsec/utils.go index 7600617965a..02ded2a2437 100644 --- a/pkg/acquisition/modules/appsec/utils.go +++ b/pkg/acquisition/modules/appsec/utils.go @@ -1,20 +1,46 @@ package appsecacquisition import ( - "encoding/json" "fmt" + "net" + "slices" + "strconv" "time" "github.com/crowdsecurity/coraza/v3/collection" "github.com/crowdsecurity/coraza/v3/types/variables" + "github.com/crowdsecurity/crowdsec/pkg/alertcontext" "github.com/crowdsecurity/crowdsec/pkg/appsec" + "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/types" "github.com/crowdsecurity/go-cs-lib/ptr" + "github.com/oschwald/geoip2-golang" "github.com/prometheus/client_golang/prometheus" log "github.com/sirupsen/logrus" ) +var appsecMetaKeys = []string{ + "id", + "name", + "method", + "uri", + "matched_zones", + "msg", +} + +func appendMeta(meta models.Meta, key string, value string) models.Meta { + if value == "" { + return meta + } + + meta = append(meta, &models.MetaItems0{ + Key: key, + 
Value: value, + }) + return meta +} + func AppsecEventGeneration(inEvt types.Event) (*types.Event, error) { //if the request didnd't trigger inband rules, we don't want to generate an event to LAPI/CAPI if !inEvt.Appsec.HasInBandMatches { @@ -23,48 +49,127 @@ func AppsecEventGeneration(inEvt types.Event) (*types.Event, error) { evt := types.Event{} evt.Type = types.APPSEC evt.Process = true + sourceIP := inEvt.Parsed["source_ip"] source := models.Source{ - Value: ptr.Of(inEvt.Parsed["source_ip"]), - IP: inEvt.Parsed["source_ip"], + Value: &sourceIP, + IP: sourceIP, Scope: ptr.Of(types.Ip), } + asndata, err := exprhelpers.GeoIPASNEnrich(sourceIP) + + if err != nil { + log.Errorf("Unable to enrich ip '%s' for ASN: %s", sourceIP, err) + } else if asndata != nil { + record := asndata.(*geoip2.ASN) + source.AsName = record.AutonomousSystemOrganization + source.AsNumber = fmt.Sprintf("%d", record.AutonomousSystemNumber) + } + + cityData, err := exprhelpers.GeoIPEnrich(sourceIP) + if err != nil { + log.Errorf("Unable to enrich ip '%s' for geo data: %s", sourceIP, err) + } else if cityData != nil { + record := cityData.(*geoip2.City) + source.Cn = record.Country.IsoCode + source.Latitude = float32(record.Location.Latitude) + source.Longitude = float32(record.Location.Longitude) + } + + rangeData, err := exprhelpers.GeoIPRangeEnrich(sourceIP) + if err != nil { + log.Errorf("Unable to enrich ip '%s' for range: %s", sourceIP, err) + } else if rangeData != nil { + record := rangeData.(*net.IPNet) + source.Range = record.String() + } + evt.Overflow.Sources = make(map[string]models.Source) - evt.Overflow.Sources["ip"] = source + evt.Overflow.Sources[sourceIP] = source alert := models.Alert{} alert.Capacity = ptr.Of(int32(1)) - alert.Events = make([]*models.Event, 0) - alert.Meta = make(models.Meta, 0) - for _, key := range []string{"target_uri", "method"} { + alert.Events = make([]*models.Event, len(evt.Appsec.GetRuleIDs())) - valueByte, err := 
json.Marshal([]string{inEvt.Parsed[key]}) - if err != nil { - log.Debugf("unable to serialize key %s", key) + now := ptr.Of(time.Now().UTC().Format(time.RFC3339)) + + tmpAppsecContext := make(map[string][]string) + + for _, matched_rule := range inEvt.Appsec.MatchedRules { + evtRule := models.Event{} + + evtRule.Timestamp = now + + evtRule.Meta = make(models.Meta, 0) + + for _, key := range appsecMetaKeys { + + if tmpAppsecContext[key] == nil { + tmpAppsecContext[key] = make([]string, 0) + } + + switch value := matched_rule[key].(type) { + case string: + evtRule.Meta = appendMeta(evtRule.Meta, key, value) + if value != "" && !slices.Contains(tmpAppsecContext[key], value) { + tmpAppsecContext[key] = append(tmpAppsecContext[key], value) + } + case int: + val := strconv.Itoa(value) + evtRule.Meta = appendMeta(evtRule.Meta, key, val) + if val != "" && !slices.Contains(tmpAppsecContext[key], val) { + tmpAppsecContext[key] = append(tmpAppsecContext[key], val) + } + case []string: + for _, v := range value { + evtRule.Meta = appendMeta(evtRule.Meta, key, v) + if v != "" && !slices.Contains(tmpAppsecContext[key], v) { + tmpAppsecContext[key] = append(tmpAppsecContext[key], v) + } + } + case []int: + for _, v := range value { + val := strconv.Itoa(v) + evtRule.Meta = appendMeta(evtRule.Meta, key, val) + if val != "" && !slices.Contains(tmpAppsecContext[key], val) { + tmpAppsecContext[key] = append(tmpAppsecContext[key], val) + } + + } + default: + val := fmt.Sprintf("%v", value) + evtRule.Meta = appendMeta(evtRule.Meta, key, val) + if val != "" && !slices.Contains(tmpAppsecContext[key], val) { + tmpAppsecContext[key] = append(tmpAppsecContext[key], val) + } + + } + } + alert.Events = append(alert.Events, &evtRule) + } + + metas := make([]*models.MetaItems0, 0) + + for key, values := range tmpAppsecContext { + if len(values) == 0 { continue } + valueStr, err := alertcontext.TruncateContext(values, alertcontext.MaxContextValueLen) + if err != nil { + log.Warningf(err.Error()) 
+ } + meta := models.MetaItems0{ Key: key, - Value: string(valueByte), - } - alert.Meta = append(alert.Meta, &meta) - } - matchedZones := inEvt.Appsec.GetMatchedZones() - if matchedZones != nil { - valueByte, err := json.Marshal(matchedZones) - if err != nil { - log.Debugf("unable to serialize key matched_zones") - } else { - meta := models.MetaItems0{ - Key: "matched_zones", - Value: string(valueByte), - } - alert.Meta = append(alert.Meta, &meta) + Value: valueStr, } + metas = append(metas, &meta) } - alert.EventsCount = ptr.Of(int32(1)) + alert.Meta = metas + + alert.EventsCount = ptr.Of(int32(len(alert.Events))) alert.Leakspeed = ptr.Of("") alert.Scenario = ptr.Of(inEvt.Appsec.MatchedRules.GetName()) alert.ScenarioHash = ptr.Of(inEvt.Appsec.MatchedRules.GetHash()) @@ -200,7 +305,7 @@ func (r *AppsecRunner) AccumulateTxToEvent(evt *types.Event, req *appsec.ParsedR }) for _, rule := range req.Tx.MatchedRules() { - if rule.Message() == "" || rule.DisruptiveAction() == "pass" || rule.DisruptiveAction() == "allow" { + if rule.Message() == "" { r.logger.Tracef("discarding rule %d (action: %s)", rule.Rule().ID(), rule.DisruptiveAction()) continue } @@ -242,7 +347,7 @@ func (r *AppsecRunner) AccumulateTxToEvent(evt *types.Event, req *appsec.ParsedR corazaRule := map[string]interface{}{ "id": rule.Rule().ID(), - "uri": evt.Parsed["uri"], + "uri": evt.Parsed["target_uri"], "rule_type": kind, "method": evt.Parsed["method"], "disruptive": rule.Disruptive(), diff --git a/pkg/alertcontext/alertcontext.go b/pkg/alertcontext/alertcontext.go index 8b0648ca0eb..9946d694363 100644 --- a/pkg/alertcontext/alertcontext.go +++ b/pkg/alertcontext/alertcontext.go @@ -16,7 +16,7 @@ import ( ) const ( - maxContextValueLen = 4000 + MaxContextValueLen = 4000 ) var alertContext = Context{} @@ -46,13 +46,13 @@ func NewAlertContext(contextToSend map[string][]string, valueLength int) error { } if valueLength == 0 { - clog.Debugf("No console context value length provided, using default: %d", 
maxContextValueLen) - valueLength = maxContextValueLen + clog.Debugf("No console context value length provided, using default: %d", MaxContextValueLen) + valueLength = MaxContextValueLen } - if valueLength > maxContextValueLen { - clog.Debugf("Provided console context value length (%d) is higher than the maximum, using default: %d", valueLength, maxContextValueLen) - valueLength = maxContextValueLen + if valueLength > MaxContextValueLen { + clog.Debugf("Provided console context value length (%d) is higher than the maximum, using default: %d", valueLength, MaxContextValueLen) + valueLength = MaxContextValueLen } alertContext = Context{ @@ -85,7 +85,7 @@ func NewAlertContext(contextToSend map[string][]string, valueLength int) error { return nil } -func truncate(values []string, contextValueLen int) (string, error) { +func TruncateContext(values []string, contextValueLen int) (string, error) { valueByte, err := json.Marshal(values) if err != nil { return "", fmt.Errorf("unable to dump metas: %w", err) @@ -159,7 +159,7 @@ func EventToContext(events []types.Event) (models.Meta, []error) { continue } - valueStr, err := truncate(values, alertContext.ContextValueLen) + valueStr, err := TruncateContext(values, alertContext.ContextValueLen) if err != nil { log.Warningf(err.Error()) } diff --git a/pkg/exprhelpers/expr_lib.go b/pkg/exprhelpers/expr_lib.go index 5041b234db1..19b25e25895 100644 --- a/pkg/exprhelpers/expr_lib.go +++ b/pkg/exprhelpers/expr_lib.go @@ -1,9 +1,11 @@ package exprhelpers import ( + "net" "time" "github.com/crowdsecurity/crowdsec/pkg/cticlient" + "github.com/oschwald/geoip2-golang" ) type exprCustomFunc struct { @@ -469,6 +471,27 @@ var exprFuncs = []exprCustomFunc{ new(func(string) bool), }, }, + { + name: "GeoIPEnrich", + function: GeoIPEnrich, + signature: []interface{}{ + new(func(string) *geoip2.City), + }, + }, + { + name: "GeoIPASNEnrich", + function: GeoIPASNEnrich, + signature: []interface{}{ + new(func(string) *geoip2.ASN), + }, + }, + { + 
name: "GeoIPRangeEnrich", + function: GeoIPRangeEnrich, + signature: []interface{}{ + new(func(string) *net.IPNet), + }, + }, } //go 1.20 "CutPrefix": strings.CutPrefix, diff --git a/pkg/exprhelpers/geoip.go b/pkg/exprhelpers/geoip.go new file mode 100644 index 00000000000..fb0c344d884 --- /dev/null +++ b/pkg/exprhelpers/geoip.go @@ -0,0 +1,63 @@ +package exprhelpers + +import ( + "net" +) + +func GeoIPEnrich(params ...any) (any, error) { + if geoIPCityReader == nil { + return nil, nil + } + + ip := params[0].(string) + + parsedIP := net.ParseIP(ip) + + city, err := geoIPCityReader.City(parsedIP) + + if err != nil { + return nil, err + } + + return city, nil +} + +func GeoIPASNEnrich(params ...any) (any, error) { + if geoIPASNReader == nil { + return nil, nil + } + + ip := params[0].(string) + + parsedIP := net.ParseIP(ip) + asn, err := geoIPASNReader.ASN(parsedIP) + + if err != nil { + return nil, err + } + + return asn, nil +} + +func GeoIPRangeEnrich(params ...any) (any, error) { + if geoIPRangeReader == nil { + return nil, nil + } + + ip := params[0].(string) + + var dummy interface{} + + parsedIP := net.ParseIP(ip) + rangeIP, ok, err := geoIPRangeReader.LookupNetwork(parsedIP, &dummy) + + if err != nil { + return nil, err + } + + if !ok { + return nil, nil + } + + return rangeIP, nil +} diff --git a/pkg/exprhelpers/helpers.go b/pkg/exprhelpers/helpers.go index e4e38e48474..575425ef83e 100644 --- a/pkg/exprhelpers/helpers.go +++ b/pkg/exprhelpers/helpers.go @@ -20,6 +20,8 @@ import ( "github.com/c-robinson/iplib" "github.com/cespare/xxhash/v2" "github.com/davecgh/go-spew/spew" + "github.com/oschwald/geoip2-golang" + "github.com/oschwald/maxminddb-golang" "github.com/prometheus/client_golang/prometheus" log "github.com/sirupsen/logrus" "github.com/umahmood/haversine" @@ -55,6 +57,10 @@ var exprFunctionOptions []expr.Option var keyValuePattern = regexp.MustCompile(`(?P[^=\s]+)=(?:"(?P[^"\\]*(?:\\.[^"\\]*)*)"|(?P[^=\s]+)|\s*)`) +var geoIPCityReader *geoip2.Reader 
+var geoIPASNReader *geoip2.Reader +var geoIPRangeReader *maxminddb.Reader + func GetExprOptions(ctx map[string]interface{}) []expr.Option { if len(exprFunctionOptions) == 0 { exprFunctionOptions = []expr.Option{} @@ -72,6 +78,42 @@ func GetExprOptions(ctx map[string]interface{}) []expr.Option { return ret } +func GeoIPInit(datadir string) error { + var err error + + geoIPCityReader, err = geoip2.Open(filepath.Join(datadir, "GeoLite2-City.mmdb")) + if err != nil { + log.Errorf("unable to open GeoLite2-City.mmdb : %s", err) + return err + } + + geoIPASNReader, err = geoip2.Open(filepath.Join(datadir, "GeoLite2-ASN.mmdb")) + if err != nil { + log.Errorf("unable to open GeoLite2-ASN.mmdb : %s", err) + return err + } + + geoIPRangeReader, err = maxminddb.Open(filepath.Join(datadir, "GeoLite2-ASN.mmdb")) + if err != nil { + log.Errorf("unable to open GeoLite2-ASN.mmdb : %s", err) + return err + } + + return nil +} + +func GeoIPClose() { + if geoIPCityReader != nil { + geoIPCityReader.Close() + } + if geoIPASNReader != nil { + geoIPASNReader.Close() + } + if geoIPRangeReader != nil { + geoIPRangeReader.Close() + } +} + func Init(databaseClient *database.Client) error { dataFile = make(map[string][]string) dataFileRegex = make(map[string][]*regexp.Regexp) diff --git a/pkg/parser/enrich.go b/pkg/parser/enrich.go index 5180b9a5fb9..661410d20d3 100644 --- a/pkg/parser/enrich.go +++ b/pkg/parser/enrich.go @@ -7,7 +7,7 @@ import ( ) /* should be part of a package shared with enrich/geoip.go */ -type EnrichFunc func(string, *types.Event, interface{}, *log.Entry) (map[string]string, error) +type EnrichFunc func(string, *types.Event, *log.Entry) (map[string]string, error) type InitFunc func(map[string]string) (interface{}, error) type EnricherCtx struct { @@ -16,59 +16,42 @@ type EnricherCtx struct { type Enricher struct { Name string - InitFunc InitFunc EnrichFunc EnrichFunc - Ctx interface{} } /* mimic plugin loading */ -func Loadplugin(path string) (EnricherCtx, error) { +func 
Loadplugin() (EnricherCtx, error) { enricherCtx := EnricherCtx{} enricherCtx.Registered = make(map[string]*Enricher) - enricherConfig := map[string]string{"datadir": path} - EnrichersList := []*Enricher{ { Name: "GeoIpCity", - InitFunc: GeoIPCityInit, EnrichFunc: GeoIpCity, }, { Name: "GeoIpASN", - InitFunc: GeoIPASNInit, EnrichFunc: GeoIpASN, }, { Name: "IpToRange", - InitFunc: IpToRangeInit, EnrichFunc: IpToRange, }, { Name: "reverse_dns", - InitFunc: reverseDNSInit, EnrichFunc: reverse_dns, }, { Name: "ParseDate", - InitFunc: parseDateInit, EnrichFunc: ParseDate, }, { Name: "UnmarshalJSON", - InitFunc: unmarshalInit, EnrichFunc: unmarshalJSON, }, } for _, enricher := range EnrichersList { - log.Debugf("Initiating enricher '%s'", enricher.Name) - pluginCtx, err := enricher.InitFunc(enricherConfig) - if err != nil { - log.Errorf("unable to register plugin '%s': %v", enricher.Name, err) - continue - } - enricher.Ctx = pluginCtx log.Infof("Successfully registered enricher '%s'", enricher.Name) enricherCtx.Registered[enricher.Name] = enricher } diff --git a/pkg/parser/enrich_date.go b/pkg/parser/enrich_date.go index 20828af9037..748a466d7c3 100644 --- a/pkg/parser/enrich_date.go +++ b/pkg/parser/enrich_date.go @@ -56,7 +56,7 @@ func GenDateParse(date string) (string, time.Time) { return "", time.Time{} } -func ParseDate(in string, p *types.Event, x interface{}, plog *log.Entry) (map[string]string, error) { +func ParseDate(in string, p *types.Event, plog *log.Entry) (map[string]string, error) { var ret = make(map[string]string) var strDate string @@ -105,7 +105,3 @@ func ParseDate(in string, p *types.Event, x interface{}, plog *log.Entry) (map[s return ret, nil } - -func parseDateInit(cfg map[string]string) (interface{}, error) { - return nil, nil -} diff --git a/pkg/parser/enrich_date_test.go b/pkg/parser/enrich_date_test.go index 084ded52573..085ef5ca342 100644 --- a/pkg/parser/enrich_date_test.go +++ b/pkg/parser/enrich_date_test.go @@ -48,7 +48,7 @@ func 
TestDateParse(t *testing.T) { for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { - strTime, err := ParseDate(tt.evt.StrTime, &tt.evt, nil, logger) + strTime, err := ParseDate(tt.evt.StrTime, &tt.evt, logger) cstest.RequireErrorContains(t, err, tt.expectedErr) if tt.expectedErr != "" { return diff --git a/pkg/parser/enrich_dns.go b/pkg/parser/enrich_dns.go index f622e6c359a..1ff5b0f4f16 100644 --- a/pkg/parser/enrich_dns.go +++ b/pkg/parser/enrich_dns.go @@ -11,7 +11,7 @@ import ( /* All plugins must export a list of function pointers for exported symbols */ //var ExportedFuncs = []string{"reverse_dns"} -func reverse_dns(field string, p *types.Event, ctx interface{}, plog *log.Entry) (map[string]string, error) { +func reverse_dns(field string, p *types.Event, plog *log.Entry) (map[string]string, error) { ret := make(map[string]string) if field == "" { return nil, nil @@ -25,7 +25,3 @@ func reverse_dns(field string, p *types.Event, ctx interface{}, plog *log.Entry) ret["reverse_dns"] = rets[0] return ret, nil } - -func reverseDNSInit(cfg map[string]string) (interface{}, error) { - return nil, nil -} diff --git a/pkg/parser/enrich_geoip.go b/pkg/parser/enrich_geoip.go index 0a263c82793..5e1fdbfc437 100644 --- a/pkg/parser/enrich_geoip.go +++ b/pkg/parser/enrich_geoip.go @@ -6,53 +6,53 @@ import ( "strconv" "github.com/oschwald/geoip2-golang" - "github.com/oschwald/maxminddb-golang" log "github.com/sirupsen/logrus" + "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" "github.com/crowdsecurity/crowdsec/pkg/types" ) -func IpToRange(field string, p *types.Event, ctx interface{}, plog *log.Entry) (map[string]string, error) { - var dummy interface{} - ret := make(map[string]string) - +func IpToRange(field string, p *types.Event, plog *log.Entry) (map[string]string, error) { if field == "" { return nil, nil } - ip := net.ParseIP(field) - if ip == nil { - plog.Infof("Can't parse ip %s, no range enrich", field) - return nil, nil - } - net, ok, err := 
ctx.(*maxminddb.Reader).LookupNetwork(ip, &dummy) + + r, err := exprhelpers.GeoIPRangeEnrich(field) + if err != nil { - plog.Errorf("Failed to fetch network for %s : %v", ip.String(), err) - return nil, nil + plog.Errorf("Unable to enrich ip '%s'", field) + return nil, nil //nolint:nilerr } - if !ok { - plog.Debugf("Unable to find range of %s", ip.String()) + + if r == nil { + plog.Warnf("No range found for ip '%s'", field) return nil, nil } - ret["SourceRange"] = net.String() + + record := r.(*net.IPNet) + + ret := make(map[string]string) + ret["SourceRange"] = record.String() + return ret, nil } -func GeoIpASN(field string, p *types.Event, ctx interface{}, plog *log.Entry) (map[string]string, error) { - ret := make(map[string]string) +func GeoIpASN(field string, p *types.Event, plog *log.Entry) (map[string]string, error) { if field == "" { return nil, nil } - ip := net.ParseIP(field) - if ip == nil { - plog.Infof("Can't parse ip %s, no ASN enrich", ip) - return nil, nil - } - record, err := ctx.(*geoip2.Reader).ASN(ip) + r, err := exprhelpers.GeoIPASNEnrich(field) + if err != nil { plog.Errorf("Unable to enrich ip '%s'", field) return nil, nil //nolint:nilerr } + + record := r.(*geoip2.ASN) + + ret := make(map[string]string) + ret["ASNNumber"] = fmt.Sprintf("%d", record.AutonomousSystemNumber) ret["ASNumber"] = fmt.Sprintf("%d", record.AutonomousSystemNumber) ret["ASNOrg"] = record.AutonomousSystemOrganization @@ -62,21 +62,21 @@ func GeoIpASN(field string, p *types.Event, ctx interface{}, plog *log.Entry) (m return ret, nil } -func GeoIpCity(field string, p *types.Event, ctx interface{}, plog *log.Entry) (map[string]string, error) { - ret := make(map[string]string) +func GeoIpCity(field string, p *types.Event, plog *log.Entry) (map[string]string, error) { if field == "" { return nil, nil } - ip := net.ParseIP(field) - if ip == nil { - plog.Infof("Can't parse ip %s, no City enrich", ip) - return nil, nil - } - record, err := ctx.(*geoip2.Reader).City(ip) + + r, 
err := exprhelpers.GeoIPEnrich(field) + if err != nil { - plog.Debugf("Unable to enrich ip '%s'", ip) + plog.Errorf("Unable to enrich ip '%s'", field) return nil, nil //nolint:nilerr } + + record := r.(*geoip2.City) + ret := make(map[string]string) + if record.Country.IsoCode != "" { ret["IsoCode"] = record.Country.IsoCode ret["IsInEU"] = strconv.FormatBool(record.Country.IsInEuropeanUnion) @@ -88,7 +88,7 @@ func GeoIpCity(field string, p *types.Event, ctx interface{}, plog *log.Entry) ( ret["IsInEU"] = strconv.FormatBool(record.RepresentedCountry.IsInEuropeanUnion) } else { ret["IsoCode"] = "" - ret["IsInEU"] = strconv.FormatBool(false) + ret["IsInEU"] = "false" } ret["Latitude"] = fmt.Sprintf("%f", record.Location.Latitude) @@ -98,33 +98,3 @@ func GeoIpCity(field string, p *types.Event, ctx interface{}, plog *log.Entry) ( return ret, nil } - -func GeoIPCityInit(cfg map[string]string) (interface{}, error) { - dbCityReader, err := geoip2.Open(cfg["datadir"] + "/GeoLite2-City.mmdb") - if err != nil { - log.Debugf("couldn't open geoip : %v", err) - return nil, err - } - - return dbCityReader, nil -} - -func GeoIPASNInit(cfg map[string]string) (interface{}, error) { - dbASReader, err := geoip2.Open(cfg["datadir"] + "/GeoLite2-ASN.mmdb") - if err != nil { - log.Debugf("couldn't open geoip : %v", err) - return nil, err - } - - return dbASReader, nil -} - -func IpToRangeInit(cfg map[string]string) (interface{}, error) { - ipToRangeReader, err := maxminddb.Open(cfg["datadir"] + "/GeoLite2-ASN.mmdb") - if err != nil { - log.Debugf("couldn't open geoip : %v", err) - return nil, err - } - - return ipToRangeReader, nil -} diff --git a/pkg/parser/enrich_unmarshal.go b/pkg/parser/enrich_unmarshal.go index dce9c75d466..7ff91b70aea 100644 --- a/pkg/parser/enrich_unmarshal.go +++ b/pkg/parser/enrich_unmarshal.go @@ -8,7 +8,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) -func unmarshalJSON(field string, p *types.Event, ctx interface{}, plog *log.Entry) 
(map[string]string, error) { +func unmarshalJSON(field string, p *types.Event, plog *log.Entry) (map[string]string, error) { err := json.Unmarshal([]byte(p.Line.Raw), &p.Unmarshaled) if err != nil { plog.Errorf("could not unmarshal JSON: %s", err) @@ -17,7 +17,3 @@ func unmarshalJSON(field string, p *types.Event, ctx interface{}, plog *log.Entr plog.Tracef("unmarshaled JSON: %+v", p.Unmarshaled) return nil, nil } - -func unmarshalInit(cfg map[string]string) (interface{}, error) { - return nil, nil -} diff --git a/pkg/parser/node.go b/pkg/parser/node.go index 244f361d6b8..11ffb8aa7fa 100644 --- a/pkg/parser/node.go +++ b/pkg/parser/node.go @@ -64,8 +64,9 @@ type Node struct { Data []*types.DataSource `yaml:"data,omitempty"` } -func (n *Node) validate(pctx *UnixParserCtx, ectx EnricherCtx) error { - // stage is being set automagically +func (n *Node) validate(ectx EnricherCtx) error { + + //stage is being set automagically if n.Stage == "" { return errors.New("stage needs to be an existing stage") } @@ -635,7 +636,7 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error { return errors.New("Node is empty") } - if err := n.validate(pctx, ectx); err != nil { + if err := n.validate(ectx); err != nil { return err } diff --git a/pkg/parser/node_test.go b/pkg/parser/node_test.go index d85aa82a8ae..be12176bff9 100644 --- a/pkg/parser/node_test.go +++ b/pkg/parser/node_test.go @@ -56,7 +56,7 @@ func TestParserConfigs(t *testing.T) { t.Fatalf("Compile: (%d/%d) expected error", idx+1, len(CfgTests)) } - err = CfgTests[idx].NodeCfg.validate(pctx, EnricherCtx{}) + err = CfgTests[idx].NodeCfg.validate(EnricherCtx{}) if CfgTests[idx].Valid == true && err != nil { t.Fatalf("Valid: (%d/%d) expected valid, got : %s", idx+1, len(CfgTests), err) } diff --git a/pkg/parser/parsing_test.go b/pkg/parser/parsing_test.go index d009bd0c515..d97dce8d20f 100644 --- a/pkg/parser/parsing_test.go +++ b/pkg/parser/parsing_test.go @@ -152,7 +152,11 @@ func prepTests() 
(*UnixParserCtx, EnricherCtx, error) { //Load enrichment datadir := "./test_data/" - ectx, err = Loadplugin(datadir) + err = exprhelpers.GeoIPInit(datadir) + if err != nil { + log.Fatalf("unable to initialize GeoIP: %s", err) + } + ectx, err = Loadplugin() if err != nil { log.Fatalf("failed to load plugin geoip : %v", err) } diff --git a/pkg/parser/runtime.go b/pkg/parser/runtime.go index afdf88dc873..1596ef5ffd9 100644 --- a/pkg/parser/runtime.go +++ b/pkg/parser/runtime.go @@ -155,7 +155,7 @@ func (n *Node) ProcessStatics(statics []ExtraField, event *types.Event) error { /*still way too hackish, but : inject all the results in enriched, and */ if enricherPlugin, ok := n.EnrichFunctions.Registered[static.Method]; ok { clog.Tracef("Found method '%s'", static.Method) - ret, err := enricherPlugin.EnrichFunc(value, event, enricherPlugin.Ctx, n.Logger.WithField("method", static.Method)) + ret, err := enricherPlugin.EnrichFunc(value, event, n.Logger.WithField("method", static.Method)) if err != nil { clog.Errorf("method '%s' returned an error : %v", static.Method, err) } diff --git a/pkg/parser/unix_parser.go b/pkg/parser/unix_parser.go index 720bac3d1fe..280d122ecc1 100644 --- a/pkg/parser/unix_parser.go +++ b/pkg/parser/unix_parser.go @@ -117,7 +117,7 @@ func LoadParsers(cConfig *csconfig.Config, parsers *Parsers) (*Parsers, error) { */ log.Infof("Loading enrich plugins") - parsers.EnricherCtx, err = Loadplugin(cConfig.ConfigPaths.DataDir) + parsers.EnricherCtx, err = Loadplugin() if err != nil { return parsers, fmt.Errorf("failed to load enrich plugin : %v", err) } From 7d6514c7cc457331d3707d1bd2dc92af76a5b2e8 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 30 May 2024 09:45:43 +0200 Subject: [PATCH 145/318] update user agent with version and platform information (#3035) * update user agent with version and platform information * remove unused import * user agent: omit tag correctly if git is dirty --- Makefile | 2 +- 
cmd/crowdsec-cli/alerts.go | 5 ++--- cmd/crowdsec-cli/capi.go | 7 +++---- cmd/crowdsec-cli/console.go | 4 ++-- cmd/crowdsec-cli/decisions.go | 5 ++--- cmd/crowdsec-cli/lapi.go | 7 +++---- cmd/crowdsec-cli/notifications.go | 4 ++-- cmd/crowdsec/lapiclient.go | 5 ++--- .../loki/internal/lokiclient/loki_client.go | 2 +- pkg/apiclient/alerts_service_test.go | 10 +++++----- pkg/apiclient/auth_service_test.go | 11 +++++------ pkg/apiclient/client_http_test.go | 8 ++++---- pkg/apiclient/client_test.go | 17 +++++++++-------- pkg/apiclient/decisions_service_test.go | 7 +++---- pkg/apiserver/apic.go | 4 ++-- pkg/apiserver/apic_metrics_test.go | 6 ++---- pkg/apiserver/apic_test.go | 15 ++++++++------- pkg/cwhub/cwhub.go | 4 ++-- pkg/cwversion/version.go | 13 +++++++------ pkg/metabase/api.go | 6 +++--- 20 files changed, 68 insertions(+), 74 deletions(-) diff --git a/Makefile b/Makefile index 9e21abee02a..324be04683d 100644 --- a/Makefile +++ b/Makefile @@ -78,7 +78,7 @@ LD_OPTS_VARS= \ -X '$(GO_MODULE_NAME)/pkg/csconfig.defaultDataDir=$(DEFAULT_DATADIR)' ifneq (,$(DOCKER_BUILD)) -LD_OPTS_VARS += -X '$(GO_MODULE_NAME)/pkg/cwversion.System=docker' +LD_OPTS_VARS += -X 'github.com/crowdsecurity/go-cs-lib/version.System=docker' endif GO_TAGS := netgo,osusergo,sqlite_omit_load_extension diff --git a/cmd/crowdsec-cli/alerts.go b/cmd/crowdsec-cli/alerts.go index 4cc4a992c43..d32af6ae56c 100644 --- a/cmd/crowdsec-cli/alerts.go +++ b/cmd/crowdsec-cli/alerts.go @@ -19,10 +19,9 @@ import ( "github.com/spf13/cobra" "gopkg.in/yaml.v3" - "github.com/crowdsecurity/go-cs-lib/version" - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/apiclient" + "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/database" "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/types" @@ -209,7 +208,7 @@ func (cli *cliAlerts) NewCommand() *cobra.Command { cli.client, err = 
apiclient.NewClient(&apiclient.Config{ MachineID: cfg.API.Client.Credentials.Login, Password: strfmt.Password(cfg.API.Client.Credentials.Password), - UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), + UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }) diff --git a/cmd/crowdsec-cli/capi.go b/cmd/crowdsec-cli/capi.go index b89d9c7edb0..f5940d71b66 100644 --- a/cmd/crowdsec-cli/capi.go +++ b/cmd/crowdsec-cli/capi.go @@ -12,12 +12,11 @@ import ( "github.com/spf13/cobra" "gopkg.in/yaml.v3" - "github.com/crowdsecurity/go-cs-lib/version" - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/apiclient" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -81,7 +80,7 @@ func (cli *cliCapi) register(capiUserPrefix string, outputFile string) error { _, err = apiclient.RegisterClient(&apiclient.Config{ MachineID: capiUser, Password: password, - UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), + UserAgent: cwversion.UserAgent(), URL: apiurl, VersionPrefix: CAPIURLPrefix, }, nil) @@ -173,7 +172,7 @@ func QueryCAPIStatus(hub *cwhub.Hub, credURL string, login string, password stri Client, err = apiclient.NewDefaultClient(apiURL, CAPIURLPrefix, - fmt.Sprintf("crowdsec/%s", version.String()), + cwversion.UserAgent(), nil) if err != nil { return fmt.Errorf("init default client: %w", err) diff --git a/cmd/crowdsec-cli/console.go b/cmd/crowdsec-cli/console.go index 4aba689aa05..149b7656884 100644 --- a/cmd/crowdsec-cli/console.go +++ b/cmd/crowdsec-cli/console.go @@ -18,12 +18,12 @@ import ( "gopkg.in/yaml.v3" "github.com/crowdsecurity/go-cs-lib/ptr" - "github.com/crowdsecurity/go-cs-lib/version" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/apiclient" 
"github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -147,7 +147,7 @@ After running this command your will need to validate the enrollment in the weba MachineID: cli.cfg().API.Server.OnlineClient.Credentials.Login, Password: password, Scenarios: scenarios, - UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), + UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v3", }) diff --git a/cmd/crowdsec-cli/decisions.go b/cmd/crowdsec-cli/decisions.go index 3fb790633b5..9b9159561be 100644 --- a/cmd/crowdsec-cli/decisions.go +++ b/cmd/crowdsec-cli/decisions.go @@ -17,9 +17,8 @@ import ( log "github.com/sirupsen/logrus" "github.com/spf13/cobra" - "github.com/crowdsecurity/go-cs-lib/version" - "github.com/crowdsecurity/crowdsec/pkg/apiclient" + "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -150,7 +149,7 @@ func (cli *cliDecisions) NewCommand() *cobra.Command { Client, err = apiclient.NewClient(&apiclient.Config{ MachineID: cfg.API.Client.Credentials.Login, Password: password, - UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), + UserAgent: cwversion.UserAgent(), URL: apiurl, VersionPrefix: "v1", }) diff --git a/cmd/crowdsec-cli/lapi.go b/cmd/crowdsec-cli/lapi.go index 7cffd7ffc7f..0d7ebc64431 100644 --- a/cmd/crowdsec-cli/lapi.go +++ b/cmd/crowdsec-cli/lapi.go @@ -15,13 +15,12 @@ import ( "github.com/spf13/cobra" "gopkg.in/yaml.v3" - "github.com/crowdsecurity/go-cs-lib/version" - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/alertcontext" "github.com/crowdsecurity/crowdsec/pkg/apiclient" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/crowdsecurity/crowdsec/pkg/cwversion" 
"github.com/crowdsecurity/crowdsec/pkg/exprhelpers" "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/parser" @@ -53,7 +52,7 @@ func QueryLAPIStatus(hub *cwhub.Hub, credURL string, login string, password stri Client, err = apiclient.NewDefaultClient(apiURL, LAPIURLPrefix, - fmt.Sprintf("crowdsec/%s", version.String()), + cwversion.UserAgent(), nil) if err != nil { return fmt.Errorf("init default client: %w", err) @@ -120,7 +119,7 @@ func (cli *cliLapi) register(apiURL string, outputFile string, machine string) e _, err = apiclient.RegisterClient(&apiclient.Config{ MachineID: lapiUser, Password: password, - UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), + UserAgent: cwversion.UserAgent(), URL: apiurl, VersionPrefix: LAPIURLPrefix, }, nil) diff --git a/cmd/crowdsec-cli/notifications.go b/cmd/crowdsec-cli/notifications.go index cb102df6928..84dd6b941c9 100644 --- a/cmd/crowdsec-cli/notifications.go +++ b/cmd/crowdsec-cli/notifications.go @@ -23,13 +23,13 @@ import ( "gopkg.in/yaml.v3" "github.com/crowdsecurity/go-cs-lib/ptr" - "github.com/crowdsecurity/go-cs-lib/version" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/apiclient" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/csplugin" "github.com/crowdsecurity/crowdsec/pkg/csprofiles" + "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -462,7 +462,7 @@ func (cli *cliNotifications) fetchAlertFromArgString(toParse string) (*models.Al client, err := apiclient.NewClient(&apiclient.Config{ MachineID: cfg.API.Client.Credentials.Login, Password: strfmt.Password(cfg.API.Client.Credentials.Password), - UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), + UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }) diff --git a/cmd/crowdsec/lapiclient.go 
b/cmd/crowdsec/lapiclient.go index f12aea5ac0d..ae23850eb0a 100644 --- a/cmd/crowdsec/lapiclient.go +++ b/cmd/crowdsec/lapiclient.go @@ -8,10 +8,9 @@ import ( "github.com/go-openapi/strfmt" - "github.com/crowdsecurity/go-cs-lib/version" - "github.com/crowdsecurity/crowdsec/pkg/apiclient" "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/cwhub" "github.com/crowdsecurity/crowdsec/pkg/models" ) @@ -47,7 +46,7 @@ func AuthenticatedLAPIClient(credentials csconfig.ApiCredentialsCfg, hub *cwhub. MachineID: credentials.Login, Password: password, Scenarios: installedScenariosAndAppsecRules, - UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), + UserAgent: cwversion.UserAgent(), URL: apiURL, PapiURL: papiURL, VersionPrefix: "v1", diff --git a/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go b/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go index d2af4e8af28..be14939c44d 100644 --- a/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go +++ b/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go @@ -321,6 +321,6 @@ func NewLokiClient(config Config) *LokiClient { if config.Username != "" || config.Password != "" { headers["Authorization"] = "Basic " + base64.StdEncoding.EncodeToString([]byte(config.Username+":"+config.Password)) } - headers["User-Agent"] = "Crowdsec " + cwversion.VersionStr() + headers["User-Agent"] = cwversion.UserAgent() return &LokiClient{Logger: log.WithField("component", "lokiclient"), config: config, requestHeaders: headers} } diff --git a/pkg/apiclient/alerts_service_test.go b/pkg/apiclient/alerts_service_test.go index 31a947556bb..12ef2d295f4 100644 --- a/pkg/apiclient/alerts_service_test.go +++ b/pkg/apiclient/alerts_service_test.go @@ -13,8 +13,8 @@ import ( "github.com/crowdsecurity/go-cs-lib/cstest" "github.com/crowdsecurity/go-cs-lib/ptr" - "github.com/crowdsecurity/go-cs-lib/version" + 
"github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/models" ) @@ -35,7 +35,7 @@ func TestAlertsListAsMachine(t *testing.T) { client, err := NewClient(&Config{ MachineID: "test_login", Password: "test_password", - UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), + UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }) @@ -214,7 +214,7 @@ func TestAlertsGetAsMachine(t *testing.T) { client, err := NewClient(&Config{ MachineID: "test_login", Password: "test_password", - UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), + UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }) @@ -388,7 +388,7 @@ func TestAlertsCreateAsMachine(t *testing.T) { client, err := NewClient(&Config{ MachineID: "test_login", Password: "test_password", - UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), + UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }) @@ -430,7 +430,7 @@ func TestAlertsDeleteAsMachine(t *testing.T) { client, err := NewClient(&Config{ MachineID: "test_login", Password: "test_password", - UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), + UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }) diff --git a/pkg/apiclient/auth_service_test.go b/pkg/apiclient/auth_service_test.go index f5de827a121..3e887149a98 100644 --- a/pkg/apiclient/auth_service_test.go +++ b/pkg/apiclient/auth_service_test.go @@ -14,8 +14,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/crowdsecurity/go-cs-lib/version" - + "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/models" ) @@ -88,7 +87,7 @@ func TestWatcherRegister(t *testing.T) { clientconfig := Config{ MachineID: "test_login", Password: "test_password", - UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), + UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", } @@ -126,7 +125,7 @@ func TestWatcherAuth(t 
*testing.T) { clientConfig := &Config{ MachineID: "test_login", Password: "test_password", - UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), + UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", Scenarios: []string{"crowdsecurity/test"}, @@ -207,7 +206,7 @@ func TestWatcherUnregister(t *testing.T) { mycfg := &Config{ MachineID: "test_login", Password: "test_password", - UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), + UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", Scenarios: []string{"crowdsecurity/test"}, @@ -261,7 +260,7 @@ func TestWatcherEnroll(t *testing.T) { mycfg := &Config{ MachineID: "test_login", Password: "test_password", - UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), + UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", Scenarios: []string{"crowdsecurity/test"}, diff --git a/pkg/apiclient/client_http_test.go b/pkg/apiclient/client_http_test.go index a7582eaf437..4bdfe1d0da5 100644 --- a/pkg/apiclient/client_http_test.go +++ b/pkg/apiclient/client_http_test.go @@ -2,7 +2,6 @@ package apiclient import ( "context" - "fmt" "net/http" "net/url" "testing" @@ -11,7 +10,8 @@ import ( "github.com/stretchr/testify/require" "github.com/crowdsecurity/go-cs-lib/cstest" - "github.com/crowdsecurity/go-cs-lib/version" + + "github.com/crowdsecurity/crowdsec/pkg/cwversion" ) func TestNewRequestInvalid(t *testing.T) { @@ -25,7 +25,7 @@ func TestNewRequestInvalid(t *testing.T) { client, err := NewClient(&Config{ MachineID: "test_login", Password: "test_password", - UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), + UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }) @@ -57,7 +57,7 @@ func TestNewRequestTimeout(t *testing.T) { client, err := NewClient(&Config{ MachineID: "test_login", Password: "test_password", - UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), + UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }) diff --git 
a/pkg/apiclient/client_test.go b/pkg/apiclient/client_test.go index d3296c4b67f..2adba170584 100644 --- a/pkg/apiclient/client_test.go +++ b/pkg/apiclient/client_test.go @@ -17,7 +17,8 @@ import ( "github.com/stretchr/testify/require" "github.com/crowdsecurity/go-cs-lib/cstest" - "github.com/crowdsecurity/go-cs-lib/version" + + "github.com/crowdsecurity/crowdsec/pkg/cwversion" ) /*this is a ripoff of google/go-github approach : @@ -96,7 +97,7 @@ func TestNewClientOk(t *testing.T) { client, err := NewClient(&Config{ MachineID: "test_login", Password: "test_password", - UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), + UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }) @@ -133,7 +134,7 @@ func TestNewClientOk_UnixSocket(t *testing.T) { client, err := NewClient(&Config{ MachineID: "test_login", Password: "test_password", - UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), + UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }) @@ -171,7 +172,7 @@ func TestNewClientKo(t *testing.T) { client, err := NewClient(&Config{ MachineID: "test_login", Password: "test_password", - UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), + UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }) @@ -249,7 +250,7 @@ func TestNewClientRegisterKO(t *testing.T) { _, err = RegisterClient(&Config{ MachineID: "test_login", Password: "test_password", - UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), + UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }, &http.Client{}) @@ -280,7 +281,7 @@ func TestNewClientRegisterOK(t *testing.T) { client, err := RegisterClient(&Config{ MachineID: "test_login", Password: "test_password", - UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), + UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }, &http.Client{}) @@ -313,7 +314,7 @@ func TestNewClientRegisterOK_UnixSocket(t *testing.T) { client, err := RegisterClient(&Config{ MachineID: 
"test_login", Password: "test_password", - UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), + UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }, &http.Client{}) @@ -343,7 +344,7 @@ func TestNewClientBadAnswer(t *testing.T) { _, err = RegisterClient(&Config{ MachineID: "test_login", Password: "test_password", - UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), + UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }, &http.Client{}) diff --git a/pkg/apiclient/decisions_service_test.go b/pkg/apiclient/decisions_service_test.go index fb2fb7342f7..97b5aa26482 100644 --- a/pkg/apiclient/decisions_service_test.go +++ b/pkg/apiclient/decisions_service_test.go @@ -2,7 +2,6 @@ package apiclient import ( "context" - "fmt" "net/http" "net/url" "testing" @@ -13,8 +12,8 @@ import ( "github.com/crowdsecurity/go-cs-lib/cstest" "github.com/crowdsecurity/go-cs-lib/ptr" - "github.com/crowdsecurity/go-cs-lib/version" + "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/modelscapi" ) @@ -403,7 +402,7 @@ func TestDeleteDecisions(t *testing.T) { client, err := NewClient(&Config{ MachineID: "test_login", Password: "test_password", - UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), + UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }) @@ -504,7 +503,7 @@ func TestDecisionsStreamOpts_addQueryParamsToURL(t *testing.T) { // client, err := NewClient(&Config{ // MachineID: "test_login", // Password: "test_password", -// UserAgent: fmt.Sprintf("crowdsec/%s", cwversion.VersionStr()), +// UserAgent: cwversion.UserAgent(), // URL: apiURL, // VersionPrefix: "v1", // }) diff --git a/pkg/apiserver/apic.go b/pkg/apiserver/apic.go index 3f646071b0e..b6c560c0a5d 100644 --- a/pkg/apiserver/apic.go +++ b/pkg/apiserver/apic.go @@ -20,10 +20,10 @@ import ( "github.com/crowdsecurity/go-cs-lib/ptr" "github.com/crowdsecurity/go-cs-lib/trace" - 
"github.com/crowdsecurity/go-cs-lib/version" "github.com/crowdsecurity/crowdsec/pkg/apiclient" "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/database" "github.com/crowdsecurity/crowdsec/pkg/database/ent" "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" @@ -215,7 +215,7 @@ func NewAPIC(config *csconfig.OnlineApiClientCfg, dbClient *database.Client, con ret.apiClient, err = apiclient.NewClient(&apiclient.Config{ MachineID: config.Credentials.Login, Password: password, - UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), + UserAgent: cwversion.UserAgent(), URL: apiURL, PapiURL: papiURL, VersionPrefix: "v3", diff --git a/pkg/apiserver/apic_metrics_test.go b/pkg/apiserver/apic_metrics_test.go index 529dd6c6839..f3b9b352316 100644 --- a/pkg/apiserver/apic_metrics_test.go +++ b/pkg/apiserver/apic_metrics_test.go @@ -2,7 +2,6 @@ package apiserver import ( "context" - "fmt" "net/url" "testing" "time" @@ -11,9 +10,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/crowdsecurity/go-cs-lib/version" - "github.com/crowdsecurity/crowdsec/pkg/apiclient" + "github.com/crowdsecurity/crowdsec/pkg/cwversion" ) func TestAPICSendMetrics(t *testing.T) { @@ -73,7 +71,7 @@ func TestAPICSendMetrics(t *testing.T) { apiClient, err := apiclient.NewDefaultClient( url, "/api", - fmt.Sprintf("crowdsec/%s", version.String()), + cwversion.UserAgent(), nil, ) require.NoError(t, err) diff --git a/pkg/apiserver/apic_test.go b/pkg/apiserver/apic_test.go index 74c627cd020..c5a39455ff9 100644 --- a/pkg/apiserver/apic_test.go +++ b/pkg/apiserver/apic_test.go @@ -26,6 +26,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/apiclient" "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/database" "github.com/crowdsecurity/crowdsec/pkg/database/ent/decision" 
"github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" @@ -675,7 +676,7 @@ func TestAPICWhitelists(t *testing.T) { apic, err := apiclient.NewDefaultClient( url, "/api", - fmt.Sprintf("crowdsec/%s", version.String()), + cwversion.UserAgent(), nil, ) require.NoError(t, err) @@ -816,7 +817,7 @@ func TestAPICPullTop(t *testing.T) { apic, err := apiclient.NewDefaultClient( url, "/api", - fmt.Sprintf("crowdsec/%s", version.String()), + cwversion.UserAgent(), nil, ) require.NoError(t, err) @@ -904,7 +905,7 @@ func TestAPICPullTopBLCacheFirstCall(t *testing.T) { apic, err := apiclient.NewDefaultClient( url, "/api", - fmt.Sprintf("crowdsec/%s", version.String()), + cwversion.UserAgent(), nil, ) require.NoError(t, err) @@ -996,7 +997,7 @@ func TestAPICPullTopBLCacheForceCall(t *testing.T) { apic, err := apiclient.NewDefaultClient( url, "/api", - fmt.Sprintf("crowdsec/%s", version.String()), + cwversion.UserAgent(), nil, ) require.NoError(t, err) @@ -1023,7 +1024,7 @@ func TestAPICPullBlocklistCall(t *testing.T) { apic, err := apiclient.NewDefaultClient( url, "/api", - fmt.Sprintf("crowdsec/%s", version.String()), + cwversion.UserAgent(), nil, ) require.NoError(t, err) @@ -1105,7 +1106,7 @@ func TestAPICPush(t *testing.T) { apic, err := apiclient.NewDefaultClient( url, "/api", - fmt.Sprintf("crowdsec/%s", version.String()), + cwversion.UserAgent(), nil, ) require.NoError(t, err) @@ -1164,7 +1165,7 @@ func TestAPICPull(t *testing.T) { apic, err := apiclient.NewDefaultClient( url, "/api", - fmt.Sprintf("crowdsec/%s", version.String()), + cwversion.UserAgent(), nil, ) require.NoError(t, err) diff --git a/pkg/cwhub/cwhub.go b/pkg/cwhub/cwhub.go index 0496834e472..0a9cc443ce0 100644 --- a/pkg/cwhub/cwhub.go +++ b/pkg/cwhub/cwhub.go @@ -8,7 +8,7 @@ import ( "strings" "time" - "github.com/crowdsecurity/go-cs-lib/version" + "github.com/crowdsecurity/crowdsec/pkg/cwversion" ) // hubTransport wraps a Transport to set a custom User-Agent. 
@@ -17,7 +17,7 @@ type hubTransport struct { } func (t *hubTransport) RoundTrip(req *http.Request) (*http.Response, error) { - req.Header.Set("User-Agent", "crowdsec/"+version.String()) + req.Header.Set("User-Agent", cwversion.UserAgent()) return t.RoundTripper.RoundTrip(req) } diff --git a/pkg/cwversion/version.go b/pkg/cwversion/version.go index ac51567c2fc..d13767e665b 100644 --- a/pkg/cwversion/version.go +++ b/pkg/cwversion/version.go @@ -4,7 +4,6 @@ import ( "encoding/json" "fmt" "net/http" - "runtime" "strings" goversion "github.com/hashicorp/go-version" @@ -14,7 +13,6 @@ import ( var ( Codename string // = "SoumSoum" - System = runtime.GOOS // = "linux" Libre2 = "WebAssembly" ) @@ -26,9 +24,10 @@ const ( ) func versionWithTag() string { + // if the version number already contains the tag, don't duplicate it ret := version.Version - if !strings.HasSuffix(ret, version.Tag) { + if !strings.HasSuffix(ret, version.Tag) && !strings.HasSuffix(ret, "g" + version.Tag + "-dirty") { ret += "-" + version.Tag } @@ -40,8 +39,9 @@ func FullString() string { ret += fmt.Sprintf("Codename: %s\n", Codename) ret += fmt.Sprintf("BuildDate: %s\n", version.BuildDate) ret += fmt.Sprintf("GoVersion: %s\n", version.GoVersion) - ret += fmt.Sprintf("Platform: %s\n", System) + ret += fmt.Sprintf("Platform: %s\n", version.System) ret += fmt.Sprintf("libre2: %s\n", Libre2) + ret += fmt.Sprintf("User-Agent: %s\n", UserAgent()) ret += fmt.Sprintf("Constraint_parser: %s\n", Constraint_parser) ret += fmt.Sprintf("Constraint_scenario: %s\n", Constraint_scenario) ret += fmt.Sprintf("Constraint_api: %s\n", Constraint_api) @@ -50,10 +50,11 @@ func FullString() string { return ret } -func VersionStr() string { - return fmt.Sprintf("%s-%s-%s", version.Version, System, version.Tag) +func UserAgent() string { + return "crowdsec/" + versionWithTag() + "-" + version.System } +// VersionStrip remove the tag from the version string, used to match with a hub branch func VersionStrip() string { ret := 
strings.Split(version.Version, "~") ret = strings.Split(ret[0], "-") diff --git a/pkg/metabase/api.go b/pkg/metabase/api.go index bded4c9e83d..387e8d151e0 100644 --- a/pkg/metabase/api.go +++ b/pkg/metabase/api.go @@ -6,10 +6,10 @@ import ( "net/http" "time" - "github.com/crowdsecurity/go-cs-lib/version" - "github.com/dghubble/sling" log "github.com/sirupsen/logrus" + + "github.com/crowdsecurity/crowdsec/pkg/cwversion" ) type MBClient struct { @@ -38,7 +38,7 @@ var ( func NewMBClient(url string) (*MBClient, error) { httpClient := &http.Client{Timeout: 20 * time.Second} return &MBClient{ - CTX: sling.New().Client(httpClient).Base(url).Set("User-Agent", fmt.Sprintf("crowdsec/%s", version.String())), + CTX: sling.New().Client(httpClient).Base(url).Set("User-Agent", cwversion.UserAgent()), Client: httpClient, }, nil } From 16bfab86c8e0de6631b8fcea99b1da009affb902 Mon Sep 17 00:00:00 2001 From: blotus Date: Fri, 31 May 2024 09:38:43 +0200 Subject: [PATCH 146/318] check type assertion in geoip enrichers (#3040) --- pkg/parser/enrich_geoip.go | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/pkg/parser/enrich_geoip.go b/pkg/parser/enrich_geoip.go index 5e1fdbfc437..58732129af8 100644 --- a/pkg/parser/enrich_geoip.go +++ b/pkg/parser/enrich_geoip.go @@ -29,7 +29,11 @@ func IpToRange(field string, p *types.Event, plog *log.Entry) (map[string]string return nil, nil } - record := r.(*net.IPNet) + record, ok := r.(*net.IPNet) + + if !ok { + return nil, nil + } ret := make(map[string]string) ret["SourceRange"] = record.String() @@ -49,7 +53,11 @@ func GeoIpASN(field string, p *types.Event, plog *log.Entry) (map[string]string, return nil, nil //nolint:nilerr } - record := r.(*geoip2.ASN) + record, ok := r.(*geoip2.ASN) + + if !ok { + return nil, nil + } ret := make(map[string]string) @@ -74,7 +82,12 @@ func GeoIpCity(field string, p *types.Event, plog *log.Entry) (map[string]string return nil, nil //nolint:nilerr } - record := 
r.(*geoip2.City) + record, ok := r.(*geoip2.City) + + if !ok { + return nil, nil + } + ret := make(map[string]string) if record.Country.IsoCode != "" { From 02e2c8aed74e4cc62632cd70cd0c76f007190dc6 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 31 May 2024 15:00:38 +0200 Subject: [PATCH 147/318] deps: use ent 0.13.1 (#3023) --- go.mod | 10 +++++----- go.sum | 23 ++++++++++++----------- pkg/database/ent/alert.go | 8 +++----- pkg/database/ent/decision.go | 8 +++----- pkg/database/ent/event.go | 8 +++----- pkg/database/ent/generate.go | 2 +- pkg/database/ent/meta.go | 8 +++----- pkg/database/ent/runtime/runtime.go | 4 ++-- 8 files changed, 32 insertions(+), 39 deletions(-) diff --git a/go.mod b/go.mod index 93f3e4cd806..1bc63a470df 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ go 1.22 // toolchain go1.21.3 require ( - entgo.io/ent v0.12.5 + entgo.io/ent v0.13.1 github.com/AlecAivazis/survey/v2 v2.3.7 github.com/Masterminds/semver/v3 v3.2.1 github.com/Masterminds/sprig/v3 v3.2.3 @@ -83,7 +83,7 @@ require ( github.com/wasilibs/go-re2 v1.3.0 github.com/xhit/go-simple-mail/v2 v2.16.0 golang.org/x/crypto v0.22.0 - golang.org/x/mod v0.11.0 + golang.org/x/mod v0.15.0 golang.org/x/sys v0.19.0 golang.org/x/text v0.14.0 google.golang.org/grpc v1.56.3 @@ -96,7 +96,7 @@ require ( ) require ( - ariga.io/atlas v0.14.1-0.20230918065911-83ad451a4935 // indirect + ariga.io/atlas v0.19.1-0.20240203083654-5948b60a8e43 // indirect github.com/Masterminds/goutils v1.1.1 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect github.com/ahmetalpbalkan/dlog v0.0.0-20170105205344-4fb5f8204f26 // indirect @@ -130,7 +130,7 @@ require ( github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/glog v1.1.0 // indirect github.com/golang/protobuf v1.5.3 // indirect - github.com/google/go-cmp v0.5.9 // indirect + github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/hashicorp/hcl/v2 v2.13.0 // 
indirect github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb // indirect @@ -204,7 +204,7 @@ require ( golang.org/x/sync v0.6.0 // indirect golang.org/x/term v0.19.0 // indirect golang.org/x/time v0.3.0 // indirect - golang.org/x/tools v0.8.1-0.20230428195545-5283a0178901 // indirect + golang.org/x/tools v0.18.0 // indirect golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect diff --git a/go.sum b/go.sum index 310cbd85ab7..ba4e6267bb9 100644 --- a/go.sum +++ b/go.sum @@ -1,9 +1,9 @@ -ariga.io/atlas v0.14.1-0.20230918065911-83ad451a4935 h1:JnYs/y8RJ3+MiIUp+3RgyyeO48VHLAZimqiaZYnMKk8= -ariga.io/atlas v0.14.1-0.20230918065911-83ad451a4935/go.mod h1:isZrlzJ5cpoCoKFoY9knZug7Lq4pP1cm8g3XciLZ0Pw= +ariga.io/atlas v0.19.1-0.20240203083654-5948b60a8e43 h1:GwdJbXydHCYPedeeLt4x/lrlIISQ4JTH1mRWuE5ZZ14= +ariga.io/atlas v0.19.1-0.20240203083654-5948b60a8e43/go.mod h1:uj3pm+hUTVN/X5yfdBexHlZv+1Xu5u5ZbZx7+CDavNU= bitbucket.org/creachadair/stringset v0.0.9 h1:L4vld9nzPt90UZNrXjNelTshD74ps4P5NGs3Iq6yN3o= bitbucket.org/creachadair/stringset v0.0.9/go.mod h1:t+4WcQ4+PXTa8aQdNKe40ZP6iwesoMFWAxPGd3UGjyY= -entgo.io/ent v0.12.5 h1:KREM5E4CSoej4zeGa88Ou/gfturAnpUv0mzAjch1sj4= -entgo.io/ent v0.12.5/go.mod h1:Y3JVAjtlIk8xVZYSn3t3mf8xlZIn5SAOXZQxD6kKI+Q= +entgo.io/ent v0.13.1 h1:uD8QwN1h6SNphdCCzmkMN3feSUzNnVvV/WIkHKMbzOE= +entgo.io/ent v0.13.1/go.mod h1:qCEmo+biw3ccBn9OyL4ZK5dfpwg++l1Gxwac5B1206A= github.com/AlecAivazis/survey/v2 v2.3.7 h1:6I/u8FvytdGsgonrYsVn2t8t4QiRnh6QSTqkkhIiSjQ= github.com/AlecAivazis/survey/v2 v2.3.7/go.mod h1:xUTIdE4KCOIjsBAE1JYsUPoCqYdZ1reCfTwbto0Fduo= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= @@ -311,8 +311,9 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.5/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -451,8 +452,8 @@ github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348 h1:MtvEpTB6LX3vkb4ax0b5D2DHbNAUsen0Gx5wZoq3lV4= -github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/leodido/go-urn v1.3.0 h1:jX8FDLfW4ThVXctBNZ+3cIWnCSnrACDV73r76dy0aQQ= github.com/leodido/go-urn v1.3.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= @@ -767,8 +768,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.11.0 h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU= -golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= +golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -888,8 +889,8 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.8.1-0.20230428195545-5283a0178901 h1:0wxTF6pSjIIhNt7mo9GvjDfzyCOiWhmICgtO/Ah948s= -golang.org/x/tools v0.8.1-0.20230428195545-5283a0178901/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= +golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= +golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/pkg/database/ent/alert.go 
b/pkg/database/ent/alert.go index 6da9f0efe76..8bfe0badc09 100644 --- a/pkg/database/ent/alert.go +++ b/pkg/database/ent/alert.go @@ -89,12 +89,10 @@ type AlertEdges struct { // OwnerOrErr returns the Owner value or an error if the edge // was not loaded in eager-loading, or loaded but was not found. func (e AlertEdges) OwnerOrErr() (*Machine, error) { - if e.loadedTypes[0] { - if e.Owner == nil { - // Edge was loaded but was not found. - return nil, &NotFoundError{label: machine.Label} - } + if e.Owner != nil { return e.Owner, nil + } else if e.loadedTypes[0] { + return nil, &NotFoundError{label: machine.Label} } return nil, &NotLoadedError{edge: "owner"} } diff --git a/pkg/database/ent/decision.go b/pkg/database/ent/decision.go index 1cc0df4c784..4a6dc728509 100644 --- a/pkg/database/ent/decision.go +++ b/pkg/database/ent/decision.go @@ -68,12 +68,10 @@ type DecisionEdges struct { // OwnerOrErr returns the Owner value or an error if the edge // was not loaded in eager-loading, or loaded but was not found. func (e DecisionEdges) OwnerOrErr() (*Alert, error) { - if e.loadedTypes[0] { - if e.Owner == nil { - // Edge was loaded but was not found. - return nil, &NotFoundError{label: alert.Label} - } + if e.Owner != nil { return e.Owner, nil + } else if e.loadedTypes[0] { + return nil, &NotFoundError{label: alert.Label} } return nil, &NotLoadedError{edge: "owner"} } diff --git a/pkg/database/ent/event.go b/pkg/database/ent/event.go index 10e6d01c9d5..b57f1f34ac9 100644 --- a/pkg/database/ent/event.go +++ b/pkg/database/ent/event.go @@ -46,12 +46,10 @@ type EventEdges struct { // OwnerOrErr returns the Owner value or an error if the edge // was not loaded in eager-loading, or loaded but was not found. func (e EventEdges) OwnerOrErr() (*Alert, error) { - if e.loadedTypes[0] { - if e.Owner == nil { - // Edge was loaded but was not found. 
- return nil, &NotFoundError{label: alert.Label} - } + if e.Owner != nil { return e.Owner, nil + } else if e.loadedTypes[0] { + return nil, &NotFoundError{label: alert.Label} } return nil, &NotLoadedError{edge: "owner"} } diff --git a/pkg/database/ent/generate.go b/pkg/database/ent/generate.go index 5f4b39eec90..8ada999d7ab 100644 --- a/pkg/database/ent/generate.go +++ b/pkg/database/ent/generate.go @@ -1,4 +1,4 @@ package ent -//go:generate go run -mod=mod entgo.io/ent/cmd/ent@v0.12.5 generate ./schema +//go:generate go run -mod=mod entgo.io/ent/cmd/ent@v0.13.1 generate ./schema diff --git a/pkg/database/ent/meta.go b/pkg/database/ent/meta.go index 768358ca2bf..7e29627957c 100644 --- a/pkg/database/ent/meta.go +++ b/pkg/database/ent/meta.go @@ -46,12 +46,10 @@ type MetaEdges struct { // OwnerOrErr returns the Owner value or an error if the edge // was not loaded in eager-loading, or loaded but was not found. func (e MetaEdges) OwnerOrErr() (*Alert, error) { - if e.loadedTypes[0] { - if e.Owner == nil { - // Edge was loaded but was not found. - return nil, &NotFoundError{label: alert.Label} - } + if e.Owner != nil { return e.Owner, nil + } else if e.loadedTypes[0] { + return nil, &NotFoundError{label: alert.Label} } return nil, &NotLoadedError{edge: "owner"} } diff --git a/pkg/database/ent/runtime/runtime.go b/pkg/database/ent/runtime/runtime.go index d10a2fb5459..9cb9d96258a 100644 --- a/pkg/database/ent/runtime/runtime.go +++ b/pkg/database/ent/runtime/runtime.go @@ -5,6 +5,6 @@ package runtime // The schema-stitching logic is generated in github.com/crowdsecurity/crowdsec/pkg/database/ent/runtime.go const ( - Version = "v0.12.5" // Version of ent codegen. - Sum = "h1:KREM5E4CSoej4zeGa88Ou/gfturAnpUv0mzAjch1sj4=" // Sum of ent codegen. + Version = "v0.13.1" // Version of ent codegen. + Sum = "h1:uD8QwN1h6SNphdCCzmkMN3feSUzNnVvV/WIkHKMbzOE=" // Sum of ent codegen. 
) From 6dbc5fd522889f0441b6705bebe61c46f74833fe Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 31 May 2024 15:19:48 +0200 Subject: [PATCH 148/318] db: mark immutable columns / remove unused (#3024) * db: mark immutable columns * db: drop unused column * lint --- pkg/database/ent/alert_update.go | 1022 ------------------------- pkg/database/ent/bouncer.go | 13 +- pkg/database/ent/bouncer/bouncer.go | 10 - pkg/database/ent/bouncer/where.go | 55 -- pkg/database/ent/bouncer_create.go | 22 - pkg/database/ent/bouncer_update.go | 120 --- pkg/database/ent/configitem_update.go | 34 - pkg/database/ent/decision_update.go | 580 -------------- pkg/database/ent/event_update.go | 94 --- pkg/database/ent/lock_update.go | 35 - pkg/database/ent/meta_update.go | 128 ---- pkg/database/ent/migrate/schema.go | 1 - pkg/database/ent/mutation.go | 75 +- pkg/database/ent/runtime.go | 8 +- pkg/database/ent/schema/alert.go | 43 +- pkg/database/ent/schema/bouncer.go | 7 +- pkg/database/ent/schema/config.go | 3 +- pkg/database/ent/schema/decision.go | 25 +- pkg/database/ent/schema/event.go | 5 +- pkg/database/ent/schema/lock.go | 3 +- pkg/database/ent/schema/meta.go | 7 +- 21 files changed, 54 insertions(+), 2236 deletions(-) diff --git a/pkg/database/ent/alert_update.go b/pkg/database/ent/alert_update.go index 8b88c35c7d7..48ce221ac82 100644 --- a/pkg/database/ent/alert_update.go +++ b/pkg/database/ent/alert_update.go @@ -38,442 +38,6 @@ func (au *AlertUpdate) SetUpdatedAt(t time.Time) *AlertUpdate { return au } -// SetScenario sets the "scenario" field. -func (au *AlertUpdate) SetScenario(s string) *AlertUpdate { - au.mutation.SetScenario(s) - return au -} - -// SetNillableScenario sets the "scenario" field if the given value is not nil. -func (au *AlertUpdate) SetNillableScenario(s *string) *AlertUpdate { - if s != nil { - au.SetScenario(*s) - } - return au -} - -// SetBucketId sets the "bucketId" field. 
-func (au *AlertUpdate) SetBucketId(s string) *AlertUpdate { - au.mutation.SetBucketId(s) - return au -} - -// SetNillableBucketId sets the "bucketId" field if the given value is not nil. -func (au *AlertUpdate) SetNillableBucketId(s *string) *AlertUpdate { - if s != nil { - au.SetBucketId(*s) - } - return au -} - -// ClearBucketId clears the value of the "bucketId" field. -func (au *AlertUpdate) ClearBucketId() *AlertUpdate { - au.mutation.ClearBucketId() - return au -} - -// SetMessage sets the "message" field. -func (au *AlertUpdate) SetMessage(s string) *AlertUpdate { - au.mutation.SetMessage(s) - return au -} - -// SetNillableMessage sets the "message" field if the given value is not nil. -func (au *AlertUpdate) SetNillableMessage(s *string) *AlertUpdate { - if s != nil { - au.SetMessage(*s) - } - return au -} - -// ClearMessage clears the value of the "message" field. -func (au *AlertUpdate) ClearMessage() *AlertUpdate { - au.mutation.ClearMessage() - return au -} - -// SetEventsCount sets the "eventsCount" field. -func (au *AlertUpdate) SetEventsCount(i int32) *AlertUpdate { - au.mutation.ResetEventsCount() - au.mutation.SetEventsCount(i) - return au -} - -// SetNillableEventsCount sets the "eventsCount" field if the given value is not nil. -func (au *AlertUpdate) SetNillableEventsCount(i *int32) *AlertUpdate { - if i != nil { - au.SetEventsCount(*i) - } - return au -} - -// AddEventsCount adds i to the "eventsCount" field. -func (au *AlertUpdate) AddEventsCount(i int32) *AlertUpdate { - au.mutation.AddEventsCount(i) - return au -} - -// ClearEventsCount clears the value of the "eventsCount" field. -func (au *AlertUpdate) ClearEventsCount() *AlertUpdate { - au.mutation.ClearEventsCount() - return au -} - -// SetStartedAt sets the "startedAt" field. -func (au *AlertUpdate) SetStartedAt(t time.Time) *AlertUpdate { - au.mutation.SetStartedAt(t) - return au -} - -// SetNillableStartedAt sets the "startedAt" field if the given value is not nil. 
-func (au *AlertUpdate) SetNillableStartedAt(t *time.Time) *AlertUpdate { - if t != nil { - au.SetStartedAt(*t) - } - return au -} - -// ClearStartedAt clears the value of the "startedAt" field. -func (au *AlertUpdate) ClearStartedAt() *AlertUpdate { - au.mutation.ClearStartedAt() - return au -} - -// SetStoppedAt sets the "stoppedAt" field. -func (au *AlertUpdate) SetStoppedAt(t time.Time) *AlertUpdate { - au.mutation.SetStoppedAt(t) - return au -} - -// SetNillableStoppedAt sets the "stoppedAt" field if the given value is not nil. -func (au *AlertUpdate) SetNillableStoppedAt(t *time.Time) *AlertUpdate { - if t != nil { - au.SetStoppedAt(*t) - } - return au -} - -// ClearStoppedAt clears the value of the "stoppedAt" field. -func (au *AlertUpdate) ClearStoppedAt() *AlertUpdate { - au.mutation.ClearStoppedAt() - return au -} - -// SetSourceIp sets the "sourceIp" field. -func (au *AlertUpdate) SetSourceIp(s string) *AlertUpdate { - au.mutation.SetSourceIp(s) - return au -} - -// SetNillableSourceIp sets the "sourceIp" field if the given value is not nil. -func (au *AlertUpdate) SetNillableSourceIp(s *string) *AlertUpdate { - if s != nil { - au.SetSourceIp(*s) - } - return au -} - -// ClearSourceIp clears the value of the "sourceIp" field. -func (au *AlertUpdate) ClearSourceIp() *AlertUpdate { - au.mutation.ClearSourceIp() - return au -} - -// SetSourceRange sets the "sourceRange" field. -func (au *AlertUpdate) SetSourceRange(s string) *AlertUpdate { - au.mutation.SetSourceRange(s) - return au -} - -// SetNillableSourceRange sets the "sourceRange" field if the given value is not nil. -func (au *AlertUpdate) SetNillableSourceRange(s *string) *AlertUpdate { - if s != nil { - au.SetSourceRange(*s) - } - return au -} - -// ClearSourceRange clears the value of the "sourceRange" field. -func (au *AlertUpdate) ClearSourceRange() *AlertUpdate { - au.mutation.ClearSourceRange() - return au -} - -// SetSourceAsNumber sets the "sourceAsNumber" field. 
-func (au *AlertUpdate) SetSourceAsNumber(s string) *AlertUpdate { - au.mutation.SetSourceAsNumber(s) - return au -} - -// SetNillableSourceAsNumber sets the "sourceAsNumber" field if the given value is not nil. -func (au *AlertUpdate) SetNillableSourceAsNumber(s *string) *AlertUpdate { - if s != nil { - au.SetSourceAsNumber(*s) - } - return au -} - -// ClearSourceAsNumber clears the value of the "sourceAsNumber" field. -func (au *AlertUpdate) ClearSourceAsNumber() *AlertUpdate { - au.mutation.ClearSourceAsNumber() - return au -} - -// SetSourceAsName sets the "sourceAsName" field. -func (au *AlertUpdate) SetSourceAsName(s string) *AlertUpdate { - au.mutation.SetSourceAsName(s) - return au -} - -// SetNillableSourceAsName sets the "sourceAsName" field if the given value is not nil. -func (au *AlertUpdate) SetNillableSourceAsName(s *string) *AlertUpdate { - if s != nil { - au.SetSourceAsName(*s) - } - return au -} - -// ClearSourceAsName clears the value of the "sourceAsName" field. -func (au *AlertUpdate) ClearSourceAsName() *AlertUpdate { - au.mutation.ClearSourceAsName() - return au -} - -// SetSourceCountry sets the "sourceCountry" field. -func (au *AlertUpdate) SetSourceCountry(s string) *AlertUpdate { - au.mutation.SetSourceCountry(s) - return au -} - -// SetNillableSourceCountry sets the "sourceCountry" field if the given value is not nil. -func (au *AlertUpdate) SetNillableSourceCountry(s *string) *AlertUpdate { - if s != nil { - au.SetSourceCountry(*s) - } - return au -} - -// ClearSourceCountry clears the value of the "sourceCountry" field. -func (au *AlertUpdate) ClearSourceCountry() *AlertUpdate { - au.mutation.ClearSourceCountry() - return au -} - -// SetSourceLatitude sets the "sourceLatitude" field. -func (au *AlertUpdate) SetSourceLatitude(f float32) *AlertUpdate { - au.mutation.ResetSourceLatitude() - au.mutation.SetSourceLatitude(f) - return au -} - -// SetNillableSourceLatitude sets the "sourceLatitude" field if the given value is not nil. 
-func (au *AlertUpdate) SetNillableSourceLatitude(f *float32) *AlertUpdate { - if f != nil { - au.SetSourceLatitude(*f) - } - return au -} - -// AddSourceLatitude adds f to the "sourceLatitude" field. -func (au *AlertUpdate) AddSourceLatitude(f float32) *AlertUpdate { - au.mutation.AddSourceLatitude(f) - return au -} - -// ClearSourceLatitude clears the value of the "sourceLatitude" field. -func (au *AlertUpdate) ClearSourceLatitude() *AlertUpdate { - au.mutation.ClearSourceLatitude() - return au -} - -// SetSourceLongitude sets the "sourceLongitude" field. -func (au *AlertUpdate) SetSourceLongitude(f float32) *AlertUpdate { - au.mutation.ResetSourceLongitude() - au.mutation.SetSourceLongitude(f) - return au -} - -// SetNillableSourceLongitude sets the "sourceLongitude" field if the given value is not nil. -func (au *AlertUpdate) SetNillableSourceLongitude(f *float32) *AlertUpdate { - if f != nil { - au.SetSourceLongitude(*f) - } - return au -} - -// AddSourceLongitude adds f to the "sourceLongitude" field. -func (au *AlertUpdate) AddSourceLongitude(f float32) *AlertUpdate { - au.mutation.AddSourceLongitude(f) - return au -} - -// ClearSourceLongitude clears the value of the "sourceLongitude" field. -func (au *AlertUpdate) ClearSourceLongitude() *AlertUpdate { - au.mutation.ClearSourceLongitude() - return au -} - -// SetSourceScope sets the "sourceScope" field. -func (au *AlertUpdate) SetSourceScope(s string) *AlertUpdate { - au.mutation.SetSourceScope(s) - return au -} - -// SetNillableSourceScope sets the "sourceScope" field if the given value is not nil. -func (au *AlertUpdate) SetNillableSourceScope(s *string) *AlertUpdate { - if s != nil { - au.SetSourceScope(*s) - } - return au -} - -// ClearSourceScope clears the value of the "sourceScope" field. -func (au *AlertUpdate) ClearSourceScope() *AlertUpdate { - au.mutation.ClearSourceScope() - return au -} - -// SetSourceValue sets the "sourceValue" field. 
-func (au *AlertUpdate) SetSourceValue(s string) *AlertUpdate { - au.mutation.SetSourceValue(s) - return au -} - -// SetNillableSourceValue sets the "sourceValue" field if the given value is not nil. -func (au *AlertUpdate) SetNillableSourceValue(s *string) *AlertUpdate { - if s != nil { - au.SetSourceValue(*s) - } - return au -} - -// ClearSourceValue clears the value of the "sourceValue" field. -func (au *AlertUpdate) ClearSourceValue() *AlertUpdate { - au.mutation.ClearSourceValue() - return au -} - -// SetCapacity sets the "capacity" field. -func (au *AlertUpdate) SetCapacity(i int32) *AlertUpdate { - au.mutation.ResetCapacity() - au.mutation.SetCapacity(i) - return au -} - -// SetNillableCapacity sets the "capacity" field if the given value is not nil. -func (au *AlertUpdate) SetNillableCapacity(i *int32) *AlertUpdate { - if i != nil { - au.SetCapacity(*i) - } - return au -} - -// AddCapacity adds i to the "capacity" field. -func (au *AlertUpdate) AddCapacity(i int32) *AlertUpdate { - au.mutation.AddCapacity(i) - return au -} - -// ClearCapacity clears the value of the "capacity" field. -func (au *AlertUpdate) ClearCapacity() *AlertUpdate { - au.mutation.ClearCapacity() - return au -} - -// SetLeakSpeed sets the "leakSpeed" field. -func (au *AlertUpdate) SetLeakSpeed(s string) *AlertUpdate { - au.mutation.SetLeakSpeed(s) - return au -} - -// SetNillableLeakSpeed sets the "leakSpeed" field if the given value is not nil. -func (au *AlertUpdate) SetNillableLeakSpeed(s *string) *AlertUpdate { - if s != nil { - au.SetLeakSpeed(*s) - } - return au -} - -// ClearLeakSpeed clears the value of the "leakSpeed" field. -func (au *AlertUpdate) ClearLeakSpeed() *AlertUpdate { - au.mutation.ClearLeakSpeed() - return au -} - -// SetScenarioVersion sets the "scenarioVersion" field. 
-func (au *AlertUpdate) SetScenarioVersion(s string) *AlertUpdate { - au.mutation.SetScenarioVersion(s) - return au -} - -// SetNillableScenarioVersion sets the "scenarioVersion" field if the given value is not nil. -func (au *AlertUpdate) SetNillableScenarioVersion(s *string) *AlertUpdate { - if s != nil { - au.SetScenarioVersion(*s) - } - return au -} - -// ClearScenarioVersion clears the value of the "scenarioVersion" field. -func (au *AlertUpdate) ClearScenarioVersion() *AlertUpdate { - au.mutation.ClearScenarioVersion() - return au -} - -// SetScenarioHash sets the "scenarioHash" field. -func (au *AlertUpdate) SetScenarioHash(s string) *AlertUpdate { - au.mutation.SetScenarioHash(s) - return au -} - -// SetNillableScenarioHash sets the "scenarioHash" field if the given value is not nil. -func (au *AlertUpdate) SetNillableScenarioHash(s *string) *AlertUpdate { - if s != nil { - au.SetScenarioHash(*s) - } - return au -} - -// ClearScenarioHash clears the value of the "scenarioHash" field. -func (au *AlertUpdate) ClearScenarioHash() *AlertUpdate { - au.mutation.ClearScenarioHash() - return au -} - -// SetSimulated sets the "simulated" field. -func (au *AlertUpdate) SetSimulated(b bool) *AlertUpdate { - au.mutation.SetSimulated(b) - return au -} - -// SetNillableSimulated sets the "simulated" field if the given value is not nil. -func (au *AlertUpdate) SetNillableSimulated(b *bool) *AlertUpdate { - if b != nil { - au.SetSimulated(*b) - } - return au -} - -// SetUUID sets the "uuid" field. -func (au *AlertUpdate) SetUUID(s string) *AlertUpdate { - au.mutation.SetUUID(s) - return au -} - -// SetNillableUUID sets the "uuid" field if the given value is not nil. -func (au *AlertUpdate) SetNillableUUID(s *string) *AlertUpdate { - if s != nil { - au.SetUUID(*s) - } - return au -} - -// ClearUUID clears the value of the "uuid" field. 
-func (au *AlertUpdate) ClearUUID() *AlertUpdate { - au.mutation.ClearUUID() - return au -} - // SetOwnerID sets the "owner" edge to the Machine entity by ID. func (au *AlertUpdate) SetOwnerID(id int) *AlertUpdate { au.mutation.SetOwnerID(id) @@ -660,135 +224,60 @@ func (au *AlertUpdate) sqlSave(ctx context.Context) (n int, err error) { if value, ok := au.mutation.UpdatedAt(); ok { _spec.SetField(alert.FieldUpdatedAt, field.TypeTime, value) } - if value, ok := au.mutation.Scenario(); ok { - _spec.SetField(alert.FieldScenario, field.TypeString, value) - } - if value, ok := au.mutation.BucketId(); ok { - _spec.SetField(alert.FieldBucketId, field.TypeString, value) - } if au.mutation.BucketIdCleared() { _spec.ClearField(alert.FieldBucketId, field.TypeString) } - if value, ok := au.mutation.Message(); ok { - _spec.SetField(alert.FieldMessage, field.TypeString, value) - } if au.mutation.MessageCleared() { _spec.ClearField(alert.FieldMessage, field.TypeString) } - if value, ok := au.mutation.EventsCount(); ok { - _spec.SetField(alert.FieldEventsCount, field.TypeInt32, value) - } - if value, ok := au.mutation.AddedEventsCount(); ok { - _spec.AddField(alert.FieldEventsCount, field.TypeInt32, value) - } if au.mutation.EventsCountCleared() { _spec.ClearField(alert.FieldEventsCount, field.TypeInt32) } - if value, ok := au.mutation.StartedAt(); ok { - _spec.SetField(alert.FieldStartedAt, field.TypeTime, value) - } if au.mutation.StartedAtCleared() { _spec.ClearField(alert.FieldStartedAt, field.TypeTime) } - if value, ok := au.mutation.StoppedAt(); ok { - _spec.SetField(alert.FieldStoppedAt, field.TypeTime, value) - } if au.mutation.StoppedAtCleared() { _spec.ClearField(alert.FieldStoppedAt, field.TypeTime) } - if value, ok := au.mutation.SourceIp(); ok { - _spec.SetField(alert.FieldSourceIp, field.TypeString, value) - } if au.mutation.SourceIpCleared() { _spec.ClearField(alert.FieldSourceIp, field.TypeString) } - if value, ok := au.mutation.SourceRange(); ok { - 
_spec.SetField(alert.FieldSourceRange, field.TypeString, value) - } if au.mutation.SourceRangeCleared() { _spec.ClearField(alert.FieldSourceRange, field.TypeString) } - if value, ok := au.mutation.SourceAsNumber(); ok { - _spec.SetField(alert.FieldSourceAsNumber, field.TypeString, value) - } if au.mutation.SourceAsNumberCleared() { _spec.ClearField(alert.FieldSourceAsNumber, field.TypeString) } - if value, ok := au.mutation.SourceAsName(); ok { - _spec.SetField(alert.FieldSourceAsName, field.TypeString, value) - } if au.mutation.SourceAsNameCleared() { _spec.ClearField(alert.FieldSourceAsName, field.TypeString) } - if value, ok := au.mutation.SourceCountry(); ok { - _spec.SetField(alert.FieldSourceCountry, field.TypeString, value) - } if au.mutation.SourceCountryCleared() { _spec.ClearField(alert.FieldSourceCountry, field.TypeString) } - if value, ok := au.mutation.SourceLatitude(); ok { - _spec.SetField(alert.FieldSourceLatitude, field.TypeFloat32, value) - } - if value, ok := au.mutation.AddedSourceLatitude(); ok { - _spec.AddField(alert.FieldSourceLatitude, field.TypeFloat32, value) - } if au.mutation.SourceLatitudeCleared() { _spec.ClearField(alert.FieldSourceLatitude, field.TypeFloat32) } - if value, ok := au.mutation.SourceLongitude(); ok { - _spec.SetField(alert.FieldSourceLongitude, field.TypeFloat32, value) - } - if value, ok := au.mutation.AddedSourceLongitude(); ok { - _spec.AddField(alert.FieldSourceLongitude, field.TypeFloat32, value) - } if au.mutation.SourceLongitudeCleared() { _spec.ClearField(alert.FieldSourceLongitude, field.TypeFloat32) } - if value, ok := au.mutation.SourceScope(); ok { - _spec.SetField(alert.FieldSourceScope, field.TypeString, value) - } if au.mutation.SourceScopeCleared() { _spec.ClearField(alert.FieldSourceScope, field.TypeString) } - if value, ok := au.mutation.SourceValue(); ok { - _spec.SetField(alert.FieldSourceValue, field.TypeString, value) - } if au.mutation.SourceValueCleared() { 
_spec.ClearField(alert.FieldSourceValue, field.TypeString) } - if value, ok := au.mutation.Capacity(); ok { - _spec.SetField(alert.FieldCapacity, field.TypeInt32, value) - } - if value, ok := au.mutation.AddedCapacity(); ok { - _spec.AddField(alert.FieldCapacity, field.TypeInt32, value) - } if au.mutation.CapacityCleared() { _spec.ClearField(alert.FieldCapacity, field.TypeInt32) } - if value, ok := au.mutation.LeakSpeed(); ok { - _spec.SetField(alert.FieldLeakSpeed, field.TypeString, value) - } if au.mutation.LeakSpeedCleared() { _spec.ClearField(alert.FieldLeakSpeed, field.TypeString) } - if value, ok := au.mutation.ScenarioVersion(); ok { - _spec.SetField(alert.FieldScenarioVersion, field.TypeString, value) - } if au.mutation.ScenarioVersionCleared() { _spec.ClearField(alert.FieldScenarioVersion, field.TypeString) } - if value, ok := au.mutation.ScenarioHash(); ok { - _spec.SetField(alert.FieldScenarioHash, field.TypeString, value) - } if au.mutation.ScenarioHashCleared() { _spec.ClearField(alert.FieldScenarioHash, field.TypeString) } - if value, ok := au.mutation.Simulated(); ok { - _spec.SetField(alert.FieldSimulated, field.TypeBool, value) - } - if value, ok := au.mutation.UUID(); ok { - _spec.SetField(alert.FieldUUID, field.TypeString, value) - } if au.mutation.UUIDCleared() { _spec.ClearField(alert.FieldUUID, field.TypeString) } @@ -982,442 +471,6 @@ func (auo *AlertUpdateOne) SetUpdatedAt(t time.Time) *AlertUpdateOne { return auo } -// SetScenario sets the "scenario" field. -func (auo *AlertUpdateOne) SetScenario(s string) *AlertUpdateOne { - auo.mutation.SetScenario(s) - return auo -} - -// SetNillableScenario sets the "scenario" field if the given value is not nil. -func (auo *AlertUpdateOne) SetNillableScenario(s *string) *AlertUpdateOne { - if s != nil { - auo.SetScenario(*s) - } - return auo -} - -// SetBucketId sets the "bucketId" field. 
-func (auo *AlertUpdateOne) SetBucketId(s string) *AlertUpdateOne { - auo.mutation.SetBucketId(s) - return auo -} - -// SetNillableBucketId sets the "bucketId" field if the given value is not nil. -func (auo *AlertUpdateOne) SetNillableBucketId(s *string) *AlertUpdateOne { - if s != nil { - auo.SetBucketId(*s) - } - return auo -} - -// ClearBucketId clears the value of the "bucketId" field. -func (auo *AlertUpdateOne) ClearBucketId() *AlertUpdateOne { - auo.mutation.ClearBucketId() - return auo -} - -// SetMessage sets the "message" field. -func (auo *AlertUpdateOne) SetMessage(s string) *AlertUpdateOne { - auo.mutation.SetMessage(s) - return auo -} - -// SetNillableMessage sets the "message" field if the given value is not nil. -func (auo *AlertUpdateOne) SetNillableMessage(s *string) *AlertUpdateOne { - if s != nil { - auo.SetMessage(*s) - } - return auo -} - -// ClearMessage clears the value of the "message" field. -func (auo *AlertUpdateOne) ClearMessage() *AlertUpdateOne { - auo.mutation.ClearMessage() - return auo -} - -// SetEventsCount sets the "eventsCount" field. -func (auo *AlertUpdateOne) SetEventsCount(i int32) *AlertUpdateOne { - auo.mutation.ResetEventsCount() - auo.mutation.SetEventsCount(i) - return auo -} - -// SetNillableEventsCount sets the "eventsCount" field if the given value is not nil. -func (auo *AlertUpdateOne) SetNillableEventsCount(i *int32) *AlertUpdateOne { - if i != nil { - auo.SetEventsCount(*i) - } - return auo -} - -// AddEventsCount adds i to the "eventsCount" field. -func (auo *AlertUpdateOne) AddEventsCount(i int32) *AlertUpdateOne { - auo.mutation.AddEventsCount(i) - return auo -} - -// ClearEventsCount clears the value of the "eventsCount" field. -func (auo *AlertUpdateOne) ClearEventsCount() *AlertUpdateOne { - auo.mutation.ClearEventsCount() - return auo -} - -// SetStartedAt sets the "startedAt" field. 
-func (auo *AlertUpdateOne) SetStartedAt(t time.Time) *AlertUpdateOne { - auo.mutation.SetStartedAt(t) - return auo -} - -// SetNillableStartedAt sets the "startedAt" field if the given value is not nil. -func (auo *AlertUpdateOne) SetNillableStartedAt(t *time.Time) *AlertUpdateOne { - if t != nil { - auo.SetStartedAt(*t) - } - return auo -} - -// ClearStartedAt clears the value of the "startedAt" field. -func (auo *AlertUpdateOne) ClearStartedAt() *AlertUpdateOne { - auo.mutation.ClearStartedAt() - return auo -} - -// SetStoppedAt sets the "stoppedAt" field. -func (auo *AlertUpdateOne) SetStoppedAt(t time.Time) *AlertUpdateOne { - auo.mutation.SetStoppedAt(t) - return auo -} - -// SetNillableStoppedAt sets the "stoppedAt" field if the given value is not nil. -func (auo *AlertUpdateOne) SetNillableStoppedAt(t *time.Time) *AlertUpdateOne { - if t != nil { - auo.SetStoppedAt(*t) - } - return auo -} - -// ClearStoppedAt clears the value of the "stoppedAt" field. -func (auo *AlertUpdateOne) ClearStoppedAt() *AlertUpdateOne { - auo.mutation.ClearStoppedAt() - return auo -} - -// SetSourceIp sets the "sourceIp" field. -func (auo *AlertUpdateOne) SetSourceIp(s string) *AlertUpdateOne { - auo.mutation.SetSourceIp(s) - return auo -} - -// SetNillableSourceIp sets the "sourceIp" field if the given value is not nil. -func (auo *AlertUpdateOne) SetNillableSourceIp(s *string) *AlertUpdateOne { - if s != nil { - auo.SetSourceIp(*s) - } - return auo -} - -// ClearSourceIp clears the value of the "sourceIp" field. -func (auo *AlertUpdateOne) ClearSourceIp() *AlertUpdateOne { - auo.mutation.ClearSourceIp() - return auo -} - -// SetSourceRange sets the "sourceRange" field. -func (auo *AlertUpdateOne) SetSourceRange(s string) *AlertUpdateOne { - auo.mutation.SetSourceRange(s) - return auo -} - -// SetNillableSourceRange sets the "sourceRange" field if the given value is not nil. 
-func (auo *AlertUpdateOne) SetNillableSourceRange(s *string) *AlertUpdateOne { - if s != nil { - auo.SetSourceRange(*s) - } - return auo -} - -// ClearSourceRange clears the value of the "sourceRange" field. -func (auo *AlertUpdateOne) ClearSourceRange() *AlertUpdateOne { - auo.mutation.ClearSourceRange() - return auo -} - -// SetSourceAsNumber sets the "sourceAsNumber" field. -func (auo *AlertUpdateOne) SetSourceAsNumber(s string) *AlertUpdateOne { - auo.mutation.SetSourceAsNumber(s) - return auo -} - -// SetNillableSourceAsNumber sets the "sourceAsNumber" field if the given value is not nil. -func (auo *AlertUpdateOne) SetNillableSourceAsNumber(s *string) *AlertUpdateOne { - if s != nil { - auo.SetSourceAsNumber(*s) - } - return auo -} - -// ClearSourceAsNumber clears the value of the "sourceAsNumber" field. -func (auo *AlertUpdateOne) ClearSourceAsNumber() *AlertUpdateOne { - auo.mutation.ClearSourceAsNumber() - return auo -} - -// SetSourceAsName sets the "sourceAsName" field. -func (auo *AlertUpdateOne) SetSourceAsName(s string) *AlertUpdateOne { - auo.mutation.SetSourceAsName(s) - return auo -} - -// SetNillableSourceAsName sets the "sourceAsName" field if the given value is not nil. -func (auo *AlertUpdateOne) SetNillableSourceAsName(s *string) *AlertUpdateOne { - if s != nil { - auo.SetSourceAsName(*s) - } - return auo -} - -// ClearSourceAsName clears the value of the "sourceAsName" field. -func (auo *AlertUpdateOne) ClearSourceAsName() *AlertUpdateOne { - auo.mutation.ClearSourceAsName() - return auo -} - -// SetSourceCountry sets the "sourceCountry" field. -func (auo *AlertUpdateOne) SetSourceCountry(s string) *AlertUpdateOne { - auo.mutation.SetSourceCountry(s) - return auo -} - -// SetNillableSourceCountry sets the "sourceCountry" field if the given value is not nil. 
-func (auo *AlertUpdateOne) SetNillableSourceCountry(s *string) *AlertUpdateOne { - if s != nil { - auo.SetSourceCountry(*s) - } - return auo -} - -// ClearSourceCountry clears the value of the "sourceCountry" field. -func (auo *AlertUpdateOne) ClearSourceCountry() *AlertUpdateOne { - auo.mutation.ClearSourceCountry() - return auo -} - -// SetSourceLatitude sets the "sourceLatitude" field. -func (auo *AlertUpdateOne) SetSourceLatitude(f float32) *AlertUpdateOne { - auo.mutation.ResetSourceLatitude() - auo.mutation.SetSourceLatitude(f) - return auo -} - -// SetNillableSourceLatitude sets the "sourceLatitude" field if the given value is not nil. -func (auo *AlertUpdateOne) SetNillableSourceLatitude(f *float32) *AlertUpdateOne { - if f != nil { - auo.SetSourceLatitude(*f) - } - return auo -} - -// AddSourceLatitude adds f to the "sourceLatitude" field. -func (auo *AlertUpdateOne) AddSourceLatitude(f float32) *AlertUpdateOne { - auo.mutation.AddSourceLatitude(f) - return auo -} - -// ClearSourceLatitude clears the value of the "sourceLatitude" field. -func (auo *AlertUpdateOne) ClearSourceLatitude() *AlertUpdateOne { - auo.mutation.ClearSourceLatitude() - return auo -} - -// SetSourceLongitude sets the "sourceLongitude" field. -func (auo *AlertUpdateOne) SetSourceLongitude(f float32) *AlertUpdateOne { - auo.mutation.ResetSourceLongitude() - auo.mutation.SetSourceLongitude(f) - return auo -} - -// SetNillableSourceLongitude sets the "sourceLongitude" field if the given value is not nil. -func (auo *AlertUpdateOne) SetNillableSourceLongitude(f *float32) *AlertUpdateOne { - if f != nil { - auo.SetSourceLongitude(*f) - } - return auo -} - -// AddSourceLongitude adds f to the "sourceLongitude" field. -func (auo *AlertUpdateOne) AddSourceLongitude(f float32) *AlertUpdateOne { - auo.mutation.AddSourceLongitude(f) - return auo -} - -// ClearSourceLongitude clears the value of the "sourceLongitude" field. 
-func (auo *AlertUpdateOne) ClearSourceLongitude() *AlertUpdateOne { - auo.mutation.ClearSourceLongitude() - return auo -} - -// SetSourceScope sets the "sourceScope" field. -func (auo *AlertUpdateOne) SetSourceScope(s string) *AlertUpdateOne { - auo.mutation.SetSourceScope(s) - return auo -} - -// SetNillableSourceScope sets the "sourceScope" field if the given value is not nil. -func (auo *AlertUpdateOne) SetNillableSourceScope(s *string) *AlertUpdateOne { - if s != nil { - auo.SetSourceScope(*s) - } - return auo -} - -// ClearSourceScope clears the value of the "sourceScope" field. -func (auo *AlertUpdateOne) ClearSourceScope() *AlertUpdateOne { - auo.mutation.ClearSourceScope() - return auo -} - -// SetSourceValue sets the "sourceValue" field. -func (auo *AlertUpdateOne) SetSourceValue(s string) *AlertUpdateOne { - auo.mutation.SetSourceValue(s) - return auo -} - -// SetNillableSourceValue sets the "sourceValue" field if the given value is not nil. -func (auo *AlertUpdateOne) SetNillableSourceValue(s *string) *AlertUpdateOne { - if s != nil { - auo.SetSourceValue(*s) - } - return auo -} - -// ClearSourceValue clears the value of the "sourceValue" field. -func (auo *AlertUpdateOne) ClearSourceValue() *AlertUpdateOne { - auo.mutation.ClearSourceValue() - return auo -} - -// SetCapacity sets the "capacity" field. -func (auo *AlertUpdateOne) SetCapacity(i int32) *AlertUpdateOne { - auo.mutation.ResetCapacity() - auo.mutation.SetCapacity(i) - return auo -} - -// SetNillableCapacity sets the "capacity" field if the given value is not nil. -func (auo *AlertUpdateOne) SetNillableCapacity(i *int32) *AlertUpdateOne { - if i != nil { - auo.SetCapacity(*i) - } - return auo -} - -// AddCapacity adds i to the "capacity" field. -func (auo *AlertUpdateOne) AddCapacity(i int32) *AlertUpdateOne { - auo.mutation.AddCapacity(i) - return auo -} - -// ClearCapacity clears the value of the "capacity" field. 
-func (auo *AlertUpdateOne) ClearCapacity() *AlertUpdateOne { - auo.mutation.ClearCapacity() - return auo -} - -// SetLeakSpeed sets the "leakSpeed" field. -func (auo *AlertUpdateOne) SetLeakSpeed(s string) *AlertUpdateOne { - auo.mutation.SetLeakSpeed(s) - return auo -} - -// SetNillableLeakSpeed sets the "leakSpeed" field if the given value is not nil. -func (auo *AlertUpdateOne) SetNillableLeakSpeed(s *string) *AlertUpdateOne { - if s != nil { - auo.SetLeakSpeed(*s) - } - return auo -} - -// ClearLeakSpeed clears the value of the "leakSpeed" field. -func (auo *AlertUpdateOne) ClearLeakSpeed() *AlertUpdateOne { - auo.mutation.ClearLeakSpeed() - return auo -} - -// SetScenarioVersion sets the "scenarioVersion" field. -func (auo *AlertUpdateOne) SetScenarioVersion(s string) *AlertUpdateOne { - auo.mutation.SetScenarioVersion(s) - return auo -} - -// SetNillableScenarioVersion sets the "scenarioVersion" field if the given value is not nil. -func (auo *AlertUpdateOne) SetNillableScenarioVersion(s *string) *AlertUpdateOne { - if s != nil { - auo.SetScenarioVersion(*s) - } - return auo -} - -// ClearScenarioVersion clears the value of the "scenarioVersion" field. -func (auo *AlertUpdateOne) ClearScenarioVersion() *AlertUpdateOne { - auo.mutation.ClearScenarioVersion() - return auo -} - -// SetScenarioHash sets the "scenarioHash" field. -func (auo *AlertUpdateOne) SetScenarioHash(s string) *AlertUpdateOne { - auo.mutation.SetScenarioHash(s) - return auo -} - -// SetNillableScenarioHash sets the "scenarioHash" field if the given value is not nil. -func (auo *AlertUpdateOne) SetNillableScenarioHash(s *string) *AlertUpdateOne { - if s != nil { - auo.SetScenarioHash(*s) - } - return auo -} - -// ClearScenarioHash clears the value of the "scenarioHash" field. -func (auo *AlertUpdateOne) ClearScenarioHash() *AlertUpdateOne { - auo.mutation.ClearScenarioHash() - return auo -} - -// SetSimulated sets the "simulated" field. 
-func (auo *AlertUpdateOne) SetSimulated(b bool) *AlertUpdateOne { - auo.mutation.SetSimulated(b) - return auo -} - -// SetNillableSimulated sets the "simulated" field if the given value is not nil. -func (auo *AlertUpdateOne) SetNillableSimulated(b *bool) *AlertUpdateOne { - if b != nil { - auo.SetSimulated(*b) - } - return auo -} - -// SetUUID sets the "uuid" field. -func (auo *AlertUpdateOne) SetUUID(s string) *AlertUpdateOne { - auo.mutation.SetUUID(s) - return auo -} - -// SetNillableUUID sets the "uuid" field if the given value is not nil. -func (auo *AlertUpdateOne) SetNillableUUID(s *string) *AlertUpdateOne { - if s != nil { - auo.SetUUID(*s) - } - return auo -} - -// ClearUUID clears the value of the "uuid" field. -func (auo *AlertUpdateOne) ClearUUID() *AlertUpdateOne { - auo.mutation.ClearUUID() - return auo -} - // SetOwnerID sets the "owner" edge to the Machine entity by ID. func (auo *AlertUpdateOne) SetOwnerID(id int) *AlertUpdateOne { auo.mutation.SetOwnerID(id) @@ -1634,135 +687,60 @@ func (auo *AlertUpdateOne) sqlSave(ctx context.Context) (_node *Alert, err error if value, ok := auo.mutation.UpdatedAt(); ok { _spec.SetField(alert.FieldUpdatedAt, field.TypeTime, value) } - if value, ok := auo.mutation.Scenario(); ok { - _spec.SetField(alert.FieldScenario, field.TypeString, value) - } - if value, ok := auo.mutation.BucketId(); ok { - _spec.SetField(alert.FieldBucketId, field.TypeString, value) - } if auo.mutation.BucketIdCleared() { _spec.ClearField(alert.FieldBucketId, field.TypeString) } - if value, ok := auo.mutation.Message(); ok { - _spec.SetField(alert.FieldMessage, field.TypeString, value) - } if auo.mutation.MessageCleared() { _spec.ClearField(alert.FieldMessage, field.TypeString) } - if value, ok := auo.mutation.EventsCount(); ok { - _spec.SetField(alert.FieldEventsCount, field.TypeInt32, value) - } - if value, ok := auo.mutation.AddedEventsCount(); ok { - _spec.AddField(alert.FieldEventsCount, field.TypeInt32, value) - } if 
auo.mutation.EventsCountCleared() { _spec.ClearField(alert.FieldEventsCount, field.TypeInt32) } - if value, ok := auo.mutation.StartedAt(); ok { - _spec.SetField(alert.FieldStartedAt, field.TypeTime, value) - } if auo.mutation.StartedAtCleared() { _spec.ClearField(alert.FieldStartedAt, field.TypeTime) } - if value, ok := auo.mutation.StoppedAt(); ok { - _spec.SetField(alert.FieldStoppedAt, field.TypeTime, value) - } if auo.mutation.StoppedAtCleared() { _spec.ClearField(alert.FieldStoppedAt, field.TypeTime) } - if value, ok := auo.mutation.SourceIp(); ok { - _spec.SetField(alert.FieldSourceIp, field.TypeString, value) - } if auo.mutation.SourceIpCleared() { _spec.ClearField(alert.FieldSourceIp, field.TypeString) } - if value, ok := auo.mutation.SourceRange(); ok { - _spec.SetField(alert.FieldSourceRange, field.TypeString, value) - } if auo.mutation.SourceRangeCleared() { _spec.ClearField(alert.FieldSourceRange, field.TypeString) } - if value, ok := auo.mutation.SourceAsNumber(); ok { - _spec.SetField(alert.FieldSourceAsNumber, field.TypeString, value) - } if auo.mutation.SourceAsNumberCleared() { _spec.ClearField(alert.FieldSourceAsNumber, field.TypeString) } - if value, ok := auo.mutation.SourceAsName(); ok { - _spec.SetField(alert.FieldSourceAsName, field.TypeString, value) - } if auo.mutation.SourceAsNameCleared() { _spec.ClearField(alert.FieldSourceAsName, field.TypeString) } - if value, ok := auo.mutation.SourceCountry(); ok { - _spec.SetField(alert.FieldSourceCountry, field.TypeString, value) - } if auo.mutation.SourceCountryCleared() { _spec.ClearField(alert.FieldSourceCountry, field.TypeString) } - if value, ok := auo.mutation.SourceLatitude(); ok { - _spec.SetField(alert.FieldSourceLatitude, field.TypeFloat32, value) - } - if value, ok := auo.mutation.AddedSourceLatitude(); ok { - _spec.AddField(alert.FieldSourceLatitude, field.TypeFloat32, value) - } if auo.mutation.SourceLatitudeCleared() { _spec.ClearField(alert.FieldSourceLatitude, field.TypeFloat32) } 
- if value, ok := auo.mutation.SourceLongitude(); ok { - _spec.SetField(alert.FieldSourceLongitude, field.TypeFloat32, value) - } - if value, ok := auo.mutation.AddedSourceLongitude(); ok { - _spec.AddField(alert.FieldSourceLongitude, field.TypeFloat32, value) - } if auo.mutation.SourceLongitudeCleared() { _spec.ClearField(alert.FieldSourceLongitude, field.TypeFloat32) } - if value, ok := auo.mutation.SourceScope(); ok { - _spec.SetField(alert.FieldSourceScope, field.TypeString, value) - } if auo.mutation.SourceScopeCleared() { _spec.ClearField(alert.FieldSourceScope, field.TypeString) } - if value, ok := auo.mutation.SourceValue(); ok { - _spec.SetField(alert.FieldSourceValue, field.TypeString, value) - } if auo.mutation.SourceValueCleared() { _spec.ClearField(alert.FieldSourceValue, field.TypeString) } - if value, ok := auo.mutation.Capacity(); ok { - _spec.SetField(alert.FieldCapacity, field.TypeInt32, value) - } - if value, ok := auo.mutation.AddedCapacity(); ok { - _spec.AddField(alert.FieldCapacity, field.TypeInt32, value) - } if auo.mutation.CapacityCleared() { _spec.ClearField(alert.FieldCapacity, field.TypeInt32) } - if value, ok := auo.mutation.LeakSpeed(); ok { - _spec.SetField(alert.FieldLeakSpeed, field.TypeString, value) - } if auo.mutation.LeakSpeedCleared() { _spec.ClearField(alert.FieldLeakSpeed, field.TypeString) } - if value, ok := auo.mutation.ScenarioVersion(); ok { - _spec.SetField(alert.FieldScenarioVersion, field.TypeString, value) - } if auo.mutation.ScenarioVersionCleared() { _spec.ClearField(alert.FieldScenarioVersion, field.TypeString) } - if value, ok := auo.mutation.ScenarioHash(); ok { - _spec.SetField(alert.FieldScenarioHash, field.TypeString, value) - } if auo.mutation.ScenarioHashCleared() { _spec.ClearField(alert.FieldScenarioHash, field.TypeString) } - if value, ok := auo.mutation.Simulated(); ok { - _spec.SetField(alert.FieldSimulated, field.TypeBool, value) - } - if value, ok := auo.mutation.UUID(); ok { - 
_spec.SetField(alert.FieldUUID, field.TypeString, value) - } if auo.mutation.UUIDCleared() { _spec.ClearField(alert.FieldUUID, field.TypeString) } diff --git a/pkg/database/ent/bouncer.go b/pkg/database/ent/bouncer.go index 7a3b280f53d..973442bfa66 100644 --- a/pkg/database/ent/bouncer.go +++ b/pkg/database/ent/bouncer.go @@ -33,8 +33,6 @@ type Bouncer struct { Type string `json:"type"` // Version holds the value of the "version" field. Version string `json:"version"` - // Until holds the value of the "until" field. - Until time.Time `json:"until"` // LastPull holds the value of the "last_pull" field. LastPull time.Time `json:"last_pull"` // AuthType holds the value of the "auth_type" field. @@ -53,7 +51,7 @@ func (*Bouncer) scanValues(columns []string) ([]any, error) { values[i] = new(sql.NullInt64) case bouncer.FieldName, bouncer.FieldAPIKey, bouncer.FieldIPAddress, bouncer.FieldType, bouncer.FieldVersion, bouncer.FieldAuthType: values[i] = new(sql.NullString) - case bouncer.FieldCreatedAt, bouncer.FieldUpdatedAt, bouncer.FieldUntil, bouncer.FieldLastPull: + case bouncer.FieldCreatedAt, bouncer.FieldUpdatedAt, bouncer.FieldLastPull: values[i] = new(sql.NullTime) default: values[i] = new(sql.UnknownType) @@ -124,12 +122,6 @@ func (b *Bouncer) assignValues(columns []string, values []any) error { } else if value.Valid { b.Version = value.String } - case bouncer.FieldUntil: - if value, ok := values[i].(*sql.NullTime); !ok { - return fmt.Errorf("unexpected type %T for field until", values[i]) - } else if value.Valid { - b.Until = value.Time - } case bouncer.FieldLastPull: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field last_pull", values[i]) @@ -201,9 +193,6 @@ func (b *Bouncer) String() string { builder.WriteString("version=") builder.WriteString(b.Version) builder.WriteString(", ") - builder.WriteString("until=") - builder.WriteString(b.Until.Format(time.ANSIC)) - builder.WriteString(", ") 
builder.WriteString("last_pull=") builder.WriteString(b.LastPull.Format(time.ANSIC)) builder.WriteString(", ") diff --git a/pkg/database/ent/bouncer/bouncer.go b/pkg/database/ent/bouncer/bouncer.go index 7683c07752b..3f201347e40 100644 --- a/pkg/database/ent/bouncer/bouncer.go +++ b/pkg/database/ent/bouncer/bouncer.go @@ -29,8 +29,6 @@ const ( FieldType = "type" // FieldVersion holds the string denoting the version field in the database. FieldVersion = "version" - // FieldUntil holds the string denoting the until field in the database. - FieldUntil = "until" // FieldLastPull holds the string denoting the last_pull field in the database. FieldLastPull = "last_pull" // FieldAuthType holds the string denoting the auth_type field in the database. @@ -50,7 +48,6 @@ var Columns = []string{ FieldIPAddress, FieldType, FieldVersion, - FieldUntil, FieldLastPull, FieldAuthType, } @@ -74,8 +71,6 @@ var ( UpdateDefaultUpdatedAt func() time.Time // DefaultIPAddress holds the default value on creation for the "ip_address" field. DefaultIPAddress string - // DefaultUntil holds the default value on creation for the "until" field. - DefaultUntil func() time.Time // DefaultLastPull holds the default value on creation for the "last_pull" field. DefaultLastPull func() time.Time // DefaultAuthType holds the default value on creation for the "auth_type" field. @@ -130,11 +125,6 @@ func ByVersion(opts ...sql.OrderTermOption) OrderOption { return sql.OrderByField(FieldVersion, opts...).ToFunc() } -// ByUntil orders the results by the until field. -func ByUntil(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldUntil, opts...).ToFunc() -} - // ByLastPull orders the results by the last_pull field. 
func ByLastPull(opts ...sql.OrderTermOption) OrderOption { return sql.OrderByField(FieldLastPull, opts...).ToFunc() diff --git a/pkg/database/ent/bouncer/where.go b/pkg/database/ent/bouncer/where.go index ee28d7bb6ff..86079794fee 100644 --- a/pkg/database/ent/bouncer/where.go +++ b/pkg/database/ent/bouncer/where.go @@ -94,11 +94,6 @@ func Version(v string) predicate.Bouncer { return predicate.Bouncer(sql.FieldEQ(FieldVersion, v)) } -// Until applies equality check predicate on the "until" field. It's identical to UntilEQ. -func Until(v time.Time) predicate.Bouncer { - return predicate.Bouncer(sql.FieldEQ(FieldUntil, v)) -} - // LastPull applies equality check predicate on the "last_pull" field. It's identical to LastPullEQ. func LastPull(v time.Time) predicate.Bouncer { return predicate.Bouncer(sql.FieldEQ(FieldLastPull, v)) @@ -554,56 +549,6 @@ func VersionContainsFold(v string) predicate.Bouncer { return predicate.Bouncer(sql.FieldContainsFold(FieldVersion, v)) } -// UntilEQ applies the EQ predicate on the "until" field. -func UntilEQ(v time.Time) predicate.Bouncer { - return predicate.Bouncer(sql.FieldEQ(FieldUntil, v)) -} - -// UntilNEQ applies the NEQ predicate on the "until" field. -func UntilNEQ(v time.Time) predicate.Bouncer { - return predicate.Bouncer(sql.FieldNEQ(FieldUntil, v)) -} - -// UntilIn applies the In predicate on the "until" field. -func UntilIn(vs ...time.Time) predicate.Bouncer { - return predicate.Bouncer(sql.FieldIn(FieldUntil, vs...)) -} - -// UntilNotIn applies the NotIn predicate on the "until" field. -func UntilNotIn(vs ...time.Time) predicate.Bouncer { - return predicate.Bouncer(sql.FieldNotIn(FieldUntil, vs...)) -} - -// UntilGT applies the GT predicate on the "until" field. -func UntilGT(v time.Time) predicate.Bouncer { - return predicate.Bouncer(sql.FieldGT(FieldUntil, v)) -} - -// UntilGTE applies the GTE predicate on the "until" field. 
-func UntilGTE(v time.Time) predicate.Bouncer { - return predicate.Bouncer(sql.FieldGTE(FieldUntil, v)) -} - -// UntilLT applies the LT predicate on the "until" field. -func UntilLT(v time.Time) predicate.Bouncer { - return predicate.Bouncer(sql.FieldLT(FieldUntil, v)) -} - -// UntilLTE applies the LTE predicate on the "until" field. -func UntilLTE(v time.Time) predicate.Bouncer { - return predicate.Bouncer(sql.FieldLTE(FieldUntil, v)) -} - -// UntilIsNil applies the IsNil predicate on the "until" field. -func UntilIsNil() predicate.Bouncer { - return predicate.Bouncer(sql.FieldIsNull(FieldUntil)) -} - -// UntilNotNil applies the NotNil predicate on the "until" field. -func UntilNotNil() predicate.Bouncer { - return predicate.Bouncer(sql.FieldNotNull(FieldUntil)) -} - // LastPullEQ applies the EQ predicate on the "last_pull" field. func LastPullEQ(v time.Time) predicate.Bouncer { return predicate.Bouncer(sql.FieldEQ(FieldLastPull, v)) diff --git a/pkg/database/ent/bouncer_create.go b/pkg/database/ent/bouncer_create.go index ceff4db583e..7a4b3d9b013 100644 --- a/pkg/database/ent/bouncer_create.go +++ b/pkg/database/ent/bouncer_create.go @@ -108,20 +108,6 @@ func (bc *BouncerCreate) SetNillableVersion(s *string) *BouncerCreate { return bc } -// SetUntil sets the "until" field. -func (bc *BouncerCreate) SetUntil(t time.Time) *BouncerCreate { - bc.mutation.SetUntil(t) - return bc -} - -// SetNillableUntil sets the "until" field if the given value is not nil. -func (bc *BouncerCreate) SetNillableUntil(t *time.Time) *BouncerCreate { - if t != nil { - bc.SetUntil(*t) - } - return bc -} - // SetLastPull sets the "last_pull" field. 
func (bc *BouncerCreate) SetLastPull(t time.Time) *BouncerCreate { bc.mutation.SetLastPull(t) @@ -197,10 +183,6 @@ func (bc *BouncerCreate) defaults() { v := bouncer.DefaultIPAddress bc.mutation.SetIPAddress(v) } - if _, ok := bc.mutation.Until(); !ok { - v := bouncer.DefaultUntil() - bc.mutation.SetUntil(v) - } if _, ok := bc.mutation.LastPull(); !ok { v := bouncer.DefaultLastPull() bc.mutation.SetLastPull(v) @@ -292,10 +274,6 @@ func (bc *BouncerCreate) createSpec() (*Bouncer, *sqlgraph.CreateSpec) { _spec.SetField(bouncer.FieldVersion, field.TypeString, value) _node.Version = value } - if value, ok := bc.mutation.Until(); ok { - _spec.SetField(bouncer.FieldUntil, field.TypeTime, value) - _node.Until = value - } if value, ok := bc.mutation.LastPull(); ok { _spec.SetField(bouncer.FieldLastPull, field.TypeTime, value) _node.LastPull = value diff --git a/pkg/database/ent/bouncer_update.go b/pkg/database/ent/bouncer_update.go index 76968cf5202..1dc5aa080c3 100644 --- a/pkg/database/ent/bouncer_update.go +++ b/pkg/database/ent/bouncer_update.go @@ -28,40 +28,12 @@ func (bu *BouncerUpdate) Where(ps ...predicate.Bouncer) *BouncerUpdate { return bu } -// SetCreatedAt sets the "created_at" field. -func (bu *BouncerUpdate) SetCreatedAt(t time.Time) *BouncerUpdate { - bu.mutation.SetCreatedAt(t) - return bu -} - -// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. -func (bu *BouncerUpdate) SetNillableCreatedAt(t *time.Time) *BouncerUpdate { - if t != nil { - bu.SetCreatedAt(*t) - } - return bu -} - // SetUpdatedAt sets the "updated_at" field. func (bu *BouncerUpdate) SetUpdatedAt(t time.Time) *BouncerUpdate { bu.mutation.SetUpdatedAt(t) return bu } -// SetName sets the "name" field. -func (bu *BouncerUpdate) SetName(s string) *BouncerUpdate { - bu.mutation.SetName(s) - return bu -} - -// SetNillableName sets the "name" field if the given value is not nil. 
-func (bu *BouncerUpdate) SetNillableName(s *string) *BouncerUpdate { - if s != nil { - bu.SetName(*s) - } - return bu -} - // SetAPIKey sets the "api_key" field. func (bu *BouncerUpdate) SetAPIKey(s string) *BouncerUpdate { bu.mutation.SetAPIKey(s) @@ -150,26 +122,6 @@ func (bu *BouncerUpdate) ClearVersion() *BouncerUpdate { return bu } -// SetUntil sets the "until" field. -func (bu *BouncerUpdate) SetUntil(t time.Time) *BouncerUpdate { - bu.mutation.SetUntil(t) - return bu -} - -// SetNillableUntil sets the "until" field if the given value is not nil. -func (bu *BouncerUpdate) SetNillableUntil(t *time.Time) *BouncerUpdate { - if t != nil { - bu.SetUntil(*t) - } - return bu -} - -// ClearUntil clears the value of the "until" field. -func (bu *BouncerUpdate) ClearUntil() *BouncerUpdate { - bu.mutation.ClearUntil() - return bu -} - // SetLastPull sets the "last_pull" field. func (bu *BouncerUpdate) SetLastPull(t time.Time) *BouncerUpdate { bu.mutation.SetLastPull(t) @@ -248,15 +200,9 @@ func (bu *BouncerUpdate) sqlSave(ctx context.Context) (n int, err error) { } } } - if value, ok := bu.mutation.CreatedAt(); ok { - _spec.SetField(bouncer.FieldCreatedAt, field.TypeTime, value) - } if value, ok := bu.mutation.UpdatedAt(); ok { _spec.SetField(bouncer.FieldUpdatedAt, field.TypeTime, value) } - if value, ok := bu.mutation.Name(); ok { - _spec.SetField(bouncer.FieldName, field.TypeString, value) - } if value, ok := bu.mutation.APIKey(); ok { _spec.SetField(bouncer.FieldAPIKey, field.TypeString, value) } @@ -281,12 +227,6 @@ func (bu *BouncerUpdate) sqlSave(ctx context.Context) (n int, err error) { if bu.mutation.VersionCleared() { _spec.ClearField(bouncer.FieldVersion, field.TypeString) } - if value, ok := bu.mutation.Until(); ok { - _spec.SetField(bouncer.FieldUntil, field.TypeTime, value) - } - if bu.mutation.UntilCleared() { - _spec.ClearField(bouncer.FieldUntil, field.TypeTime) - } if value, ok := bu.mutation.LastPull(); ok { _spec.SetField(bouncer.FieldLastPull, 
field.TypeTime, value) } @@ -313,40 +253,12 @@ type BouncerUpdateOne struct { mutation *BouncerMutation } -// SetCreatedAt sets the "created_at" field. -func (buo *BouncerUpdateOne) SetCreatedAt(t time.Time) *BouncerUpdateOne { - buo.mutation.SetCreatedAt(t) - return buo -} - -// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. -func (buo *BouncerUpdateOne) SetNillableCreatedAt(t *time.Time) *BouncerUpdateOne { - if t != nil { - buo.SetCreatedAt(*t) - } - return buo -} - // SetUpdatedAt sets the "updated_at" field. func (buo *BouncerUpdateOne) SetUpdatedAt(t time.Time) *BouncerUpdateOne { buo.mutation.SetUpdatedAt(t) return buo } -// SetName sets the "name" field. -func (buo *BouncerUpdateOne) SetName(s string) *BouncerUpdateOne { - buo.mutation.SetName(s) - return buo -} - -// SetNillableName sets the "name" field if the given value is not nil. -func (buo *BouncerUpdateOne) SetNillableName(s *string) *BouncerUpdateOne { - if s != nil { - buo.SetName(*s) - } - return buo -} - // SetAPIKey sets the "api_key" field. func (buo *BouncerUpdateOne) SetAPIKey(s string) *BouncerUpdateOne { buo.mutation.SetAPIKey(s) @@ -435,26 +347,6 @@ func (buo *BouncerUpdateOne) ClearVersion() *BouncerUpdateOne { return buo } -// SetUntil sets the "until" field. -func (buo *BouncerUpdateOne) SetUntil(t time.Time) *BouncerUpdateOne { - buo.mutation.SetUntil(t) - return buo -} - -// SetNillableUntil sets the "until" field if the given value is not nil. -func (buo *BouncerUpdateOne) SetNillableUntil(t *time.Time) *BouncerUpdateOne { - if t != nil { - buo.SetUntil(*t) - } - return buo -} - -// ClearUntil clears the value of the "until" field. -func (buo *BouncerUpdateOne) ClearUntil() *BouncerUpdateOne { - buo.mutation.ClearUntil() - return buo -} - // SetLastPull sets the "last_pull" field. 
func (buo *BouncerUpdateOne) SetLastPull(t time.Time) *BouncerUpdateOne { buo.mutation.SetLastPull(t) @@ -563,15 +455,9 @@ func (buo *BouncerUpdateOne) sqlSave(ctx context.Context) (_node *Bouncer, err e } } } - if value, ok := buo.mutation.CreatedAt(); ok { - _spec.SetField(bouncer.FieldCreatedAt, field.TypeTime, value) - } if value, ok := buo.mutation.UpdatedAt(); ok { _spec.SetField(bouncer.FieldUpdatedAt, field.TypeTime, value) } - if value, ok := buo.mutation.Name(); ok { - _spec.SetField(bouncer.FieldName, field.TypeString, value) - } if value, ok := buo.mutation.APIKey(); ok { _spec.SetField(bouncer.FieldAPIKey, field.TypeString, value) } @@ -596,12 +482,6 @@ func (buo *BouncerUpdateOne) sqlSave(ctx context.Context) (_node *Bouncer, err e if buo.mutation.VersionCleared() { _spec.ClearField(bouncer.FieldVersion, field.TypeString) } - if value, ok := buo.mutation.Until(); ok { - _spec.SetField(bouncer.FieldUntil, field.TypeTime, value) - } - if buo.mutation.UntilCleared() { - _spec.ClearField(bouncer.FieldUntil, field.TypeTime) - } if value, ok := buo.mutation.LastPull(); ok { _spec.SetField(bouncer.FieldLastPull, field.TypeTime, value) } diff --git a/pkg/database/ent/configitem_update.go b/pkg/database/ent/configitem_update.go index d4f1f15d23a..82309459e76 100644 --- a/pkg/database/ent/configitem_update.go +++ b/pkg/database/ent/configitem_update.go @@ -34,20 +34,6 @@ func (ciu *ConfigItemUpdate) SetUpdatedAt(t time.Time) *ConfigItemUpdate { return ciu } -// SetName sets the "name" field. -func (ciu *ConfigItemUpdate) SetName(s string) *ConfigItemUpdate { - ciu.mutation.SetName(s) - return ciu -} - -// SetNillableName sets the "name" field if the given value is not nil. -func (ciu *ConfigItemUpdate) SetNillableName(s *string) *ConfigItemUpdate { - if s != nil { - ciu.SetName(*s) - } - return ciu -} - // SetValue sets the "value" field. 
func (ciu *ConfigItemUpdate) SetValue(s string) *ConfigItemUpdate { ciu.mutation.SetValue(s) @@ -115,9 +101,6 @@ func (ciu *ConfigItemUpdate) sqlSave(ctx context.Context) (n int, err error) { if value, ok := ciu.mutation.UpdatedAt(); ok { _spec.SetField(configitem.FieldUpdatedAt, field.TypeTime, value) } - if value, ok := ciu.mutation.Name(); ok { - _spec.SetField(configitem.FieldName, field.TypeString, value) - } if value, ok := ciu.mutation.Value(); ok { _spec.SetField(configitem.FieldValue, field.TypeString, value) } @@ -147,20 +130,6 @@ func (ciuo *ConfigItemUpdateOne) SetUpdatedAt(t time.Time) *ConfigItemUpdateOne return ciuo } -// SetName sets the "name" field. -func (ciuo *ConfigItemUpdateOne) SetName(s string) *ConfigItemUpdateOne { - ciuo.mutation.SetName(s) - return ciuo -} - -// SetNillableName sets the "name" field if the given value is not nil. -func (ciuo *ConfigItemUpdateOne) SetNillableName(s *string) *ConfigItemUpdateOne { - if s != nil { - ciuo.SetName(*s) - } - return ciuo -} - // SetValue sets the "value" field. func (ciuo *ConfigItemUpdateOne) SetValue(s string) *ConfigItemUpdateOne { ciuo.mutation.SetValue(s) @@ -258,9 +227,6 @@ func (ciuo *ConfigItemUpdateOne) sqlSave(ctx context.Context) (_node *ConfigItem if value, ok := ciuo.mutation.UpdatedAt(); ok { _spec.SetField(configitem.FieldUpdatedAt, field.TypeTime, value) } - if value, ok := ciuo.mutation.Name(); ok { - _spec.SetField(configitem.FieldName, field.TypeString, value) - } if value, ok := ciuo.mutation.Value(); ok { _spec.SetField(configitem.FieldValue, field.TypeString, value) } diff --git a/pkg/database/ent/decision_update.go b/pkg/database/ent/decision_update.go index 1bcb42f8c1f..68d0eb4ace7 100644 --- a/pkg/database/ent/decision_update.go +++ b/pkg/database/ent/decision_update.go @@ -55,245 +55,6 @@ func (du *DecisionUpdate) ClearUntil() *DecisionUpdate { return du } -// SetScenario sets the "scenario" field. 
-func (du *DecisionUpdate) SetScenario(s string) *DecisionUpdate { - du.mutation.SetScenario(s) - return du -} - -// SetNillableScenario sets the "scenario" field if the given value is not nil. -func (du *DecisionUpdate) SetNillableScenario(s *string) *DecisionUpdate { - if s != nil { - du.SetScenario(*s) - } - return du -} - -// SetType sets the "type" field. -func (du *DecisionUpdate) SetType(s string) *DecisionUpdate { - du.mutation.SetType(s) - return du -} - -// SetNillableType sets the "type" field if the given value is not nil. -func (du *DecisionUpdate) SetNillableType(s *string) *DecisionUpdate { - if s != nil { - du.SetType(*s) - } - return du -} - -// SetStartIP sets the "start_ip" field. -func (du *DecisionUpdate) SetStartIP(i int64) *DecisionUpdate { - du.mutation.ResetStartIP() - du.mutation.SetStartIP(i) - return du -} - -// SetNillableStartIP sets the "start_ip" field if the given value is not nil. -func (du *DecisionUpdate) SetNillableStartIP(i *int64) *DecisionUpdate { - if i != nil { - du.SetStartIP(*i) - } - return du -} - -// AddStartIP adds i to the "start_ip" field. -func (du *DecisionUpdate) AddStartIP(i int64) *DecisionUpdate { - du.mutation.AddStartIP(i) - return du -} - -// ClearStartIP clears the value of the "start_ip" field. -func (du *DecisionUpdate) ClearStartIP() *DecisionUpdate { - du.mutation.ClearStartIP() - return du -} - -// SetEndIP sets the "end_ip" field. -func (du *DecisionUpdate) SetEndIP(i int64) *DecisionUpdate { - du.mutation.ResetEndIP() - du.mutation.SetEndIP(i) - return du -} - -// SetNillableEndIP sets the "end_ip" field if the given value is not nil. -func (du *DecisionUpdate) SetNillableEndIP(i *int64) *DecisionUpdate { - if i != nil { - du.SetEndIP(*i) - } - return du -} - -// AddEndIP adds i to the "end_ip" field. -func (du *DecisionUpdate) AddEndIP(i int64) *DecisionUpdate { - du.mutation.AddEndIP(i) - return du -} - -// ClearEndIP clears the value of the "end_ip" field. 
-func (du *DecisionUpdate) ClearEndIP() *DecisionUpdate { - du.mutation.ClearEndIP() - return du -} - -// SetStartSuffix sets the "start_suffix" field. -func (du *DecisionUpdate) SetStartSuffix(i int64) *DecisionUpdate { - du.mutation.ResetStartSuffix() - du.mutation.SetStartSuffix(i) - return du -} - -// SetNillableStartSuffix sets the "start_suffix" field if the given value is not nil. -func (du *DecisionUpdate) SetNillableStartSuffix(i *int64) *DecisionUpdate { - if i != nil { - du.SetStartSuffix(*i) - } - return du -} - -// AddStartSuffix adds i to the "start_suffix" field. -func (du *DecisionUpdate) AddStartSuffix(i int64) *DecisionUpdate { - du.mutation.AddStartSuffix(i) - return du -} - -// ClearStartSuffix clears the value of the "start_suffix" field. -func (du *DecisionUpdate) ClearStartSuffix() *DecisionUpdate { - du.mutation.ClearStartSuffix() - return du -} - -// SetEndSuffix sets the "end_suffix" field. -func (du *DecisionUpdate) SetEndSuffix(i int64) *DecisionUpdate { - du.mutation.ResetEndSuffix() - du.mutation.SetEndSuffix(i) - return du -} - -// SetNillableEndSuffix sets the "end_suffix" field if the given value is not nil. -func (du *DecisionUpdate) SetNillableEndSuffix(i *int64) *DecisionUpdate { - if i != nil { - du.SetEndSuffix(*i) - } - return du -} - -// AddEndSuffix adds i to the "end_suffix" field. -func (du *DecisionUpdate) AddEndSuffix(i int64) *DecisionUpdate { - du.mutation.AddEndSuffix(i) - return du -} - -// ClearEndSuffix clears the value of the "end_suffix" field. -func (du *DecisionUpdate) ClearEndSuffix() *DecisionUpdate { - du.mutation.ClearEndSuffix() - return du -} - -// SetIPSize sets the "ip_size" field. -func (du *DecisionUpdate) SetIPSize(i int64) *DecisionUpdate { - du.mutation.ResetIPSize() - du.mutation.SetIPSize(i) - return du -} - -// SetNillableIPSize sets the "ip_size" field if the given value is not nil. 
-func (du *DecisionUpdate) SetNillableIPSize(i *int64) *DecisionUpdate { - if i != nil { - du.SetIPSize(*i) - } - return du -} - -// AddIPSize adds i to the "ip_size" field. -func (du *DecisionUpdate) AddIPSize(i int64) *DecisionUpdate { - du.mutation.AddIPSize(i) - return du -} - -// ClearIPSize clears the value of the "ip_size" field. -func (du *DecisionUpdate) ClearIPSize() *DecisionUpdate { - du.mutation.ClearIPSize() - return du -} - -// SetScope sets the "scope" field. -func (du *DecisionUpdate) SetScope(s string) *DecisionUpdate { - du.mutation.SetScope(s) - return du -} - -// SetNillableScope sets the "scope" field if the given value is not nil. -func (du *DecisionUpdate) SetNillableScope(s *string) *DecisionUpdate { - if s != nil { - du.SetScope(*s) - } - return du -} - -// SetValue sets the "value" field. -func (du *DecisionUpdate) SetValue(s string) *DecisionUpdate { - du.mutation.SetValue(s) - return du -} - -// SetNillableValue sets the "value" field if the given value is not nil. -func (du *DecisionUpdate) SetNillableValue(s *string) *DecisionUpdate { - if s != nil { - du.SetValue(*s) - } - return du -} - -// SetOrigin sets the "origin" field. -func (du *DecisionUpdate) SetOrigin(s string) *DecisionUpdate { - du.mutation.SetOrigin(s) - return du -} - -// SetNillableOrigin sets the "origin" field if the given value is not nil. -func (du *DecisionUpdate) SetNillableOrigin(s *string) *DecisionUpdate { - if s != nil { - du.SetOrigin(*s) - } - return du -} - -// SetSimulated sets the "simulated" field. -func (du *DecisionUpdate) SetSimulated(b bool) *DecisionUpdate { - du.mutation.SetSimulated(b) - return du -} - -// SetNillableSimulated sets the "simulated" field if the given value is not nil. -func (du *DecisionUpdate) SetNillableSimulated(b *bool) *DecisionUpdate { - if b != nil { - du.SetSimulated(*b) - } - return du -} - -// SetUUID sets the "uuid" field. 
-func (du *DecisionUpdate) SetUUID(s string) *DecisionUpdate { - du.mutation.SetUUID(s) - return du -} - -// SetNillableUUID sets the "uuid" field if the given value is not nil. -func (du *DecisionUpdate) SetNillableUUID(s *string) *DecisionUpdate { - if s != nil { - du.SetUUID(*s) - } - return du -} - -// ClearUUID clears the value of the "uuid" field. -func (du *DecisionUpdate) ClearUUID() *DecisionUpdate { - du.mutation.ClearUUID() - return du -} - // SetAlertDecisions sets the "alert_decisions" field. func (du *DecisionUpdate) SetAlertDecisions(i int) *DecisionUpdate { du.mutation.SetAlertDecisions(i) @@ -398,72 +159,21 @@ func (du *DecisionUpdate) sqlSave(ctx context.Context) (n int, err error) { if du.mutation.UntilCleared() { _spec.ClearField(decision.FieldUntil, field.TypeTime) } - if value, ok := du.mutation.Scenario(); ok { - _spec.SetField(decision.FieldScenario, field.TypeString, value) - } - if value, ok := du.mutation.GetType(); ok { - _spec.SetField(decision.FieldType, field.TypeString, value) - } - if value, ok := du.mutation.StartIP(); ok { - _spec.SetField(decision.FieldStartIP, field.TypeInt64, value) - } - if value, ok := du.mutation.AddedStartIP(); ok { - _spec.AddField(decision.FieldStartIP, field.TypeInt64, value) - } if du.mutation.StartIPCleared() { _spec.ClearField(decision.FieldStartIP, field.TypeInt64) } - if value, ok := du.mutation.EndIP(); ok { - _spec.SetField(decision.FieldEndIP, field.TypeInt64, value) - } - if value, ok := du.mutation.AddedEndIP(); ok { - _spec.AddField(decision.FieldEndIP, field.TypeInt64, value) - } if du.mutation.EndIPCleared() { _spec.ClearField(decision.FieldEndIP, field.TypeInt64) } - if value, ok := du.mutation.StartSuffix(); ok { - _spec.SetField(decision.FieldStartSuffix, field.TypeInt64, value) - } - if value, ok := du.mutation.AddedStartSuffix(); ok { - _spec.AddField(decision.FieldStartSuffix, field.TypeInt64, value) - } if du.mutation.StartSuffixCleared() { _spec.ClearField(decision.FieldStartSuffix, 
field.TypeInt64) } - if value, ok := du.mutation.EndSuffix(); ok { - _spec.SetField(decision.FieldEndSuffix, field.TypeInt64, value) - } - if value, ok := du.mutation.AddedEndSuffix(); ok { - _spec.AddField(decision.FieldEndSuffix, field.TypeInt64, value) - } if du.mutation.EndSuffixCleared() { _spec.ClearField(decision.FieldEndSuffix, field.TypeInt64) } - if value, ok := du.mutation.IPSize(); ok { - _spec.SetField(decision.FieldIPSize, field.TypeInt64, value) - } - if value, ok := du.mutation.AddedIPSize(); ok { - _spec.AddField(decision.FieldIPSize, field.TypeInt64, value) - } if du.mutation.IPSizeCleared() { _spec.ClearField(decision.FieldIPSize, field.TypeInt64) } - if value, ok := du.mutation.Scope(); ok { - _spec.SetField(decision.FieldScope, field.TypeString, value) - } - if value, ok := du.mutation.Value(); ok { - _spec.SetField(decision.FieldValue, field.TypeString, value) - } - if value, ok := du.mutation.Origin(); ok { - _spec.SetField(decision.FieldOrigin, field.TypeString, value) - } - if value, ok := du.mutation.Simulated(); ok { - _spec.SetField(decision.FieldSimulated, field.TypeBool, value) - } - if value, ok := du.mutation.UUID(); ok { - _spec.SetField(decision.FieldUUID, field.TypeString, value) - } if du.mutation.UUIDCleared() { _spec.ClearField(decision.FieldUUID, field.TypeString) } @@ -542,245 +252,6 @@ func (duo *DecisionUpdateOne) ClearUntil() *DecisionUpdateOne { return duo } -// SetScenario sets the "scenario" field. -func (duo *DecisionUpdateOne) SetScenario(s string) *DecisionUpdateOne { - duo.mutation.SetScenario(s) - return duo -} - -// SetNillableScenario sets the "scenario" field if the given value is not nil. -func (duo *DecisionUpdateOne) SetNillableScenario(s *string) *DecisionUpdateOne { - if s != nil { - duo.SetScenario(*s) - } - return duo -} - -// SetType sets the "type" field. 
-func (duo *DecisionUpdateOne) SetType(s string) *DecisionUpdateOne { - duo.mutation.SetType(s) - return duo -} - -// SetNillableType sets the "type" field if the given value is not nil. -func (duo *DecisionUpdateOne) SetNillableType(s *string) *DecisionUpdateOne { - if s != nil { - duo.SetType(*s) - } - return duo -} - -// SetStartIP sets the "start_ip" field. -func (duo *DecisionUpdateOne) SetStartIP(i int64) *DecisionUpdateOne { - duo.mutation.ResetStartIP() - duo.mutation.SetStartIP(i) - return duo -} - -// SetNillableStartIP sets the "start_ip" field if the given value is not nil. -func (duo *DecisionUpdateOne) SetNillableStartIP(i *int64) *DecisionUpdateOne { - if i != nil { - duo.SetStartIP(*i) - } - return duo -} - -// AddStartIP adds i to the "start_ip" field. -func (duo *DecisionUpdateOne) AddStartIP(i int64) *DecisionUpdateOne { - duo.mutation.AddStartIP(i) - return duo -} - -// ClearStartIP clears the value of the "start_ip" field. -func (duo *DecisionUpdateOne) ClearStartIP() *DecisionUpdateOne { - duo.mutation.ClearStartIP() - return duo -} - -// SetEndIP sets the "end_ip" field. -func (duo *DecisionUpdateOne) SetEndIP(i int64) *DecisionUpdateOne { - duo.mutation.ResetEndIP() - duo.mutation.SetEndIP(i) - return duo -} - -// SetNillableEndIP sets the "end_ip" field if the given value is not nil. -func (duo *DecisionUpdateOne) SetNillableEndIP(i *int64) *DecisionUpdateOne { - if i != nil { - duo.SetEndIP(*i) - } - return duo -} - -// AddEndIP adds i to the "end_ip" field. -func (duo *DecisionUpdateOne) AddEndIP(i int64) *DecisionUpdateOne { - duo.mutation.AddEndIP(i) - return duo -} - -// ClearEndIP clears the value of the "end_ip" field. -func (duo *DecisionUpdateOne) ClearEndIP() *DecisionUpdateOne { - duo.mutation.ClearEndIP() - return duo -} - -// SetStartSuffix sets the "start_suffix" field. 
-func (duo *DecisionUpdateOne) SetStartSuffix(i int64) *DecisionUpdateOne { - duo.mutation.ResetStartSuffix() - duo.mutation.SetStartSuffix(i) - return duo -} - -// SetNillableStartSuffix sets the "start_suffix" field if the given value is not nil. -func (duo *DecisionUpdateOne) SetNillableStartSuffix(i *int64) *DecisionUpdateOne { - if i != nil { - duo.SetStartSuffix(*i) - } - return duo -} - -// AddStartSuffix adds i to the "start_suffix" field. -func (duo *DecisionUpdateOne) AddStartSuffix(i int64) *DecisionUpdateOne { - duo.mutation.AddStartSuffix(i) - return duo -} - -// ClearStartSuffix clears the value of the "start_suffix" field. -func (duo *DecisionUpdateOne) ClearStartSuffix() *DecisionUpdateOne { - duo.mutation.ClearStartSuffix() - return duo -} - -// SetEndSuffix sets the "end_suffix" field. -func (duo *DecisionUpdateOne) SetEndSuffix(i int64) *DecisionUpdateOne { - duo.mutation.ResetEndSuffix() - duo.mutation.SetEndSuffix(i) - return duo -} - -// SetNillableEndSuffix sets the "end_suffix" field if the given value is not nil. -func (duo *DecisionUpdateOne) SetNillableEndSuffix(i *int64) *DecisionUpdateOne { - if i != nil { - duo.SetEndSuffix(*i) - } - return duo -} - -// AddEndSuffix adds i to the "end_suffix" field. -func (duo *DecisionUpdateOne) AddEndSuffix(i int64) *DecisionUpdateOne { - duo.mutation.AddEndSuffix(i) - return duo -} - -// ClearEndSuffix clears the value of the "end_suffix" field. -func (duo *DecisionUpdateOne) ClearEndSuffix() *DecisionUpdateOne { - duo.mutation.ClearEndSuffix() - return duo -} - -// SetIPSize sets the "ip_size" field. -func (duo *DecisionUpdateOne) SetIPSize(i int64) *DecisionUpdateOne { - duo.mutation.ResetIPSize() - duo.mutation.SetIPSize(i) - return duo -} - -// SetNillableIPSize sets the "ip_size" field if the given value is not nil. 
-func (duo *DecisionUpdateOne) SetNillableIPSize(i *int64) *DecisionUpdateOne { - if i != nil { - duo.SetIPSize(*i) - } - return duo -} - -// AddIPSize adds i to the "ip_size" field. -func (duo *DecisionUpdateOne) AddIPSize(i int64) *DecisionUpdateOne { - duo.mutation.AddIPSize(i) - return duo -} - -// ClearIPSize clears the value of the "ip_size" field. -func (duo *DecisionUpdateOne) ClearIPSize() *DecisionUpdateOne { - duo.mutation.ClearIPSize() - return duo -} - -// SetScope sets the "scope" field. -func (duo *DecisionUpdateOne) SetScope(s string) *DecisionUpdateOne { - duo.mutation.SetScope(s) - return duo -} - -// SetNillableScope sets the "scope" field if the given value is not nil. -func (duo *DecisionUpdateOne) SetNillableScope(s *string) *DecisionUpdateOne { - if s != nil { - duo.SetScope(*s) - } - return duo -} - -// SetValue sets the "value" field. -func (duo *DecisionUpdateOne) SetValue(s string) *DecisionUpdateOne { - duo.mutation.SetValue(s) - return duo -} - -// SetNillableValue sets the "value" field if the given value is not nil. -func (duo *DecisionUpdateOne) SetNillableValue(s *string) *DecisionUpdateOne { - if s != nil { - duo.SetValue(*s) - } - return duo -} - -// SetOrigin sets the "origin" field. -func (duo *DecisionUpdateOne) SetOrigin(s string) *DecisionUpdateOne { - duo.mutation.SetOrigin(s) - return duo -} - -// SetNillableOrigin sets the "origin" field if the given value is not nil. -func (duo *DecisionUpdateOne) SetNillableOrigin(s *string) *DecisionUpdateOne { - if s != nil { - duo.SetOrigin(*s) - } - return duo -} - -// SetSimulated sets the "simulated" field. -func (duo *DecisionUpdateOne) SetSimulated(b bool) *DecisionUpdateOne { - duo.mutation.SetSimulated(b) - return duo -} - -// SetNillableSimulated sets the "simulated" field if the given value is not nil. 
-func (duo *DecisionUpdateOne) SetNillableSimulated(b *bool) *DecisionUpdateOne { - if b != nil { - duo.SetSimulated(*b) - } - return duo -} - -// SetUUID sets the "uuid" field. -func (duo *DecisionUpdateOne) SetUUID(s string) *DecisionUpdateOne { - duo.mutation.SetUUID(s) - return duo -} - -// SetNillableUUID sets the "uuid" field if the given value is not nil. -func (duo *DecisionUpdateOne) SetNillableUUID(s *string) *DecisionUpdateOne { - if s != nil { - duo.SetUUID(*s) - } - return duo -} - -// ClearUUID clears the value of the "uuid" field. -func (duo *DecisionUpdateOne) ClearUUID() *DecisionUpdateOne { - duo.mutation.ClearUUID() - return duo -} - // SetAlertDecisions sets the "alert_decisions" field. func (duo *DecisionUpdateOne) SetAlertDecisions(i int) *DecisionUpdateOne { duo.mutation.SetAlertDecisions(i) @@ -915,72 +386,21 @@ func (duo *DecisionUpdateOne) sqlSave(ctx context.Context) (_node *Decision, err if duo.mutation.UntilCleared() { _spec.ClearField(decision.FieldUntil, field.TypeTime) } - if value, ok := duo.mutation.Scenario(); ok { - _spec.SetField(decision.FieldScenario, field.TypeString, value) - } - if value, ok := duo.mutation.GetType(); ok { - _spec.SetField(decision.FieldType, field.TypeString, value) - } - if value, ok := duo.mutation.StartIP(); ok { - _spec.SetField(decision.FieldStartIP, field.TypeInt64, value) - } - if value, ok := duo.mutation.AddedStartIP(); ok { - _spec.AddField(decision.FieldStartIP, field.TypeInt64, value) - } if duo.mutation.StartIPCleared() { _spec.ClearField(decision.FieldStartIP, field.TypeInt64) } - if value, ok := duo.mutation.EndIP(); ok { - _spec.SetField(decision.FieldEndIP, field.TypeInt64, value) - } - if value, ok := duo.mutation.AddedEndIP(); ok { - _spec.AddField(decision.FieldEndIP, field.TypeInt64, value) - } if duo.mutation.EndIPCleared() { _spec.ClearField(decision.FieldEndIP, field.TypeInt64) } - if value, ok := duo.mutation.StartSuffix(); ok { - _spec.SetField(decision.FieldStartSuffix, 
field.TypeInt64, value) - } - if value, ok := duo.mutation.AddedStartSuffix(); ok { - _spec.AddField(decision.FieldStartSuffix, field.TypeInt64, value) - } if duo.mutation.StartSuffixCleared() { _spec.ClearField(decision.FieldStartSuffix, field.TypeInt64) } - if value, ok := duo.mutation.EndSuffix(); ok { - _spec.SetField(decision.FieldEndSuffix, field.TypeInt64, value) - } - if value, ok := duo.mutation.AddedEndSuffix(); ok { - _spec.AddField(decision.FieldEndSuffix, field.TypeInt64, value) - } if duo.mutation.EndSuffixCleared() { _spec.ClearField(decision.FieldEndSuffix, field.TypeInt64) } - if value, ok := duo.mutation.IPSize(); ok { - _spec.SetField(decision.FieldIPSize, field.TypeInt64, value) - } - if value, ok := duo.mutation.AddedIPSize(); ok { - _spec.AddField(decision.FieldIPSize, field.TypeInt64, value) - } if duo.mutation.IPSizeCleared() { _spec.ClearField(decision.FieldIPSize, field.TypeInt64) } - if value, ok := duo.mutation.Scope(); ok { - _spec.SetField(decision.FieldScope, field.TypeString, value) - } - if value, ok := duo.mutation.Value(); ok { - _spec.SetField(decision.FieldValue, field.TypeString, value) - } - if value, ok := duo.mutation.Origin(); ok { - _spec.SetField(decision.FieldOrigin, field.TypeString, value) - } - if value, ok := duo.mutation.Simulated(); ok { - _spec.SetField(decision.FieldSimulated, field.TypeBool, value) - } - if value, ok := duo.mutation.UUID(); ok { - _spec.SetField(decision.FieldUUID, field.TypeString, value) - } if duo.mutation.UUIDCleared() { _spec.ClearField(decision.FieldUUID, field.TypeString) } diff --git a/pkg/database/ent/event_update.go b/pkg/database/ent/event_update.go index 0bc8a7f9243..c2f5c6cddb1 100644 --- a/pkg/database/ent/event_update.go +++ b/pkg/database/ent/event_update.go @@ -35,34 +35,6 @@ func (eu *EventUpdate) SetUpdatedAt(t time.Time) *EventUpdate { return eu } -// SetTime sets the "time" field. 
-func (eu *EventUpdate) SetTime(t time.Time) *EventUpdate { - eu.mutation.SetTime(t) - return eu -} - -// SetNillableTime sets the "time" field if the given value is not nil. -func (eu *EventUpdate) SetNillableTime(t *time.Time) *EventUpdate { - if t != nil { - eu.SetTime(*t) - } - return eu -} - -// SetSerialized sets the "serialized" field. -func (eu *EventUpdate) SetSerialized(s string) *EventUpdate { - eu.mutation.SetSerialized(s) - return eu -} - -// SetNillableSerialized sets the "serialized" field if the given value is not nil. -func (eu *EventUpdate) SetNillableSerialized(s *string) *EventUpdate { - if s != nil { - eu.SetSerialized(*s) - } - return eu -} - // SetAlertEvents sets the "alert_events" field. func (eu *EventUpdate) SetAlertEvents(i int) *EventUpdate { eu.mutation.SetAlertEvents(i) @@ -149,20 +121,7 @@ func (eu *EventUpdate) defaults() { } } -// check runs all checks and user-defined validators on the builder. -func (eu *EventUpdate) check() error { - if v, ok := eu.mutation.Serialized(); ok { - if err := event.SerializedValidator(v); err != nil { - return &ValidationError{Name: "serialized", err: fmt.Errorf(`ent: validator failed for field "Event.serialized": %w`, err)} - } - } - return nil -} - func (eu *EventUpdate) sqlSave(ctx context.Context) (n int, err error) { - if err := eu.check(); err != nil { - return n, err - } _spec := sqlgraph.NewUpdateSpec(event.Table, event.Columns, sqlgraph.NewFieldSpec(event.FieldID, field.TypeInt)) if ps := eu.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { @@ -174,12 +133,6 @@ func (eu *EventUpdate) sqlSave(ctx context.Context) (n int, err error) { if value, ok := eu.mutation.UpdatedAt(); ok { _spec.SetField(event.FieldUpdatedAt, field.TypeTime, value) } - if value, ok := eu.mutation.Time(); ok { - _spec.SetField(event.FieldTime, field.TypeTime, value) - } - if value, ok := eu.mutation.Serialized(); ok { - _spec.SetField(event.FieldSerialized, field.TypeString, value) - } 
if eu.mutation.OwnerCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, @@ -235,34 +188,6 @@ func (euo *EventUpdateOne) SetUpdatedAt(t time.Time) *EventUpdateOne { return euo } -// SetTime sets the "time" field. -func (euo *EventUpdateOne) SetTime(t time.Time) *EventUpdateOne { - euo.mutation.SetTime(t) - return euo -} - -// SetNillableTime sets the "time" field if the given value is not nil. -func (euo *EventUpdateOne) SetNillableTime(t *time.Time) *EventUpdateOne { - if t != nil { - euo.SetTime(*t) - } - return euo -} - -// SetSerialized sets the "serialized" field. -func (euo *EventUpdateOne) SetSerialized(s string) *EventUpdateOne { - euo.mutation.SetSerialized(s) - return euo -} - -// SetNillableSerialized sets the "serialized" field if the given value is not nil. -func (euo *EventUpdateOne) SetNillableSerialized(s *string) *EventUpdateOne { - if s != nil { - euo.SetSerialized(*s) - } - return euo -} - // SetAlertEvents sets the "alert_events" field. func (euo *EventUpdateOne) SetAlertEvents(i int) *EventUpdateOne { euo.mutation.SetAlertEvents(i) @@ -362,20 +287,7 @@ func (euo *EventUpdateOne) defaults() { } } -// check runs all checks and user-defined validators on the builder. 
-func (euo *EventUpdateOne) check() error { - if v, ok := euo.mutation.Serialized(); ok { - if err := event.SerializedValidator(v); err != nil { - return &ValidationError{Name: "serialized", err: fmt.Errorf(`ent: validator failed for field "Event.serialized": %w`, err)} - } - } - return nil -} - func (euo *EventUpdateOne) sqlSave(ctx context.Context) (_node *Event, err error) { - if err := euo.check(); err != nil { - return _node, err - } _spec := sqlgraph.NewUpdateSpec(event.Table, event.Columns, sqlgraph.NewFieldSpec(event.FieldID, field.TypeInt)) id, ok := euo.mutation.ID() if !ok { @@ -404,12 +316,6 @@ func (euo *EventUpdateOne) sqlSave(ctx context.Context) (_node *Event, err error if value, ok := euo.mutation.UpdatedAt(); ok { _spec.SetField(event.FieldUpdatedAt, field.TypeTime, value) } - if value, ok := euo.mutation.Time(); ok { - _spec.SetField(event.FieldTime, field.TypeTime, value) - } - if value, ok := euo.mutation.Serialized(); ok { - _spec.SetField(event.FieldSerialized, field.TypeString, value) - } if euo.mutation.OwnerCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, diff --git a/pkg/database/ent/lock_update.go b/pkg/database/ent/lock_update.go index 988363abd17..934e68c0762 100644 --- a/pkg/database/ent/lock_update.go +++ b/pkg/database/ent/lock_update.go @@ -6,7 +6,6 @@ import ( "context" "errors" "fmt" - "time" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" @@ -28,20 +27,6 @@ func (lu *LockUpdate) Where(ps ...predicate.Lock) *LockUpdate { return lu } -// SetCreatedAt sets the "created_at" field. -func (lu *LockUpdate) SetCreatedAt(t time.Time) *LockUpdate { - lu.mutation.SetCreatedAt(t) - return lu -} - -// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. -func (lu *LockUpdate) SetNillableCreatedAt(t *time.Time) *LockUpdate { - if t != nil { - lu.SetCreatedAt(*t) - } - return lu -} - // Mutation returns the LockMutation object of the builder. 
func (lu *LockUpdate) Mutation() *LockMutation { return lu.mutation @@ -83,9 +68,6 @@ func (lu *LockUpdate) sqlSave(ctx context.Context) (n int, err error) { } } } - if value, ok := lu.mutation.CreatedAt(); ok { - _spec.SetField(lock.FieldCreatedAt, field.TypeTime, value) - } if n, err = sqlgraph.UpdateNodes(ctx, lu.driver, _spec); err != nil { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{lock.Label} @@ -106,20 +88,6 @@ type LockUpdateOne struct { mutation *LockMutation } -// SetCreatedAt sets the "created_at" field. -func (luo *LockUpdateOne) SetCreatedAt(t time.Time) *LockUpdateOne { - luo.mutation.SetCreatedAt(t) - return luo -} - -// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. -func (luo *LockUpdateOne) SetNillableCreatedAt(t *time.Time) *LockUpdateOne { - if t != nil { - luo.SetCreatedAt(*t) - } - return luo -} - // Mutation returns the LockMutation object of the builder. func (luo *LockUpdateOne) Mutation() *LockMutation { return luo.mutation @@ -191,9 +159,6 @@ func (luo *LockUpdateOne) sqlSave(ctx context.Context) (_node *Lock, err error) } } } - if value, ok := luo.mutation.CreatedAt(); ok { - _spec.SetField(lock.FieldCreatedAt, field.TypeTime, value) - } _node = &Lock{config: luo.config} _spec.Assign = _node.assignValues _spec.ScanValues = _node.scanValues diff --git a/pkg/database/ent/meta_update.go b/pkg/database/ent/meta_update.go index 76567c5eff7..bdf622eb6c3 100644 --- a/pkg/database/ent/meta_update.go +++ b/pkg/database/ent/meta_update.go @@ -29,54 +29,12 @@ func (mu *MetaUpdate) Where(ps ...predicate.Meta) *MetaUpdate { return mu } -// SetCreatedAt sets the "created_at" field. -func (mu *MetaUpdate) SetCreatedAt(t time.Time) *MetaUpdate { - mu.mutation.SetCreatedAt(t) - return mu -} - -// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. 
-func (mu *MetaUpdate) SetNillableCreatedAt(t *time.Time) *MetaUpdate { - if t != nil { - mu.SetCreatedAt(*t) - } - return mu -} - // SetUpdatedAt sets the "updated_at" field. func (mu *MetaUpdate) SetUpdatedAt(t time.Time) *MetaUpdate { mu.mutation.SetUpdatedAt(t) return mu } -// SetKey sets the "key" field. -func (mu *MetaUpdate) SetKey(s string) *MetaUpdate { - mu.mutation.SetKey(s) - return mu -} - -// SetNillableKey sets the "key" field if the given value is not nil. -func (mu *MetaUpdate) SetNillableKey(s *string) *MetaUpdate { - if s != nil { - mu.SetKey(*s) - } - return mu -} - -// SetValue sets the "value" field. -func (mu *MetaUpdate) SetValue(s string) *MetaUpdate { - mu.mutation.SetValue(s) - return mu -} - -// SetNillableValue sets the "value" field if the given value is not nil. -func (mu *MetaUpdate) SetNillableValue(s *string) *MetaUpdate { - if s != nil { - mu.SetValue(*s) - } - return mu -} - // SetAlertMetas sets the "alert_metas" field. func (mu *MetaUpdate) SetAlertMetas(i int) *MetaUpdate { mu.mutation.SetAlertMetas(i) @@ -163,20 +121,7 @@ func (mu *MetaUpdate) defaults() { } } -// check runs all checks and user-defined validators on the builder. 
-func (mu *MetaUpdate) check() error { - if v, ok := mu.mutation.Value(); ok { - if err := meta.ValueValidator(v); err != nil { - return &ValidationError{Name: "value", err: fmt.Errorf(`ent: validator failed for field "Meta.value": %w`, err)} - } - } - return nil -} - func (mu *MetaUpdate) sqlSave(ctx context.Context) (n int, err error) { - if err := mu.check(); err != nil { - return n, err - } _spec := sqlgraph.NewUpdateSpec(meta.Table, meta.Columns, sqlgraph.NewFieldSpec(meta.FieldID, field.TypeInt)) if ps := mu.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { @@ -185,18 +130,9 @@ func (mu *MetaUpdate) sqlSave(ctx context.Context) (n int, err error) { } } } - if value, ok := mu.mutation.CreatedAt(); ok { - _spec.SetField(meta.FieldCreatedAt, field.TypeTime, value) - } if value, ok := mu.mutation.UpdatedAt(); ok { _spec.SetField(meta.FieldUpdatedAt, field.TypeTime, value) } - if value, ok := mu.mutation.Key(); ok { - _spec.SetField(meta.FieldKey, field.TypeString, value) - } - if value, ok := mu.mutation.Value(); ok { - _spec.SetField(meta.FieldValue, field.TypeString, value) - } if mu.mutation.OwnerCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, @@ -246,54 +182,12 @@ type MetaUpdateOne struct { mutation *MetaMutation } -// SetCreatedAt sets the "created_at" field. -func (muo *MetaUpdateOne) SetCreatedAt(t time.Time) *MetaUpdateOne { - muo.mutation.SetCreatedAt(t) - return muo -} - -// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. -func (muo *MetaUpdateOne) SetNillableCreatedAt(t *time.Time) *MetaUpdateOne { - if t != nil { - muo.SetCreatedAt(*t) - } - return muo -} - // SetUpdatedAt sets the "updated_at" field. func (muo *MetaUpdateOne) SetUpdatedAt(t time.Time) *MetaUpdateOne { muo.mutation.SetUpdatedAt(t) return muo } -// SetKey sets the "key" field. 
-func (muo *MetaUpdateOne) SetKey(s string) *MetaUpdateOne { - muo.mutation.SetKey(s) - return muo -} - -// SetNillableKey sets the "key" field if the given value is not nil. -func (muo *MetaUpdateOne) SetNillableKey(s *string) *MetaUpdateOne { - if s != nil { - muo.SetKey(*s) - } - return muo -} - -// SetValue sets the "value" field. -func (muo *MetaUpdateOne) SetValue(s string) *MetaUpdateOne { - muo.mutation.SetValue(s) - return muo -} - -// SetNillableValue sets the "value" field if the given value is not nil. -func (muo *MetaUpdateOne) SetNillableValue(s *string) *MetaUpdateOne { - if s != nil { - muo.SetValue(*s) - } - return muo -} - // SetAlertMetas sets the "alert_metas" field. func (muo *MetaUpdateOne) SetAlertMetas(i int) *MetaUpdateOne { muo.mutation.SetAlertMetas(i) @@ -393,20 +287,7 @@ func (muo *MetaUpdateOne) defaults() { } } -// check runs all checks and user-defined validators on the builder. -func (muo *MetaUpdateOne) check() error { - if v, ok := muo.mutation.Value(); ok { - if err := meta.ValueValidator(v); err != nil { - return &ValidationError{Name: "value", err: fmt.Errorf(`ent: validator failed for field "Meta.value": %w`, err)} - } - } - return nil -} - func (muo *MetaUpdateOne) sqlSave(ctx context.Context) (_node *Meta, err error) { - if err := muo.check(); err != nil { - return _node, err - } _spec := sqlgraph.NewUpdateSpec(meta.Table, meta.Columns, sqlgraph.NewFieldSpec(meta.FieldID, field.TypeInt)) id, ok := muo.mutation.ID() if !ok { @@ -432,18 +313,9 @@ func (muo *MetaUpdateOne) sqlSave(ctx context.Context) (_node *Meta, err error) } } } - if value, ok := muo.mutation.CreatedAt(); ok { - _spec.SetField(meta.FieldCreatedAt, field.TypeTime, value) - } if value, ok := muo.mutation.UpdatedAt(); ok { _spec.SetField(meta.FieldUpdatedAt, field.TypeTime, value) } - if value, ok := muo.mutation.Key(); ok { - _spec.SetField(meta.FieldKey, field.TypeString, value) - } - if value, ok := muo.mutation.Value(); ok { - 
_spec.SetField(meta.FieldValue, field.TypeString, value) - } if muo.mutation.OwnerCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, diff --git a/pkg/database/ent/migrate/schema.go b/pkg/database/ent/migrate/schema.go index e5d43d42314..b0e7f990f6e 100644 --- a/pkg/database/ent/migrate/schema.go +++ b/pkg/database/ent/migrate/schema.go @@ -68,7 +68,6 @@ var ( {Name: "ip_address", Type: field.TypeString, Nullable: true, Default: ""}, {Name: "type", Type: field.TypeString, Nullable: true}, {Name: "version", Type: field.TypeString, Nullable: true}, - {Name: "until", Type: field.TypeTime, Nullable: true}, {Name: "last_pull", Type: field.TypeTime}, {Name: "auth_type", Type: field.TypeString, Default: "api-key"}, } diff --git a/pkg/database/ent/mutation.go b/pkg/database/ent/mutation.go index aed004fb7a6..b88154324bb 100644 --- a/pkg/database/ent/mutation.go +++ b/pkg/database/ent/mutation.go @@ -2390,7 +2390,6 @@ type BouncerMutation struct { ip_address *string _type *string version *string - until *time.Time last_pull *time.Time auth_type *string clearedFields map[string]struct{} @@ -2824,55 +2823,6 @@ func (m *BouncerMutation) ResetVersion() { delete(m.clearedFields, bouncer.FieldVersion) } -// SetUntil sets the "until" field. -func (m *BouncerMutation) SetUntil(t time.Time) { - m.until = &t -} - -// Until returns the value of the "until" field in the mutation. -func (m *BouncerMutation) Until() (r time.Time, exists bool) { - v := m.until - if v == nil { - return - } - return *v, true -} - -// OldUntil returns the old "until" field's value of the Bouncer entity. -// If the Bouncer object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
-func (m *BouncerMutation) OldUntil(ctx context.Context) (v time.Time, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldUntil is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldUntil requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldUntil: %w", err) - } - return oldValue.Until, nil -} - -// ClearUntil clears the value of the "until" field. -func (m *BouncerMutation) ClearUntil() { - m.until = nil - m.clearedFields[bouncer.FieldUntil] = struct{}{} -} - -// UntilCleared returns if the "until" field was cleared in this mutation. -func (m *BouncerMutation) UntilCleared() bool { - _, ok := m.clearedFields[bouncer.FieldUntil] - return ok -} - -// ResetUntil resets all changes to the "until" field. -func (m *BouncerMutation) ResetUntil() { - m.until = nil - delete(m.clearedFields, bouncer.FieldUntil) -} - // SetLastPull sets the "last_pull" field. func (m *BouncerMutation) SetLastPull(t time.Time) { m.last_pull = &t @@ -2979,7 +2929,7 @@ func (m *BouncerMutation) Type() string { // order to get all numeric fields that were incremented/decremented, call // AddedFields(). 
func (m *BouncerMutation) Fields() []string { - fields := make([]string, 0, 11) + fields := make([]string, 0, 10) if m.created_at != nil { fields = append(fields, bouncer.FieldCreatedAt) } @@ -3004,9 +2954,6 @@ func (m *BouncerMutation) Fields() []string { if m.version != nil { fields = append(fields, bouncer.FieldVersion) } - if m.until != nil { - fields = append(fields, bouncer.FieldUntil) - } if m.last_pull != nil { fields = append(fields, bouncer.FieldLastPull) } @@ -3037,8 +2984,6 @@ func (m *BouncerMutation) Field(name string) (ent.Value, bool) { return m.GetType() case bouncer.FieldVersion: return m.Version() - case bouncer.FieldUntil: - return m.Until() case bouncer.FieldLastPull: return m.LastPull() case bouncer.FieldAuthType: @@ -3068,8 +3013,6 @@ func (m *BouncerMutation) OldField(ctx context.Context, name string) (ent.Value, return m.OldType(ctx) case bouncer.FieldVersion: return m.OldVersion(ctx) - case bouncer.FieldUntil: - return m.OldUntil(ctx) case bouncer.FieldLastPull: return m.OldLastPull(ctx) case bouncer.FieldAuthType: @@ -3139,13 +3082,6 @@ func (m *BouncerMutation) SetField(name string, value ent.Value) error { } m.SetVersion(v) return nil - case bouncer.FieldUntil: - v, ok := value.(time.Time) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetUntil(v) - return nil case bouncer.FieldLastPull: v, ok := value.(time.Time) if !ok { @@ -3199,9 +3135,6 @@ func (m *BouncerMutation) ClearedFields() []string { if m.FieldCleared(bouncer.FieldVersion) { fields = append(fields, bouncer.FieldVersion) } - if m.FieldCleared(bouncer.FieldUntil) { - fields = append(fields, bouncer.FieldUntil) - } return fields } @@ -3225,9 +3158,6 @@ func (m *BouncerMutation) ClearField(name string) error { case bouncer.FieldVersion: m.ClearVersion() return nil - case bouncer.FieldUntil: - m.ClearUntil() - return nil } return fmt.Errorf("unknown Bouncer nullable field %s", name) } @@ -3260,9 +3190,6 @@ func (m *BouncerMutation) 
ResetField(name string) error { case bouncer.FieldVersion: m.ResetVersion() return nil - case bouncer.FieldUntil: - m.ResetUntil() - return nil case bouncer.FieldLastPull: m.ResetLastPull() return nil diff --git a/pkg/database/ent/runtime.go b/pkg/database/ent/runtime.go index 1c5b3460cce..c593cd89fcb 100644 --- a/pkg/database/ent/runtime.go +++ b/pkg/database/ent/runtime.go @@ -72,16 +72,12 @@ func init() { bouncerDescIPAddress := bouncerFields[5].Descriptor() // bouncer.DefaultIPAddress holds the default value on creation for the ip_address field. bouncer.DefaultIPAddress = bouncerDescIPAddress.Default.(string) - // bouncerDescUntil is the schema descriptor for until field. - bouncerDescUntil := bouncerFields[8].Descriptor() - // bouncer.DefaultUntil holds the default value on creation for the until field. - bouncer.DefaultUntil = bouncerDescUntil.Default.(func() time.Time) // bouncerDescLastPull is the schema descriptor for last_pull field. - bouncerDescLastPull := bouncerFields[9].Descriptor() + bouncerDescLastPull := bouncerFields[8].Descriptor() // bouncer.DefaultLastPull holds the default value on creation for the last_pull field. bouncer.DefaultLastPull = bouncerDescLastPull.Default.(func() time.Time) // bouncerDescAuthType is the schema descriptor for auth_type field. - bouncerDescAuthType := bouncerFields[10].Descriptor() + bouncerDescAuthType := bouncerFields[9].Descriptor() // bouncer.DefaultAuthType holds the default value on creation for the auth_type field. 
bouncer.DefaultAuthType = bouncerDescAuthType.Default.(string) configitemFields := schema.ConfigItem{}.Fields() diff --git a/pkg/database/ent/schema/alert.go b/pkg/database/ent/schema/alert.go index bda7cc7d0b9..343979e3db7 100644 --- a/pkg/database/ent/schema/alert.go +++ b/pkg/database/ent/schema/alert.go @@ -6,6 +6,7 @@ import ( "entgo.io/ent/schema/edge" "entgo.io/ent/schema/field" "entgo.io/ent/schema/index" + "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -23,34 +24,34 @@ func (Alert) Fields() []ent.Field { field.Time("updated_at"). Default(types.UtcNow). UpdateDefault(types.UtcNow), - field.String("scenario"), - field.String("bucketId").Default("").Optional(), - field.String("message").Default("").Optional(), - field.Int32("eventsCount").Default(0).Optional(), - field.Time("startedAt").Default(types.UtcNow).Optional(), - field.Time("stoppedAt").Default(types.UtcNow).Optional(), + field.String("scenario").Immutable(), + field.String("bucketId").Default("").Optional().Immutable(), + field.String("message").Default("").Optional().Immutable(), + field.Int32("eventsCount").Default(0).Optional().Immutable(), + field.Time("startedAt").Default(types.UtcNow).Optional().Immutable(), + field.Time("stoppedAt").Default(types.UtcNow).Optional().Immutable(), field.String("sourceIp"). - Optional(), + Optional().Immutable(), field.String("sourceRange"). - Optional(), + Optional().Immutable(), field.String("sourceAsNumber"). - Optional(), + Optional().Immutable(), field.String("sourceAsName"). - Optional(), + Optional().Immutable(), field.String("sourceCountry"). - Optional(), + Optional().Immutable(), field.Float32("sourceLatitude"). - Optional(), + Optional().Immutable(), field.Float32("sourceLongitude"). 
- Optional(), - field.String("sourceScope").Optional(), - field.String("sourceValue").Optional(), - field.Int32("capacity").Optional(), - field.String("leakSpeed").Optional(), - field.String("scenarioVersion").Optional(), - field.String("scenarioHash").Optional(), - field.Bool("simulated").Default(false), - field.String("uuid").Optional(), //this uuid is mostly here to ensure that CAPI/PAPI has a unique id for each alert + Optional().Immutable(), + field.String("sourceScope").Optional().Immutable(), + field.String("sourceValue").Optional().Immutable(), + field.Int32("capacity").Optional().Immutable(), + field.String("leakSpeed").Optional().Immutable(), + field.String("scenarioVersion").Optional().Immutable(), + field.String("scenarioHash").Optional().Immutable(), + field.Bool("simulated").Default(false).Immutable(), + field.String("uuid").Optional().Immutable(), // this uuid is mostly here to ensure that CAPI/PAPI has a unique id for each alert } } diff --git a/pkg/database/ent/schema/bouncer.go b/pkg/database/ent/schema/bouncer.go index 18efecb3f03..acaa86008f5 100644 --- a/pkg/database/ent/schema/bouncer.go +++ b/pkg/database/ent/schema/bouncer.go @@ -3,6 +3,7 @@ package schema import ( "entgo.io/ent" "entgo.io/ent/schema/field" + "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -16,17 +17,17 @@ func (Bouncer) Fields() []ent.Field { return []ent.Field{ field.Time("created_at"). Default(types.UtcNow). - StructTag(`json:"created_at"`), + StructTag(`json:"created_at"`). + Immutable(), field.Time("updated_at"). Default(types.UtcNow). 
UpdateDefault(types.UtcNow).StructTag(`json:"updated_at"`), - field.String("name").Unique().StructTag(`json:"name"`), + field.String("name").Unique().StructTag(`json:"name"`).Immutable(), field.String("api_key").Sensitive(), // hash of api_key field.Bool("revoked").StructTag(`json:"revoked"`), field.String("ip_address").Default("").Optional().StructTag(`json:"ip_address"`), field.String("type").Optional().StructTag(`json:"type"`), field.String("version").Optional().StructTag(`json:"version"`), - field.Time("until").Default(types.UtcNow).Optional().StructTag(`json:"until"`), field.Time("last_pull"). Default(types.UtcNow).StructTag(`json:"last_pull"`), field.String("auth_type").StructTag(`json:"auth_type"`).Default(types.ApiKeyAuthType), diff --git a/pkg/database/ent/schema/config.go b/pkg/database/ent/schema/config.go index 036c55908ba..d526db25a8d 100644 --- a/pkg/database/ent/schema/config.go +++ b/pkg/database/ent/schema/config.go @@ -3,6 +3,7 @@ package schema import ( "entgo.io/ent" "entgo.io/ent/schema/field" + "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -20,7 +21,7 @@ func (ConfigItem) Fields() []ent.Field { field.Time("updated_at"). Default(types.UtcNow). 
UpdateDefault(types.UtcNow).StructTag(`json:"updated_at"`), - field.String("name").Unique().StructTag(`json:"name"`), + field.String("name").Unique().StructTag(`json:"name"`).Immutable(), field.String("value").StructTag(`json:"value"`), // a json object } } diff --git a/pkg/database/ent/schema/decision.go b/pkg/database/ent/schema/decision.go index d5193910146..4089be38096 100644 --- a/pkg/database/ent/schema/decision.go +++ b/pkg/database/ent/schema/decision.go @@ -6,6 +6,7 @@ import ( "entgo.io/ent/schema/edge" "entgo.io/ent/schema/field" "entgo.io/ent/schema/index" + "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -26,18 +27,18 @@ func (Decision) Fields() []ent.Field { field.Time("until").Nillable().Optional().SchemaType(map[string]string{ dialect.MySQL: "datetime", }), - field.String("scenario"), - field.String("type"), - field.Int64("start_ip").Optional(), - field.Int64("end_ip").Optional(), - field.Int64("start_suffix").Optional(), - field.Int64("end_suffix").Optional(), - field.Int64("ip_size").Optional(), - field.String("scope"), - field.String("value"), - field.String("origin"), - field.Bool("simulated").Default(false), - field.String("uuid").Optional(), //this uuid is mostly here to ensure that CAPI/PAPI has a unique id for each decision + field.String("scenario").Immutable(), + field.String("type").Immutable(), + field.Int64("start_ip").Optional().Immutable(), + field.Int64("end_ip").Optional().Immutable(), + field.Int64("start_suffix").Optional().Immutable(), + field.Int64("end_suffix").Optional().Immutable(), + field.Int64("ip_size").Optional().Immutable(), + field.String("scope").Immutable(), + field.String("value").Immutable(), + field.String("origin").Immutable(), + field.Bool("simulated").Default(false).Immutable(), + field.String("uuid").Optional().Immutable(), // this uuid is mostly here to ensure that CAPI/PAPI has a unique id for each decision field.Int("alert_decisions").Optional(), } } diff --git a/pkg/database/ent/schema/event.go 
b/pkg/database/ent/schema/event.go index f982ebe9653..107f68e5274 100644 --- a/pkg/database/ent/schema/event.go +++ b/pkg/database/ent/schema/event.go @@ -5,6 +5,7 @@ import ( "entgo.io/ent/schema/edge" "entgo.io/ent/schema/field" "entgo.io/ent/schema/index" + "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -22,8 +23,8 @@ func (Event) Fields() []ent.Field { field.Time("updated_at"). Default(types.UtcNow). UpdateDefault(types.UtcNow), - field.Time("time"), - field.String("serialized").MaxLen(8191), + field.Time("time").Immutable(), + field.String("serialized").MaxLen(8191).Immutable(), field.Int("alert_events").Optional(), } } diff --git a/pkg/database/ent/schema/lock.go b/pkg/database/ent/schema/lock.go index 0d49bac1bf6..a287e2b59ad 100644 --- a/pkg/database/ent/schema/lock.go +++ b/pkg/database/ent/schema/lock.go @@ -3,6 +3,7 @@ package schema import ( "entgo.io/ent" "entgo.io/ent/schema/field" + "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -13,7 +14,7 @@ type Lock struct { func (Lock) Fields() []ent.Field { return []ent.Field{ field.String("name").Unique().Immutable().StructTag(`json:"name"`), - field.Time("created_at").Default(types.UtcNow).StructTag(`json:"created_at"`), + field.Time("created_at").Default(types.UtcNow).StructTag(`json:"created_at"`).Immutable(), } } diff --git a/pkg/database/ent/schema/meta.go b/pkg/database/ent/schema/meta.go index 877fffa8a2e..a87010cd8a3 100644 --- a/pkg/database/ent/schema/meta.go +++ b/pkg/database/ent/schema/meta.go @@ -5,6 +5,7 @@ import ( "entgo.io/ent/schema/edge" "entgo.io/ent/schema/field" "entgo.io/ent/schema/index" + "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -17,12 +18,12 @@ type Meta struct { func (Meta) Fields() []ent.Field { return []ent.Field{ field.Time("created_at"). - Default(types.UtcNow), + Default(types.UtcNow).Immutable(), field.Time("updated_at"). Default(types.UtcNow). 
UpdateDefault(types.UtcNow), - field.String("key"), - field.String("value").MaxLen(4095), + field.String("key").Immutable(), + field.String("value").MaxLen(4095).Immutable(), field.Int("alert_metas").Optional(), } } From b8ee31ade232f85388880702a24c5edbde3ced63 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 31 May 2024 15:44:30 +0200 Subject: [PATCH 149/318] pkg/database: simplify flush agents/bouncers (#3026) * pkg/database: simplify flush agents/bouncers * lint --- pkg/database/flush.go | 153 +++++++++++++++++++++--------------------- 1 file changed, 75 insertions(+), 78 deletions(-) diff --git a/pkg/database/flush.go b/pkg/database/flush.go index ad4a912de84..56e42715b2c 100644 --- a/pkg/database/flush.go +++ b/pkg/database/flush.go @@ -17,22 +17,25 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) - func (c *Client) StartFlushScheduler(config *csconfig.FlushDBCfg) (*gocron.Scheduler, error) { maxItems := 0 maxAge := "" + if config.MaxItems != nil && *config.MaxItems <= 0 { return nil, errors.New("max_items can't be zero or negative number") } + if config.MaxItems != nil { maxItems = *config.MaxItems } + if config.MaxAge != nil && *config.MaxAge != "" { maxAge = *config.MaxAge } // Init & Start cronjob every minute for alerts scheduler := gocron.NewScheduler(time.UTC) + job, err := scheduler.Every(1).Minute().Do(c.FlushAlerts, maxAge, maxItems) if err != nil { return nil, fmt.Errorf("while starting FlushAlerts scheduler: %w", err) @@ -46,38 +49,48 @@ func (c *Client) StartFlushScheduler(config *csconfig.FlushDBCfg) (*gocron.Sched if err != nil { return nil, fmt.Errorf("while parsing agents cert auto-delete duration: %w", err) } + config.AgentsGC.CertDuration = &duration } + if config.AgentsGC.LoginPassword != nil { duration, err := ParseDuration(*config.AgentsGC.LoginPassword) if err != nil { return nil, fmt.Errorf("while parsing agents login/password auto-delete duration: %w", err) } + 
config.AgentsGC.LoginPasswordDuration = &duration } + if config.AgentsGC.Api != nil { log.Warning("agents auto-delete for API auth is not supported (use cert or login_password)") } } + if config.BouncersGC != nil { if config.BouncersGC.Cert != nil { duration, err := ParseDuration(*config.BouncersGC.Cert) if err != nil { return nil, fmt.Errorf("while parsing bouncers cert auto-delete duration: %w", err) } + config.BouncersGC.CertDuration = &duration } + if config.BouncersGC.Api != nil { duration, err := ParseDuration(*config.BouncersGC.Api) if err != nil { return nil, fmt.Errorf("while parsing bouncers api auto-delete duration: %w", err) } + config.BouncersGC.ApiDuration = &duration } + if config.BouncersGC.LoginPassword != nil { log.Warning("bouncers auto-delete for login/password auth is not supported (use cert or api)") } } + baJob, err := scheduler.Every(1).Minute().Do(c.FlushAgentsAndBouncers, config.AgentsGC, config.BouncersGC) if err != nil { return nil, fmt.Errorf("while starting FlushAgentsAndBouncers scheduler: %w", err) @@ -89,7 +102,6 @@ func (c *Client) StartFlushScheduler(config *csconfig.FlushDBCfg) (*gocron.Sched return scheduler, nil } - func (c *Client) FlushOrphans() { /* While it has only been linked to some very corner-case bug : https://github.com/crowdsecurity/crowdsec/issues/778 */ /* We want to take care of orphaned events for which the parent alert/decision has been deleted */ @@ -98,6 +110,7 @@ func (c *Client) FlushOrphans() { c.Log.Warningf("error while deleting orphan events: %s", err) return } + if eventsCount > 0 { c.Log.Infof("%d deleted orphan events", eventsCount) } @@ -109,103 +122,77 @@ func (c *Client) FlushOrphans() { c.Log.Warningf("error while deleting orphan decisions: %s", err) return } + if eventsCount > 0 { c.Log.Infof("%d deleted orphan decisions", eventsCount) } } -func (c *Client) flushBouncers(bouncersCfg *csconfig.AuthGCCfg) { - if bouncersCfg == nil { +func (c *Client) flushBouncers(authType string, duration 
*time.Duration) { + if duration == nil { return } - if bouncersCfg.ApiDuration != nil { - log.Debug("trying to delete old bouncers from api") + count, err := c.Ent.Bouncer.Delete().Where( + bouncer.LastPullLTE(time.Now().UTC().Add(-*duration)), + ).Where( + bouncer.AuthTypeEQ(authType), + ).Exec(c.CTX) - deletionCount, err := c.Ent.Bouncer.Delete().Where( - bouncer.LastPullLTE(time.Now().UTC().Add(-*bouncersCfg.ApiDuration)), - ).Where( - bouncer.AuthTypeEQ(types.ApiKeyAuthType), - ).Exec(c.CTX) - if err != nil { - c.Log.Errorf("while auto-deleting expired bouncers (api key): %s", err) - } else if deletionCount > 0 { - c.Log.Infof("deleted %d expired bouncers (api auth)", deletionCount) - } + if err != nil { + c.Log.Errorf("while auto-deleting expired bouncers (%s): %s", authType, err) + return } - if bouncersCfg.CertDuration != nil { - log.Debug("trying to delete old bouncers from cert") - - deletionCount, err := c.Ent.Bouncer.Delete().Where( - bouncer.LastPullLTE(time.Now().UTC().Add(-*bouncersCfg.CertDuration)), - ).Where( - bouncer.AuthTypeEQ(types.TlsAuthType), - ).Exec(c.CTX) - if err != nil { - c.Log.Errorf("while auto-deleting expired bouncers (api key): %s", err) - } else if deletionCount > 0 { - c.Log.Infof("deleted %d expired bouncers (api auth)", deletionCount) - } + if count > 0 { + c.Log.Infof("deleted %d expired bouncers (%s)", count, authType) } } -func (c *Client) flushAgents(agentsCfg *csconfig.AuthGCCfg) { - if agentsCfg == nil { +func (c *Client) flushAgents(authType string, duration *time.Duration) { + if duration == nil { return } - if agentsCfg.CertDuration != nil { - log.Debug("trying to delete old agents from cert") - - deletionCount, err := c.Ent.Machine.Delete().Where( - machine.LastHeartbeatLTE(time.Now().UTC().Add(-*agentsCfg.CertDuration)), - ).Where( - machine.Not(machine.HasAlerts()), - ).Where( - machine.AuthTypeEQ(types.TlsAuthType), - ).Exec(c.CTX) - log.Debugf("deleted %d entries", deletionCount) - if err != nil { - 
c.Log.Errorf("while auto-deleting expired machine (cert): %s", err) - } else if deletionCount > 0 { - c.Log.Infof("deleted %d expired machine (cert auth)", deletionCount) - } + count, err := c.Ent.Machine.Delete().Where( + machine.LastHeartbeatLTE(time.Now().UTC().Add(-*duration)), + machine.Not(machine.HasAlerts()), + machine.AuthTypeEQ(authType), + ).Exec(c.CTX) + + if err != nil { + c.Log.Errorf("while auto-deleting expired machines (%s): %s", authType, err) + return } - if agentsCfg.LoginPasswordDuration != nil { - log.Debug("trying to delete old agents from password") - - deletionCount, err := c.Ent.Machine.Delete().Where( - machine.LastHeartbeatLTE(time.Now().UTC().Add(-*agentsCfg.LoginPasswordDuration)), - ).Where( - machine.Not(machine.HasAlerts()), - ).Where( - machine.AuthTypeEQ(types.PasswordAuthType), - ).Exec(c.CTX) - log.Debugf("deleted %d entries", deletionCount) - if err != nil { - c.Log.Errorf("while auto-deleting expired machine (password): %s", err) - } else if deletionCount > 0 { - c.Log.Infof("deleted %d expired machine (password auth)", deletionCount) - } + if count > 0 { + c.Log.Infof("deleted %d expired machines (%s auth)", count, authType) } } func (c *Client) FlushAgentsAndBouncers(agentsCfg *csconfig.AuthGCCfg, bouncersCfg *csconfig.AuthGCCfg) error { log.Debug("starting FlushAgentsAndBouncers") - c.flushBouncers(bouncersCfg) - c.flushAgents(agentsCfg) + if agentsCfg != nil { + c.flushAgents(types.TlsAuthType, agentsCfg.CertDuration) + c.flushAgents(types.PasswordAuthType, agentsCfg.LoginPasswordDuration) + } + + if bouncersCfg != nil { + c.flushBouncers(types.TlsAuthType, bouncersCfg.CertDuration) + c.flushBouncers(types.ApiKeyAuthType, bouncersCfg.ApiDuration) + } return nil } func (c *Client) FlushAlerts(MaxAge string, MaxItems int) error { - var deletedByAge int - var deletedByNbItem int - var totalAlerts int - var err error + var ( + deletedByAge int + deletedByNbItem int + totalAlerts int + err error + ) if !c.CanFlush { 
c.Log.Debug("a list is being imported, flushing later") @@ -215,6 +202,7 @@ func (c *Client) FlushAlerts(MaxAge string, MaxItems int) error { c.Log.Debug("Flushing orphan alerts") c.FlushOrphans() c.Log.Debug("Done flushing orphan alerts") + totalAlerts, err = c.TotalAlerts() if err != nil { c.Log.Warningf("FlushAlerts (max items count): %s", err) @@ -222,10 +210,12 @@ func (c *Client) FlushAlerts(MaxAge string, MaxItems int) error { } c.Log.Debugf("FlushAlerts (Total alerts): %d", totalAlerts) + if MaxAge != "" { filter := map[string][]string{ "created_before": {MaxAge}, } + nbDeleted, err := c.DeleteAlertWithFilter(filter) if err != nil { c.Log.Warningf("FlushAlerts (max age): %s", err) @@ -235,19 +225,21 @@ func (c *Client) FlushAlerts(MaxAge string, MaxItems int) error { c.Log.Debugf("FlushAlerts (deleted max age alerts): %d", nbDeleted) deletedByAge = nbDeleted } + if MaxItems > 0 { - //We get the highest id for the alerts - //We subtract MaxItems to avoid deleting alerts that are not old enough - //This gives us the oldest alert that we want to keep - //We then delete all the alerts with an id lower than this one - //We can do this because the id is auto-increment, and the database won't reuse the same id twice + // We get the highest id for the alerts + // We subtract MaxItems to avoid deleting alerts that are not old enough + // This gives us the oldest alert that we want to keep + // We then delete all the alerts with an id lower than this one + // We can do this because the id is auto-increment, and the database won't reuse the same id twice lastAlert, err := c.QueryAlertWithFilter(map[string][]string{ "sort": {"DESC"}, "limit": {"1"}, - //we do not care about fetching the edges, we just want the id + // we do not care about fetching the edges, we just want the id "with_decisions": {"false"}, }) c.Log.Debugf("FlushAlerts (last alert): %+v", lastAlert) + if err != nil { c.Log.Errorf("FlushAlerts: could not get last alert: %s", err) return fmt.Errorf("could 
not get last alert: %w", err) @@ -259,7 +251,7 @@ func (c *Client) FlushAlerts(MaxAge string, MaxItems int) error { c.Log.Debugf("FlushAlerts (max id): %d", maxid) if maxid > 0 { - //This may lead to orphan alerts (at least on MySQL), but the next time the flush job will run, they will be deleted + // This may lead to orphan alerts (at least on MySQL), but the next time the flush job will run, they will be deleted deletedByNbItem, err = c.Ent.Alert.Delete().Where(alert.IDLT(maxid)).Exec(c.CTX) if err != nil { @@ -269,11 +261,16 @@ func (c *Client) FlushAlerts(MaxAge string, MaxItems int) error { } } } + if deletedByNbItem > 0 { - c.Log.Infof("flushed %d/%d alerts because the max number of alerts has been reached (%d max)", deletedByNbItem, totalAlerts, MaxItems) + c.Log.Infof("flushed %d/%d alerts because the max number of alerts has been reached (%d max)", + deletedByNbItem, totalAlerts, MaxItems) } + if deletedByAge > 0 { - c.Log.Infof("flushed %d/%d alerts because they were created %s ago or more", deletedByAge, totalAlerts, MaxAge) + c.Log.Infof("flushed %d/%d alerts because they were created %s ago or more", + deletedByAge, totalAlerts, MaxAge) } + return nil } From 95bc5880f4ad65c799cc6d85d4ae54bcd9f95d36 Mon Sep 17 00:00:00 2001 From: Manuel Sabban Date: Fri, 31 May 2024 15:56:13 +0200 Subject: [PATCH 150/318] update vagrant image for fc39, fc40 and ubuntu24.04 (#3042) * update test distribution * add skip file to avoid mysql tests * add ubuntu 24.04 --- test/ansible/vagrant/{fedora-33 => fedora-37}/skip | 0 test/ansible/vagrant/{fedora-34 => fedora-38}/skip | 0 .../vagrant/{fedora-33 => fedora-39}/Vagrantfile | 3 +-- test/ansible/vagrant/fedora-39/skip | 9 +++++++++ .../vagrant/{fedora-34 => fedora-40}/Vagrantfile | 3 +-- test/ansible/vagrant/fedora-40/skip | 9 +++++++++ test/ansible/vagrant/ubuntu-24-04-noble/Vagrantfile | 10 ++++++++++ 7 files changed, 30 insertions(+), 4 deletions(-) rename test/ansible/vagrant/{fedora-33 => fedora-37}/skip (100%) mode 
change 100755 => 100644 rename test/ansible/vagrant/{fedora-34 => fedora-38}/skip (100%) mode change 100755 => 100644 rename test/ansible/vagrant/{fedora-33 => fedora-39}/Vagrantfile (69%) create mode 100644 test/ansible/vagrant/fedora-39/skip rename test/ansible/vagrant/{fedora-34 => fedora-40}/Vagrantfile (69%) create mode 100644 test/ansible/vagrant/fedora-40/skip create mode 100644 test/ansible/vagrant/ubuntu-24-04-noble/Vagrantfile diff --git a/test/ansible/vagrant/fedora-33/skip b/test/ansible/vagrant/fedora-37/skip old mode 100755 new mode 100644 similarity index 100% rename from test/ansible/vagrant/fedora-33/skip rename to test/ansible/vagrant/fedora-37/skip diff --git a/test/ansible/vagrant/fedora-34/skip b/test/ansible/vagrant/fedora-38/skip old mode 100755 new mode 100644 similarity index 100% rename from test/ansible/vagrant/fedora-34/skip rename to test/ansible/vagrant/fedora-38/skip diff --git a/test/ansible/vagrant/fedora-33/Vagrantfile b/test/ansible/vagrant/fedora-39/Vagrantfile similarity index 69% rename from test/ansible/vagrant/fedora-33/Vagrantfile rename to test/ansible/vagrant/fedora-39/Vagrantfile index df6f06944ae..ec03661fe39 100644 --- a/test/ansible/vagrant/fedora-33/Vagrantfile +++ b/test/ansible/vagrant/fedora-39/Vagrantfile @@ -1,8 +1,7 @@ # frozen_string_literal: true Vagrant.configure('2') do |config| - # config.vm.box = "fedora/33-cloud-base" - config.vm.box = 'generic/fedora33' + config.vm.box = "fedora/39-cloud-base" config.vm.provision "shell", inline: <<-SHELL SHELL end diff --git a/test/ansible/vagrant/fedora-39/skip b/test/ansible/vagrant/fedora-39/skip new file mode 100644 index 00000000000..4f1a9063d2b --- /dev/null +++ b/test/ansible/vagrant/fedora-39/skip @@ -0,0 +1,9 @@ +#!/bin/sh + +die() { + echo "$@" >&2 + exit 1 +} + +[ "${DB_BACKEND}" = "mysql" ] && die "mysql role does not support this distribution" +exit 0 diff --git a/test/ansible/vagrant/fedora-34/Vagrantfile b/test/ansible/vagrant/fedora-40/Vagrantfile 
similarity index 69% rename from test/ansible/vagrant/fedora-34/Vagrantfile rename to test/ansible/vagrant/fedora-40/Vagrantfile index db2db8d0879..ec03661fe39 100644 --- a/test/ansible/vagrant/fedora-34/Vagrantfile +++ b/test/ansible/vagrant/fedora-40/Vagrantfile @@ -1,8 +1,7 @@ # frozen_string_literal: true Vagrant.configure('2') do |config| - # config.vm.box = "fedora/34-cloud-base" - config.vm.box = 'generic/fedora34' + config.vm.box = "fedora/39-cloud-base" config.vm.provision "shell", inline: <<-SHELL SHELL end diff --git a/test/ansible/vagrant/fedora-40/skip b/test/ansible/vagrant/fedora-40/skip new file mode 100644 index 00000000000..4f1a9063d2b --- /dev/null +++ b/test/ansible/vagrant/fedora-40/skip @@ -0,0 +1,9 @@ +#!/bin/sh + +die() { + echo "$@" >&2 + exit 1 +} + +[ "${DB_BACKEND}" = "mysql" ] && die "mysql role does not support this distribution" +exit 0 diff --git a/test/ansible/vagrant/ubuntu-24-04-noble/Vagrantfile b/test/ansible/vagrant/ubuntu-24-04-noble/Vagrantfile new file mode 100644 index 00000000000..52490900fd8 --- /dev/null +++ b/test/ansible/vagrant/ubuntu-24-04-noble/Vagrantfile @@ -0,0 +1,10 @@ +# frozen_string_literal: true + +Vagrant.configure('2') do |config| + config.vm.box = 'alvistack/ubuntu-24.04' + config.vm.provision "shell", inline: <<-SHELL + SHELL +end + +common = '../common' +load common if File.exist?(common) From 599a5a49a1733057e6537bf6f4148124fe3e605a Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 31 May 2024 16:08:06 +0200 Subject: [PATCH 151/318] pkg/dumps.DumpTree: split to reduce complexity (#3001) * pkg/dumps.DumpTree: split to reduce complexity * lint --- .golangci.yml | 8 ++-- pkg/dumps/parser_dump.go | 84 +++++++++++++++++++++++++--------------- 2 files changed, 57 insertions(+), 35 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index c044903d0f2..544cebbac6a 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -3,7 +3,7 @@ linters-settings: cyclop: # lower this 
after refactoring - max-complexity: 48 + max-complexity: 45 gci: sections: @@ -18,11 +18,11 @@ linters-settings: gocognit: # lower this after refactoring - min-complexity: 145 + min-complexity: 128 gocyclo: # lower this after refactoring - min-complexity: 48 + min-complexity: 45 funlen: # Checks the number of lines in a function. @@ -55,7 +55,7 @@ linters-settings: nestif: # lower this after refactoring - min-complexity: 28 + min-complexity: 24 nlreturn: block-size: 5 diff --git a/pkg/dumps/parser_dump.go b/pkg/dumps/parser_dump.go index 227f96315d4..7d4e09ecf1a 100644 --- a/pkg/dumps/parser_dump.go +++ b/pkg/dumps/parser_dump.go @@ -92,40 +92,59 @@ func LoadParserDump(filepath string) (*ParserResults, error) { return &pdump, nil } -func DumpTree(parserResults ParserResults, bucketPour BucketPourInfo, opts DumpOpts) { +type tree struct { // note : we can use line -> time as the unique identifier (of acquisition) - state := make(map[time.Time]map[string]map[string]ParserResult) - assoc := make(map[time.Time]string, 0) - parser_order := make(map[string][]string) + state map[time.Time]map[string]map[string]ParserResult + assoc map[time.Time]string + parserOrder map[string][]string +} + +func newTree() *tree { + return &tree{ + state: make(map[time.Time]map[string]map[string]ParserResult), + assoc: make(map[time.Time]string), + parserOrder: make(map[string][]string), + } +} +func DumpTree(parserResults ParserResults, bucketPour BucketPourInfo, opts DumpOpts) { + t := newTree() + t.processEvents(parserResults) + t.processBuckets(bucketPour) + t.displayResults(opts) +} + +func (t *tree) processEvents(parserResults ParserResults) { for stage, parsers := range parserResults { // let's process parsers in the order according to idx - parser_order[stage] = make([]string, len(parsers)) + t.parserOrder[stage] = make([]string, len(parsers)) for pname, parser := range parsers { if len(parser) > 0 { - parser_order[stage][parser[0].Idx-1] = pname + 
t.parserOrder[stage][parser[0].Idx-1] = pname } } - for _, parser := range parser_order[stage] { + for _, parser := range t.parserOrder[stage] { results := parsers[parser] for _, parserRes := range results { evt := parserRes.Evt - if _, ok := state[evt.Line.Time]; !ok { - state[evt.Line.Time] = make(map[string]map[string]ParserResult) - assoc[evt.Line.Time] = evt.Line.Raw + if _, ok := t.state[evt.Line.Time]; !ok { + t.state[evt.Line.Time] = make(map[string]map[string]ParserResult) + t.assoc[evt.Line.Time] = evt.Line.Raw } - if _, ok := state[evt.Line.Time][stage]; !ok { - state[evt.Line.Time][stage] = make(map[string]ParserResult) + if _, ok := t.state[evt.Line.Time][stage]; !ok { + t.state[evt.Line.Time][stage] = make(map[string]ParserResult) } - state[evt.Line.Time][stage][parser] = ParserResult{Evt: evt, Success: parserRes.Success} + t.state[evt.Line.Time][stage][parser] = ParserResult{Evt: evt, Success: parserRes.Success} } } } +} +func (t *tree) processBuckets(bucketPour BucketPourInfo) { for bname, evtlist := range bucketPour { for _, evt := range evtlist { if evt.Line.Raw == "" { @@ -133,38 +152,41 @@ func DumpTree(parserResults ParserResults, bucketPour BucketPourInfo, opts DumpO } // it might be bucket overflow being reprocessed, skip this - if _, ok := state[evt.Line.Time]; !ok { - state[evt.Line.Time] = make(map[string]map[string]ParserResult) - assoc[evt.Line.Time] = evt.Line.Raw + if _, ok := t.state[evt.Line.Time]; !ok { + t.state[evt.Line.Time] = make(map[string]map[string]ParserResult) + t.assoc[evt.Line.Time] = evt.Line.Raw } // there is a trick : to know if an event successfully exit the parsers, we check if it reached the pour() phase // we thus use a fake stage "buckets" and a fake parser "OK" to know if it entered - if _, ok := state[evt.Line.Time]["buckets"]; !ok { - state[evt.Line.Time]["buckets"] = make(map[string]ParserResult) + if _, ok := t.state[evt.Line.Time]["buckets"]; !ok { + t.state[evt.Line.Time]["buckets"] = 
make(map[string]ParserResult) } - state[evt.Line.Time]["buckets"][bname] = ParserResult{Success: true} + t.state[evt.Line.Time]["buckets"][bname] = ParserResult{Success: true} } } +} +func (t *tree) displayResults(opts DumpOpts) { yellow := color.New(color.FgYellow).SprintFunc() red := color.New(color.FgRed).SprintFunc() green := color.New(color.FgGreen).SprintFunc() whitelistReason := "" + // get each line - for tstamp, rawstr := range assoc { + for tstamp, rawstr := range t.assoc { if opts.SkipOk { - if _, ok := state[tstamp]["buckets"]["OK"]; ok { + if _, ok := t.state[tstamp]["buckets"]["OK"]; ok { continue } } fmt.Printf("line: %s\n", rawstr) - skeys := make([]string, 0, len(state[tstamp])) + skeys := make([]string, 0, len(t.state[tstamp])) - for k := range state[tstamp] { + for k := range t.state[tstamp] { // there is a trick : to know if an event successfully exit the parsers, we check if it reached the pour() phase // we thus use a fake stage "buckets" and a fake parser "OK" to know if it entered if k == "buckets" { @@ -180,18 +202,18 @@ func DumpTree(parserResults ParserResults, bucketPour BucketPourInfo, opts DumpO var prevItem types.Event for _, stage := range skeys { - parsers := state[tstamp][stage] + parsers := t.state[tstamp][stage] sep := "├" presep := "|" fmt.Printf("\t%s %s\n", sep, stage) - for idx, parser := range parser_order[stage] { + for idx, parser := range t.parserOrder[stage] { res := parsers[parser].Success sep := "├" - if idx == len(parser_order[stage])-1 { + if idx == len(t.parserOrder[stage])-1 { sep = "└" } @@ -278,12 +300,12 @@ func DumpTree(parserResults ParserResults, bucketPour BucketPourInfo, opts DumpO sep := "└" - if len(state[tstamp]["buckets"]) > 0 { + if len(t.state[tstamp]["buckets"]) > 0 { sep = "├" } // did the event enter the bucket pour phase ? 
- if _, ok := state[tstamp]["buckets"]["OK"]; ok { + if _, ok := t.state[tstamp]["buckets"]["OK"]; ok { fmt.Printf("\t%s-------- parser success %s\n", sep, emoji.GreenCircle) } else if whitelistReason != "" { fmt.Printf("\t%s-------- parser success, ignored by whitelist (%s) %s\n", sep, whitelistReason, emoji.GreenCircle) @@ -292,13 +314,13 @@ func DumpTree(parserResults ParserResults, bucketPour BucketPourInfo, opts DumpO } // now print bucket info - if len(state[tstamp]["buckets"]) > 0 { + if len(t.state[tstamp]["buckets"]) > 0 { fmt.Printf("\t├ Scenarios\n") } - bnames := make([]string, 0, len(state[tstamp]["buckets"])) + bnames := make([]string, 0, len(t.state[tstamp]["buckets"])) - for k := range state[tstamp]["buckets"] { + for k := range t.state[tstamp]["buckets"] { // there is a trick : to know if an event successfully exit the parsers, we check if it reached the pour() phase // we thus use a fake stage "buckets" and a fake parser "OK" to know if it entered if k == "OK" { From 8501f459c360c30bb7dae895c8c4a29cf803a339 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 31 May 2024 16:24:33 +0200 Subject: [PATCH 152/318] db,lapi: round durations and timestamps to 1 second (#3015) * cscli/lapi: round decision duration to 1 second * db: round created, updated... 
timestamps to 1 second --- pkg/apiserver/controllers/v1/alerts.go | 2 +- pkg/apiserver/controllers/v1/decisions.go | 2 +- pkg/types/utils.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/apiserver/controllers/v1/alerts.go b/pkg/apiserver/controllers/v1/alerts.go index 19dbf8d0cfa..7483e8dcdf9 100644 --- a/pkg/apiserver/controllers/v1/alerts.go +++ b/pkg/apiserver/controllers/v1/alerts.go @@ -80,7 +80,7 @@ func FormatOneAlert(alert *ent.Alert) *models.Alert { } for _, decisionItem := range alert.Edges.Decisions { - duration := decisionItem.Until.Sub(time.Now().UTC()).String() + duration := decisionItem.Until.Sub(time.Now().UTC()).Round(time.Second).String() outputAlert.Decisions = append(outputAlert.Decisions, &models.Decision{ Duration: &duration, // transform into time.Time ? Scenario: &decisionItem.Scenario, diff --git a/pkg/apiserver/controllers/v1/decisions.go b/pkg/apiserver/controllers/v1/decisions.go index f3c6a7bba26..5cc7628458c 100644 --- a/pkg/apiserver/controllers/v1/decisions.go +++ b/pkg/apiserver/controllers/v1/decisions.go @@ -20,7 +20,7 @@ func FormatDecisions(decisions []*ent.Decision) []*models.Decision { var results []*models.Decision for _, dbDecision := range decisions { - duration := dbDecision.Until.Sub(time.Now().UTC()).String() + duration := dbDecision.Until.Sub(time.Now().UTC()).Round(time.Second).String() decision := models.Decision{ ID: int64(dbDecision.ID), Duration: &duration, diff --git a/pkg/types/utils.go b/pkg/types/utils.go index 712d44ba12d..384dd00367e 100644 --- a/pkg/types/utils.go +++ b/pkg/types/utils.go @@ -66,7 +66,7 @@ func ConfigureLogger(clog *log.Logger) error { } func UtcNow() time.Time { - return time.Now().UTC() + return time.Now().UTC().Round(time.Second) } func IsNetworkFS(path string) (bool, string, error) { From 89d02e94582d9dbc6ad52961429e8da289df8638 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 31 May 2024 16:45:47 +0200 Subject: [PATCH 
153/318] CI/lint: enable some "revive" rules (#3041) argument-limit Warns when a function receives more parameters than the maximum set by the rule's configuration. bare-return Warns on bare (a.k.a. naked) returns context-as-argument By convention, context.Context should be the first parameter of a function. duplicated-imports It is possible to unintentionally import the same package twice. modifies-parameter A function that modifies its parameters can be hard to understand. It can also be misleading if the arguments are passed by value by the caller. redundant-import-alias This rule warns on redundant import aliases. unreachable-code This rule spots and proposes to remove unreachable code. --- .golangci.yml | 22 +++++++------------ cmd/crowdsec/metrics.go | 2 +- pkg/acquisition/modules/file/file_test.go | 2 +- .../loki/internal/lokiclient/loki_client.go | 4 ++-- pkg/acquisition/modules/loki/loki.go | 2 +- pkg/apiserver/apiserver.go | 2 +- pkg/apiserver/controllers/controller.go | 2 +- pkg/appsec/request.go | 3 +-- pkg/csplugin/hclog_adapter.go | 2 +- 9 files changed, 17 insertions(+), 24 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 544cebbac6a..34393fce56e 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -118,10 +118,6 @@ linters-settings: rules: - name: add-constant disabled: true - - name: argument-limit - disabled: true - - name: bare-return - disabled: true - name: blank-imports disabled: true - name: bool-literal-in-expr @@ -132,16 +128,12 @@ linters-settings: disabled: true - name: confusing-results disabled: true - - name: context-as-argument - disabled: true - name: cyclomatic disabled: true - name: deep-exit disabled: true - name: defer disabled: true - - name: duplicated-imports - disabled: true - name: early-return disabled: true - name: empty-block @@ -176,16 +168,12 @@ linters-settings: disabled: true - name: max-public-structs disabled: true - - name: modifies-parameter - disabled: true - name: optimize-operands-order disabled: true 
- name: nested-structs disabled: true - name: package-comments disabled: true - - name: redundant-import-alias - disabled: true - name: struct-tag disabled: true - name: superfluous-else @@ -212,8 +200,6 @@ linters-settings: - "fmt.Println" - name: unnecessary-stmt disabled: true - - name: unreachable-code - disabled: true - name: unused-parameter disabled: true - name: unused-receiver @@ -506,3 +492,11 @@ issues: path: pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go text: "confusing-naming: Method 'QueryRange' differs only by capitalization to method 'queryRange' in the same source file" + - linters: + - revive + path: pkg/metabase/metabase.go + + - linters: + - revive + path: cmd/crowdsec-cli/copyfile.go + diff --git a/cmd/crowdsec/metrics.go b/cmd/crowdsec/metrics.go index cc0c118b436..d3c6e172091 100644 --- a/cmd/crowdsec/metrics.go +++ b/cmd/crowdsec/metrics.go @@ -12,7 +12,7 @@ import ( "github.com/crowdsecurity/go-cs-lib/version" "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" - v1 "github.com/crowdsecurity/crowdsec/pkg/apiserver/controllers/v1" + "github.com/crowdsecurity/crowdsec/pkg/apiserver/controllers/v1" "github.com/crowdsecurity/crowdsec/pkg/cache" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/database" diff --git a/pkg/acquisition/modules/file/file_test.go b/pkg/acquisition/modules/file/file_test.go index ad5fe8bfabd..0873b837a3f 100644 --- a/pkg/acquisition/modules/file/file_test.go +++ b/pkg/acquisition/modules/file/file_test.go @@ -416,8 +416,8 @@ force_inotify: true`, testPattern), for i := 0; i < 5; i++ { _, err = fmt.Fprintf(fd, "%d\n", i) if err != nil { - t.Fatalf("could not write test file : %s", err) os.Remove("test_files/stream.log") + t.Fatalf("could not write test file : %s", err) } } diff --git a/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go b/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go index be14939c44d..6437a951504 100644 
--- a/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go +++ b/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go @@ -106,7 +106,7 @@ func (lc *LokiClient) decreaseTicker(ticker *time.Ticker) { } } -func (lc *LokiClient) queryRange(uri string, ctx context.Context, c chan *LokiQueryRangeResponse, infinite bool) error { +func (lc *LokiClient) queryRange(ctx context.Context, uri string, c chan *LokiQueryRangeResponse, infinite bool) error { lc.currentTickerInterval = 100 * time.Millisecond ticker := time.NewTicker(lc.currentTickerInterval) defer ticker.Stop() @@ -296,7 +296,7 @@ func (lc *LokiClient) QueryRange(ctx context.Context, infinite bool) chan *LokiQ lc.Logger.Infof("Connecting to %s", url) lc.t.Go(func() error { - return lc.queryRange(url, ctx, c, infinite) + return lc.queryRange(ctx, url, c, infinite) }) return c } diff --git a/pkg/acquisition/modules/loki/loki.go b/pkg/acquisition/modules/loki/loki.go index 52b87eb95d3..15c454723ee 100644 --- a/pkg/acquisition/modules/loki/loki.go +++ b/pkg/acquisition/modules/loki/loki.go @@ -19,7 +19,7 @@ import ( yaml "gopkg.in/yaml.v2" "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" - lokiclient "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/loki/internal/lokiclient" + "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/loki/internal/lokiclient" "github.com/crowdsecurity/crowdsec/pkg/types" ) diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go index 6592c8bbf76..056f74a7b31 100644 --- a/pkg/apiserver/apiserver.go +++ b/pkg/apiserver/apiserver.go @@ -21,7 +21,7 @@ import ( "github.com/crowdsecurity/go-cs-lib/trace" "github.com/crowdsecurity/crowdsec/pkg/apiserver/controllers" - v1 "github.com/crowdsecurity/crowdsec/pkg/apiserver/middlewares/v1" + "github.com/crowdsecurity/crowdsec/pkg/apiserver/middlewares/v1" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/csplugin" 
"github.com/crowdsecurity/crowdsec/pkg/database" diff --git a/pkg/apiserver/controllers/controller.go b/pkg/apiserver/controllers/controller.go index bab1965123e..51f359244be 100644 --- a/pkg/apiserver/controllers/controller.go +++ b/pkg/apiserver/controllers/controller.go @@ -9,7 +9,7 @@ import ( "github.com/gin-gonic/gin" log "github.com/sirupsen/logrus" - v1 "github.com/crowdsecurity/crowdsec/pkg/apiserver/controllers/v1" + "github.com/crowdsecurity/crowdsec/pkg/apiserver/controllers/v1" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/csplugin" "github.com/crowdsecurity/crowdsec/pkg/database" diff --git a/pkg/appsec/request.go b/pkg/appsec/request.go index 66b5d797fd7..d0e3632abda 100644 --- a/pkg/appsec/request.go +++ b/pkg/appsec/request.go @@ -12,7 +12,6 @@ import ( "regexp" "github.com/google/uuid" - "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus" ) @@ -276,7 +275,7 @@ func (r *ReqDumpFilter) ToJSON() error { } // Generate a ParsedRequest from a http.Request. 
ParsedRequest can be consumed by the App security Engine -func NewParsedRequestFromRequest(r *http.Request, logger *logrus.Entry) (ParsedRequest, error) { +func NewParsedRequestFromRequest(r *http.Request, logger *log.Entry) (ParsedRequest, error) { var err error contentLength := r.ContentLength if contentLength < 0 { diff --git a/pkg/csplugin/hclog_adapter.go b/pkg/csplugin/hclog_adapter.go index 9550e4b4539..58190684ebc 100644 --- a/pkg/csplugin/hclog_adapter.go +++ b/pkg/csplugin/hclog_adapter.go @@ -230,5 +230,5 @@ func safeString(str fmt.Stringer) (s string) { }() s = str.String() - return + return //nolint:revive // bare return for the defer } From c1e858cbcaf17b0b4f61991c3b268f43dfe1559a Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 31 May 2024 19:15:05 +0200 Subject: [PATCH 154/318] cscli: refactor alerts/console/decisions/setup (#3003) * cscli alerts: extract list(), inspect() * cscli console: extract enroll() * cscli decisions: extract list() * support dump: don't use global * cscli setup: split command constructors * lint --- cmd/crowdsec-cli/alerts.go | 239 ++++++++++++++++++---------------- cmd/crowdsec-cli/console.go | 178 +++++++++++++------------ cmd/crowdsec-cli/decisions.go | 177 +++++++++++++------------ cmd/crowdsec-cli/main.go | 2 +- cmd/crowdsec-cli/setup.go | 153 +++++++++++----------- cmd/crowdsec-cli/support.go | 13 +- 6 files changed, 407 insertions(+), 355 deletions(-) diff --git a/cmd/crowdsec-cli/alerts.go b/cmd/crowdsec-cli/alerts.go index d32af6ae56c..9f4c5093afc 100644 --- a/cmd/crowdsec-cli/alerts.go +++ b/cmd/crowdsec-cli/alerts.go @@ -109,7 +109,8 @@ func (cli *cliAlerts) alertsToTable(alerts *models.GetAlertsResponse, printMachi return nil } -var alertTemplate = ` +func (cli *cliAlerts) displayOneAlert(alert *models.Alert, withDetail bool) error { + alertTemplate := ` ################################################################################################ - ID : {{.ID}} @@ 
-127,7 +128,6 @@ var alertTemplate = ` ` -func (cli *cliAlerts) displayOneAlert(alert *models.Alert, withDetail bool) error { tmpl, err := template.New("alert").Parse(alertTemplate) if err != nil { return err @@ -228,6 +228,92 @@ func (cli *cliAlerts) NewCommand() *cobra.Command { return cmd } +func (cli *cliAlerts) list(alertListFilter apiclient.AlertsListOpts, limit *int, contained *bool, printMachine bool) error { + if err := manageCliDecisionAlerts(alertListFilter.IPEquals, alertListFilter.RangeEquals, + alertListFilter.ScopeEquals, alertListFilter.ValueEquals); err != nil { + return err + } + + if limit != nil { + alertListFilter.Limit = limit + } + + if *alertListFilter.Until == "" { + alertListFilter.Until = nil + } else if strings.HasSuffix(*alertListFilter.Until, "d") { + /*time.ParseDuration support hours 'h' as bigger unit, let's make the user's life easier*/ + realDuration := strings.TrimSuffix(*alertListFilter.Until, "d") + + days, err := strconv.Atoi(realDuration) + if err != nil { + return fmt.Errorf("can't parse duration %s, valid durations format: 1d, 4h, 4h15m", *alertListFilter.Until) + } + + *alertListFilter.Until = fmt.Sprintf("%d%s", days*24, "h") + } + + if *alertListFilter.Since == "" { + alertListFilter.Since = nil + } else if strings.HasSuffix(*alertListFilter.Since, "d") { + // time.ParseDuration support hours 'h' as bigger unit, let's make the user's life easier + realDuration := strings.TrimSuffix(*alertListFilter.Since, "d") + + days, err := strconv.Atoi(realDuration) + if err != nil { + return fmt.Errorf("can't parse duration %s, valid durations format: 1d, 4h, 4h15m", *alertListFilter.Since) + } + + *alertListFilter.Since = fmt.Sprintf("%d%s", days*24, "h") + } + + if *alertListFilter.IncludeCAPI { + *alertListFilter.Limit = 0 + } + + if *alertListFilter.TypeEquals == "" { + alertListFilter.TypeEquals = nil + } + + if *alertListFilter.ScopeEquals == "" { + alertListFilter.ScopeEquals = nil + } + + if *alertListFilter.ValueEquals == 
"" { + alertListFilter.ValueEquals = nil + } + + if *alertListFilter.ScenarioEquals == "" { + alertListFilter.ScenarioEquals = nil + } + + if *alertListFilter.IPEquals == "" { + alertListFilter.IPEquals = nil + } + + if *alertListFilter.RangeEquals == "" { + alertListFilter.RangeEquals = nil + } + + if *alertListFilter.OriginEquals == "" { + alertListFilter.OriginEquals = nil + } + + if contained != nil && *contained { + alertListFilter.Contains = new(bool) + } + + alerts, _, err := cli.client.Alerts.List(context.Background(), alertListFilter) + if err != nil { + return fmt.Errorf("unable to list alerts: %w", err) + } + + if err = cli.alertsToTable(alerts, printMachine); err != nil { + return fmt.Errorf("unable to list alerts: %w", err) + } + + return nil +} + func (cli *cliAlerts) NewListCmd() *cobra.Command { alertListFilter := apiclient.AlertsListOpts{ ScopeEquals: new(string), @@ -259,81 +345,7 @@ cscli alerts list --type ban`, Long: `List alerts with optional filters`, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { - if err := manageCliDecisionAlerts(alertListFilter.IPEquals, alertListFilter.RangeEquals, - alertListFilter.ScopeEquals, alertListFilter.ValueEquals); err != nil { - printHelp(cmd) - return err - } - if limit != nil { - alertListFilter.Limit = limit - } - - if *alertListFilter.Until == "" { - alertListFilter.Until = nil - } else if strings.HasSuffix(*alertListFilter.Until, "d") { - /*time.ParseDuration support hours 'h' as bigger unit, let's make the user's life easier*/ - realDuration := strings.TrimSuffix(*alertListFilter.Until, "d") - days, err := strconv.Atoi(realDuration) - if err != nil { - printHelp(cmd) - return fmt.Errorf("can't parse duration %s, valid durations format: 1d, 4h, 4h15m", *alertListFilter.Until) - } - *alertListFilter.Until = fmt.Sprintf("%d%s", days*24, "h") - } - if *alertListFilter.Since == "" { - alertListFilter.Since = nil - } else if strings.HasSuffix(*alertListFilter.Since, "d") { - 
/*time.ParseDuration support hours 'h' as bigger unit, let's make the user's life easier*/ - realDuration := strings.TrimSuffix(*alertListFilter.Since, "d") - days, err := strconv.Atoi(realDuration) - if err != nil { - printHelp(cmd) - return fmt.Errorf("can't parse duration %s, valid durations format: 1d, 4h, 4h15m", *alertListFilter.Since) - } - *alertListFilter.Since = fmt.Sprintf("%d%s", days*24, "h") - } - - if *alertListFilter.IncludeCAPI { - *alertListFilter.Limit = 0 - } - - if *alertListFilter.TypeEquals == "" { - alertListFilter.TypeEquals = nil - } - if *alertListFilter.ScopeEquals == "" { - alertListFilter.ScopeEquals = nil - } - if *alertListFilter.ValueEquals == "" { - alertListFilter.ValueEquals = nil - } - if *alertListFilter.ScenarioEquals == "" { - alertListFilter.ScenarioEquals = nil - } - if *alertListFilter.IPEquals == "" { - alertListFilter.IPEquals = nil - } - if *alertListFilter.RangeEquals == "" { - alertListFilter.RangeEquals = nil - } - - if *alertListFilter.OriginEquals == "" { - alertListFilter.OriginEquals = nil - } - - if contained != nil && *contained { - alertListFilter.Contains = new(bool) - } - - alerts, _, err := cli.client.Alerts.List(context.Background(), alertListFilter) - if err != nil { - return fmt.Errorf("unable to list alerts: %w", err) - } - - if err = cli.alertsToTable(alerts, printMachine); err != nil { - return fmt.Errorf("unable to list alerts: %w", err) - } - - return nil + return cli.list(alertListFilter, limit, contained, printMachine) }, } @@ -466,6 +478,46 @@ cscli alerts delete -s crowdsecurity/ssh-bf"`, return cmd } +func (cli *cliAlerts) inspect(details bool, alertIDs ...string) error { + cfg := cli.cfg() + + for _, alertID := range alertIDs { + id, err := strconv.Atoi(alertID) + if err != nil { + return fmt.Errorf("bad alert id %s", alertID) + } + + alert, _, err := cli.client.Alerts.GetByID(context.Background(), id) + if err != nil { + return fmt.Errorf("can't find alert with id %s: %w", alertID, err) + } + 
+ switch cfg.Cscli.Output { + case "human": + if err := cli.displayOneAlert(alert, details); err != nil { + log.Warnf("unable to display alert with id %s: %s", alertID, err) + continue + } + case "json": + data, err := json.MarshalIndent(alert, "", " ") + if err != nil { + return fmt.Errorf("unable to marshal alert with id %s: %w", alertID, err) + } + + fmt.Printf("%s\n", string(data)) + case "raw": + data, err := yaml.Marshal(alert) + if err != nil { + return fmt.Errorf("unable to marshal alert with id %s: %w", alertID, err) + } + + fmt.Println(string(data)) + } + } + + return nil +} + func (cli *cliAlerts) NewInspectCmd() *cobra.Command { var details bool @@ -475,42 +527,11 @@ func (cli *cliAlerts) NewInspectCmd() *cobra.Command { Example: `cscli alerts inspect 123`, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { - cfg := cli.cfg() if len(args) == 0 { printHelp(cmd) return errors.New("missing alert_id") } - for _, alertID := range args { - id, err := strconv.Atoi(alertID) - if err != nil { - return fmt.Errorf("bad alert id %s", alertID) - } - alert, _, err := cli.client.Alerts.GetByID(context.Background(), id) - if err != nil { - return fmt.Errorf("can't find alert with id %s: %w", alertID, err) - } - switch cfg.Cscli.Output { - case "human": - if err := cli.displayOneAlert(alert, details); err != nil { - log.Warnf("unable to display alert with id %s: %s", alertID, err) - continue - } - case "json": - data, err := json.MarshalIndent(alert, "", " ") - if err != nil { - return fmt.Errorf("unable to marshal alert with id %s: %w", alertID, err) - } - fmt.Printf("%s\n", string(data)) - case "raw": - data, err := yaml.Marshal(alert) - if err != nil { - return fmt.Errorf("unable to marshal alert with id %s: %w", alertID, err) - } - fmt.Println(string(data)) - } - } - - return nil + return cli.inspect(details, args...) 
}, } diff --git a/cmd/crowdsec-cli/console.go b/cmd/crowdsec-cli/console.go index 149b7656884..eab24a5d709 100644 --- a/cmd/crowdsec-cli/console.go +++ b/cmd/crowdsec-cli/console.go @@ -67,6 +67,99 @@ func (cli *cliConsole) NewCommand() *cobra.Command { return cmd } +func (cli *cliConsole) enroll(key string, name string, overwrite bool, tags []string, opts []string) error { + cfg := cli.cfg() + password := strfmt.Password(cfg.API.Server.OnlineClient.Credentials.Password) + + apiURL, err := url.Parse(cfg.API.Server.OnlineClient.Credentials.URL) + if err != nil { + return fmt.Errorf("could not parse CAPI URL: %w", err) + } + + hub, err := require.Hub(cfg, nil, nil) + if err != nil { + return err + } + + scenarios, err := hub.GetInstalledNamesByType(cwhub.SCENARIOS) + if err != nil { + return fmt.Errorf("failed to get installed scenarios: %w", err) + } + + if len(scenarios) == 0 { + scenarios = make([]string, 0) + } + + enableOpts := []string{csconfig.SEND_MANUAL_SCENARIOS, csconfig.SEND_TAINTED_SCENARIOS} + + if len(opts) != 0 { + for _, opt := range opts { + valid := false + + if opt == "all" { + enableOpts = csconfig.CONSOLE_CONFIGS + break + } + + for _, availableOpt := range csconfig.CONSOLE_CONFIGS { + if opt == availableOpt { + valid = true + enable := true + + for _, enabledOpt := range enableOpts { + if opt == enabledOpt { + enable = false + continue + } + } + + if enable { + enableOpts = append(enableOpts, opt) + } + + break + } + } + + if !valid { + return fmt.Errorf("option %s doesn't exist", opt) + } + } + } + + c, _ := apiclient.NewClient(&apiclient.Config{ + MachineID: cli.cfg().API.Server.OnlineClient.Credentials.Login, + Password: password, + Scenarios: scenarios, + UserAgent: cwversion.UserAgent(), + URL: apiURL, + VersionPrefix: "v3", + }) + + resp, err := c.Auth.EnrollWatcher(context.Background(), key, name, tags, overwrite) + if err != nil { + return fmt.Errorf("could not enroll instance: %w", err) + } + + if resp.Response.StatusCode == 200 && 
!overwrite { + log.Warning("Instance already enrolled. You can use '--overwrite' to force enroll") + return nil + } + + if err := cli.setConsoleOpts(enableOpts, true); err != nil { + return err + } + + for _, opt := range enableOpts { + log.Infof("Enabled %s : %s", opt, csconfig.CONSOLE_CONFIGS_HELP[opt]) + } + + log.Info("Watcher successfully enrolled. Visit https://app.crowdsec.net to accept it.") + log.Info("Please restart crowdsec after accepting the enrollment.") + + return nil +} + func (cli *cliConsole) newEnrollCmd() *cobra.Command { name := "" overwrite := false @@ -90,90 +183,7 @@ After running this command your will need to validate the enrollment in the weba Args: cobra.ExactArgs(1), DisableAutoGenTag: true, RunE: func(_ *cobra.Command, args []string) error { - cfg := cli.cfg() - password := strfmt.Password(cfg.API.Server.OnlineClient.Credentials.Password) - - apiURL, err := url.Parse(cfg.API.Server.OnlineClient.Credentials.URL) - if err != nil { - return fmt.Errorf("could not parse CAPI URL: %w", err) - } - - hub, err := require.Hub(cfg, nil, nil) - if err != nil { - return err - } - - scenarios, err := hub.GetInstalledNamesByType(cwhub.SCENARIOS) - if err != nil { - return fmt.Errorf("failed to get installed scenarios: %w", err) - } - - if len(scenarios) == 0 { - scenarios = make([]string, 0) - } - - enableOpts := []string{csconfig.SEND_MANUAL_SCENARIOS, csconfig.SEND_TAINTED_SCENARIOS} - if len(opts) != 0 { - for _, opt := range opts { - valid := false - if opt == "all" { - enableOpts = csconfig.CONSOLE_CONFIGS - break - } - for _, availableOpt := range csconfig.CONSOLE_CONFIGS { - if opt == availableOpt { - valid = true - enable := true - for _, enabledOpt := range enableOpts { - if opt == enabledOpt { - enable = false - continue - } - } - if enable { - enableOpts = append(enableOpts, opt) - } - - break - } - } - if !valid { - return fmt.Errorf("option %s doesn't exist", opt) - } - } - } - - c, _ := apiclient.NewClient(&apiclient.Config{ - 
MachineID: cli.cfg().API.Server.OnlineClient.Credentials.Login, - Password: password, - Scenarios: scenarios, - UserAgent: cwversion.UserAgent(), - URL: apiURL, - VersionPrefix: "v3", - }) - - resp, err := c.Auth.EnrollWatcher(context.Background(), args[0], name, tags, overwrite) - if err != nil { - return fmt.Errorf("could not enroll instance: %w", err) - } - - if resp.Response.StatusCode == 200 && !overwrite { - log.Warning("Instance already enrolled. You can use '--overwrite' to force enroll") - return nil - } - - if err := cli.setConsoleOpts(enableOpts, true); err != nil { - return err - } - - for _, opt := range enableOpts { - log.Infof("Enabled %s : %s", opt, csconfig.CONSOLE_CONFIGS_HELP[opt]) - } - - log.Info("Watcher successfully enrolled. Visit https://app.crowdsec.net to accept it.") - log.Info("Please restart crowdsec after accepting the enrollment.") - - return nil + return cli.enroll(args[0], name, overwrite, tags, opts) }, } diff --git a/cmd/crowdsec-cli/decisions.go b/cmd/crowdsec-cli/decisions.go index 9b9159561be..de3bf73dca2 100644 --- a/cmd/crowdsec-cli/decisions.go +++ b/cmd/crowdsec-cli/decisions.go @@ -169,8 +169,99 @@ func (cli *cliDecisions) NewCommand() *cobra.Command { return cmd } +func (cli *cliDecisions) list(filter apiclient.AlertsListOpts, NoSimu *bool, contained *bool, printMachine bool) error { + var err error + /*take care of shorthand options*/ + if err = manageCliDecisionAlerts(filter.IPEquals, filter.RangeEquals, filter.ScopeEquals, filter.ValueEquals); err != nil { + return err + } + + filter.ActiveDecisionEquals = new(bool) + *filter.ActiveDecisionEquals = true + + if NoSimu != nil && *NoSimu { + filter.IncludeSimulated = new(bool) + } + /* nullify the empty entries to avoid bad filter */ + if *filter.Until == "" { + filter.Until = nil + } else if strings.HasSuffix(*filter.Until, "d") { + /*time.ParseDuration support hours 'h' as bigger unit, let's make the user's life easier*/ + realDuration := 
strings.TrimSuffix(*filter.Until, "d") + + days, err := strconv.Atoi(realDuration) + if err != nil { + return fmt.Errorf("can't parse duration %s, valid durations format: 1d, 4h, 4h15m", *filter.Until) + } + + *filter.Until = fmt.Sprintf("%d%s", days*24, "h") + } + + if *filter.Since == "" { + filter.Since = nil + } else if strings.HasSuffix(*filter.Since, "d") { + /*time.ParseDuration support hours 'h' as bigger unit, let's make the user's life easier*/ + realDuration := strings.TrimSuffix(*filter.Since, "d") + + days, err := strconv.Atoi(realDuration) + if err != nil { + return fmt.Errorf("can't parse duration %s, valid durations format: 1d, 4h, 4h15m", *filter.Since) + } + + *filter.Since = fmt.Sprintf("%d%s", days*24, "h") + } + + if *filter.IncludeCAPI { + *filter.Limit = 0 + } + + if *filter.TypeEquals == "" { + filter.TypeEquals = nil + } + + if *filter.ValueEquals == "" { + filter.ValueEquals = nil + } + + if *filter.ScopeEquals == "" { + filter.ScopeEquals = nil + } + + if *filter.ScenarioEquals == "" { + filter.ScenarioEquals = nil + } + + if *filter.IPEquals == "" { + filter.IPEquals = nil + } + + if *filter.RangeEquals == "" { + filter.RangeEquals = nil + } + + if *filter.OriginEquals == "" { + filter.OriginEquals = nil + } + + if contained != nil && *contained { + filter.Contains = new(bool) + } + + alerts, _, err := Client.Alerts.List(context.Background(), filter) + if err != nil { + return fmt.Errorf("unable to retrieve decisions: %w", err) + } + + err = cli.decisionsToTable(alerts, printMachine) + if err != nil { + return fmt.Errorf("unable to print decisions: %w", err) + } + + return nil +} + func (cli *cliDecisions) newListCmd() *cobra.Command { - var filter = apiclient.AlertsListOpts{ + filter := apiclient.AlertsListOpts{ ValueEquals: new(string), ScopeEquals: new(string), ScenarioEquals: new(string), @@ -200,83 +291,7 @@ cscli decisions list --origin lists --scenario list_name Args: cobra.ExactArgs(0), DisableAutoGenTag: true, RunE: func(cmd 
*cobra.Command, _ []string) error { - var err error - /*take care of shorthand options*/ - if err = manageCliDecisionAlerts(filter.IPEquals, filter.RangeEquals, filter.ScopeEquals, filter.ValueEquals); err != nil { - return err - } - filter.ActiveDecisionEquals = new(bool) - *filter.ActiveDecisionEquals = true - if NoSimu != nil && *NoSimu { - filter.IncludeSimulated = new(bool) - } - /* nullify the empty entries to avoid bad filter */ - if *filter.Until == "" { - filter.Until = nil - } else if strings.HasSuffix(*filter.Until, "d") { - /*time.ParseDuration support hours 'h' as bigger unit, let's make the user's life easier*/ - realDuration := strings.TrimSuffix(*filter.Until, "d") - days, err := strconv.Atoi(realDuration) - if err != nil { - printHelp(cmd) - return fmt.Errorf("can't parse duration %s, valid durations format: 1d, 4h, 4h15m", *filter.Until) - } - *filter.Until = fmt.Sprintf("%d%s", days*24, "h") - } - - if *filter.Since == "" { - filter.Since = nil - } else if strings.HasSuffix(*filter.Since, "d") { - /*time.ParseDuration support hours 'h' as bigger unit, let's make the user's life easier*/ - realDuration := strings.TrimSuffix(*filter.Since, "d") - days, err := strconv.Atoi(realDuration) - if err != nil { - printHelp(cmd) - return fmt.Errorf("can't parse duration %s, valid durations format: 1d, 4h, 4h15m", *filter.Since) - } - *filter.Since = fmt.Sprintf("%d%s", days*24, "h") - } - if *filter.IncludeCAPI { - *filter.Limit = 0 - } - if *filter.TypeEquals == "" { - filter.TypeEquals = nil - } - if *filter.ValueEquals == "" { - filter.ValueEquals = nil - } - if *filter.ScopeEquals == "" { - filter.ScopeEquals = nil - } - if *filter.ScenarioEquals == "" { - filter.ScenarioEquals = nil - } - if *filter.IPEquals == "" { - filter.IPEquals = nil - } - if *filter.RangeEquals == "" { - filter.RangeEquals = nil - } - - if *filter.OriginEquals == "" { - filter.OriginEquals = nil - } - - if contained != nil && *contained { - filter.Contains = new(bool) - } - - 
alerts, _, err := Client.Alerts.List(context.Background(), filter) - if err != nil { - return fmt.Errorf("unable to retrieve decisions: %w", err) - } - - err = cli.decisionsToTable(alerts, printMachine) - if err != nil { - return fmt.Errorf("unable to print decisions: %w", err) - } - - return nil + return cli.list(filter, NoSimu, contained, printMachine) }, } cmd.Flags().SortFlags = false @@ -489,15 +504,15 @@ cscli decisions delete --origin lists --scenario list_name if delDecisionID == "" { decisions, _, err = Client.Decisions.Delete(context.Background(), delFilter) if err != nil { - return fmt.Errorf("unable to delete decisions: %v", err) + return fmt.Errorf("unable to delete decisions: %w", err) } } else { if _, err = strconv.Atoi(delDecisionID); err != nil { - return fmt.Errorf("id '%s' is not an integer: %v", delDecisionID, err) + return fmt.Errorf("id '%s' is not an integer: %w", delDecisionID, err) } decisions, _, err = Client.Decisions.DeleteOne(context.Background(), delDecisionID) if err != nil { - return fmt.Errorf("unable to delete decision: %v", err) + return fmt.Errorf("unable to delete decision: %w", err) } } log.Infof("%s decision(s) deleted", decisions.NbDeleted) diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index 3881818123f..622fe50a09d 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -269,7 +269,7 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewCLIAppsecRule(cli.cfg).NewCommand()) if fflag.CscliSetup.IsEnabled() { - cmd.AddCommand(NewSetupCmd()) + cmd.AddCommand(NewCLISetup(cli.cfg).NewCommand()) } return cmd diff --git a/cmd/crowdsec-cli/setup.go b/cmd/crowdsec-cli/setup.go index ba3670848d8..8a8c5a6c665 100644 --- a/cmd/crowdsec-cli/setup.go +++ b/cmd/crowdsec-cli/setup.go @@ -17,9 +17,18 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/setup" ) -// NewSetupCmd defines the "cscli setup" command. 
-func NewSetupCmd() *cobra.Command { - cmdSetup := &cobra.Command{ +type cliSetup struct { + cfg configGetter +} + +func NewCLISetup(cfg configGetter) *cliSetup { + return &cliSetup{ + cfg: cfg, + } +} + +func (cli *cliSetup) NewCommand() *cobra.Command { + cmd := &cobra.Command{ Use: "setup", Short: "Tools to configure crowdsec", Long: "Manage hub configuration and service detection", @@ -27,83 +36,79 @@ func NewSetupCmd() *cobra.Command { DisableAutoGenTag: true, } - // - // cscli setup detect - // - { - cmdSetupDetect := &cobra.Command{ - Use: "detect", - Short: "detect running services, generate a setup file", - DisableAutoGenTag: true, - RunE: runSetupDetect, - } + cmd.AddCommand(cli.NewDetectCmd()) + cmd.AddCommand(cli.NewInstallHubCmd()) + cmd.AddCommand(cli.NewDataSourcesCmd()) + cmd.AddCommand(cli.NewValidateCmd()) - defaultServiceDetect := csconfig.DefaultConfigPath("hub", "detect.yaml") - - flags := cmdSetupDetect.Flags() - flags.String("detect-config", defaultServiceDetect, "path to service detection configuration") - flags.Bool("list-supported-services", false, "do not detect; only print supported services") - flags.StringSlice("force-unit", nil, "force detection of a systemd unit (can be repeated)") - flags.StringSlice("force-process", nil, "force detection of a running process (can be repeated)") - flags.StringSlice("skip-service", nil, "ignore a service, don't recommend hub/datasources (can be repeated)") - flags.String("force-os-family", "", "override OS.Family: one of linux, freebsd, windows or darwin") - flags.String("force-os-id", "", "override OS.ID=[debian | ubuntu | , redhat...]") - flags.String("force-os-version", "", "override OS.RawVersion (of OS or Linux distribution)") - flags.Bool("snub-systemd", false, "don't use systemd, even if available") - flags.Bool("yaml", false, "output yaml, not json") - cmdSetup.AddCommand(cmdSetupDetect) - } - - // - // cscli setup install-hub - // - { - cmdSetupInstallHub := &cobra.Command{ - Use: 
"install-hub [setup_file] [flags]", - Short: "install items from a setup file", - Args: cobra.ExactArgs(1), - DisableAutoGenTag: true, - RunE: runSetupInstallHub, - } + return cmd +} - flags := cmdSetupInstallHub.Flags() - flags.Bool("dry-run", false, "don't install anything; print out what would have been") - cmdSetup.AddCommand(cmdSetupInstallHub) - } - - // - // cscli setup datasources - // - { - cmdSetupDataSources := &cobra.Command{ - Use: "datasources [setup_file] [flags]", - Short: "generate datasource (acquisition) configuration from a setup file", - Args: cobra.ExactArgs(1), - DisableAutoGenTag: true, - RunE: runSetupDataSources, - } +func (cli *cliSetup) NewDetectCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "detect", + Short: "detect running services, generate a setup file", + DisableAutoGenTag: true, + RunE: runSetupDetect, + } - flags := cmdSetupDataSources.Flags() - flags.String("to-dir", "", "write the configuration to a directory, in multiple files") - cmdSetup.AddCommand(cmdSetupDataSources) - } - - // - // cscli setup validate - // - { - cmdSetupValidate := &cobra.Command{ - Use: "validate [setup_file]", - Short: "validate a setup file", - Args: cobra.ExactArgs(1), - DisableAutoGenTag: true, - RunE: runSetupValidate, - } + defaultServiceDetect := csconfig.DefaultConfigPath("hub", "detect.yaml") + + flags := cmd.Flags() + flags.String("detect-config", defaultServiceDetect, "path to service detection configuration") + flags.Bool("list-supported-services", false, "do not detect; only print supported services") + flags.StringSlice("force-unit", nil, "force detection of a systemd unit (can be repeated)") + flags.StringSlice("force-process", nil, "force detection of a running process (can be repeated)") + flags.StringSlice("skip-service", nil, "ignore a service, don't recommend hub/datasources (can be repeated)") + flags.String("force-os-family", "", "override OS.Family: one of linux, freebsd, windows or darwin") + flags.String("force-os-id", 
"", "override OS.ID=[debian | ubuntu | , redhat...]") + flags.String("force-os-version", "", "override OS.RawVersion (of OS or Linux distribution)") + flags.Bool("snub-systemd", false, "don't use systemd, even if available") + flags.Bool("yaml", false, "output yaml, not json") + + return cmd +} - cmdSetup.AddCommand(cmdSetupValidate) +func (cli *cliSetup) NewInstallHubCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "install-hub [setup_file] [flags]", + Short: "install items from a setup file", + Args: cobra.ExactArgs(1), + DisableAutoGenTag: true, + RunE: runSetupInstallHub, + } + + flags := cmd.Flags() + flags.Bool("dry-run", false, "don't install anything; print out what would have been") + + return cmd +} + +func (cli *cliSetup) NewDataSourcesCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "datasources [setup_file] [flags]", + Short: "generate datasource (acquisition) configuration from a setup file", + Args: cobra.ExactArgs(1), + DisableAutoGenTag: true, + RunE: runSetupDataSources, + } + + flags := cmd.Flags() + flags.String("to-dir", "", "write the configuration to a directory, in multiple files") + + return cmd +} + +func (cli *cliSetup) NewValidateCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "validate [setup_file]", + Short: "validate a setup file", + Args: cobra.ExactArgs(1), + DisableAutoGenTag: true, + RunE: runSetupValidate, } - return cmdSetup + return cmd } func runSetupDetect(cmd *cobra.Command, args []string) error { diff --git a/cmd/crowdsec-cli/support.go b/cmd/crowdsec-cli/support.go index 61fa2b55817..41e23dc9654 100644 --- a/cmd/crowdsec-cli/support.go +++ b/cmd/crowdsec-cli/support.go @@ -23,6 +23,7 @@ import ( "github.com/crowdsecurity/go-cs-lib/trace" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/database" @@ -294,7 
+295,7 @@ func (cli *cliSupport) dumpConfigYAML(zw *zip.Writer) error { return nil } -func (cli *cliSupport) dumpPprof(ctx context.Context, zw *zip.Writer, endpoint string) error { +func (cli *cliSupport) dumpPprof(ctx context.Context, zw *zip.Writer, prometheusCfg csconfig.PrometheusCfg, endpoint string) error { log.Infof("Collecting pprof/%s data", endpoint) ctx, cancel := context.WithTimeout(ctx, 120*time.Second) @@ -306,8 +307,8 @@ func (cli *cliSupport) dumpPprof(ctx context.Context, zw *zip.Writer, endpoint s fmt.Sprintf( "http://%s/debug/pprof/%s?debug=1", net.JoinHostPort( - csConfig.Prometheus.ListenAddr, - strconv.Itoa(csConfig.Prometheus.ListenPort), + prometheusCfg.ListenAddr, + strconv.Itoa(prometheusCfg.ListenPort), ), endpoint, ), @@ -538,15 +539,15 @@ func (cli *cliSupport) dump(ctx context.Context, outFile string) error { // call pprof separately, one might fail for timeout - if err = cli.dumpPprof(ctx, zipWriter, "goroutine"); err != nil { + if err = cli.dumpPprof(ctx, zipWriter, *cfg.Prometheus, "goroutine"); err != nil { log.Warnf("could not collect pprof goroutine data: %s", err) } - if err = cli.dumpPprof(ctx, zipWriter, "heap"); err != nil { + if err = cli.dumpPprof(ctx, zipWriter, *cfg.Prometheus, "heap"); err != nil { log.Warnf("could not collect pprof heap data: %s", err) } - if err = cli.dumpPprof(ctx, zipWriter, "profile"); err != nil { + if err = cli.dumpPprof(ctx, zipWriter, *cfg.Prometheus, "profile"); err != nil { log.Warnf("could not collect pprof cpu data: %s", err) } From 9823ed61b694d7b65f79dfe354679593908d8272 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 3 Jun 2024 12:21:00 +0200 Subject: [PATCH 155/318] cscli: log.Fatal -> return err (#3049) * cscli: log.Fatal -> return err * lint --- cmd/crowdsec-cli/alerts_table.go | 8 ++++++ cmd/crowdsec-cli/capi.go | 10 ++------ cmd/crowdsec-cli/decisions_import.go | 13 +++++----- cmd/crowdsec-cli/doc.go | 2 +- cmd/crowdsec-cli/flag.go | 14 
+++++----- cmd/crowdsec-cli/hub.go | 2 +- cmd/crowdsec-cli/itemcli.go | 26 +++++++++---------- cmd/crowdsec-cli/main.go | 38 ++++++++++++++++++---------- cmd/crowdsec-cli/notifications.go | 3 +++ cmd/crowdsec-cli/papi.go | 2 +- 10 files changed, 66 insertions(+), 52 deletions(-) diff --git a/cmd/crowdsec-cli/alerts_table.go b/cmd/crowdsec-cli/alerts_table.go index ec457f3723e..fbde4d2aaa9 100644 --- a/cmd/crowdsec-cli/alerts_table.go +++ b/cmd/crowdsec-cli/alerts_table.go @@ -15,10 +15,12 @@ import ( func alertsTable(out io.Writer, alerts *models.GetAlertsResponse, printMachine bool) { t := newTable(out) t.SetRowLines(false) + header := []string{"ID", "value", "reason", "country", "as", "decisions", "created_at"} if printMachine { header = append(header, "machine") } + t.SetHeaders(header...) for _, alertItem := range *alerts { @@ -54,20 +56,25 @@ func alertDecisionsTable(out io.Writer, alert *models.Alert) { t := newTable(out) t.SetRowLines(false) t.SetHeaders("ID", "scope:value", "action", "expiration", "created_at") + for _, decision := range alert.Decisions { parsedDuration, err := time.ParseDuration(*decision.Duration) if err != nil { log.Error(err) } + expire := time.Now().UTC().Add(parsedDuration) if time.Now().UTC().After(expire) { continue } + foundActive = true scopeAndValue := *decision.Scope + if *decision.Value != "" { scopeAndValue += ":" + *decision.Value } + t.AddRow( strconv.Itoa(int(decision.ID)), scopeAndValue, @@ -76,6 +83,7 @@ func alertDecisionsTable(out io.Writer, alert *models.Alert) { alert.CreatedAt, ) } + if foundActive { fmt.Printf(" - Active Decisions :\n") t.Render() // Send output diff --git a/cmd/crowdsec-cli/capi.go b/cmd/crowdsec-cli/capi.go index f5940d71b66..7b1613c5463 100644 --- a/cmd/crowdsec-cli/capi.go +++ b/cmd/crowdsec-cli/capi.go @@ -48,11 +48,7 @@ func (cli *cliCapi) NewCommand() *cobra.Command { return err } - if err := require.CAPI(cfg); err != nil { - return err - } - - return nil + return require.CAPI(cfg) }, } @@ 
-147,9 +143,7 @@ func (cli *cliCapi) newRegisterCmd() *cobra.Command { cmd.Flags().StringVarP(&outputFile, "file", "f", "", "output file destination") cmd.Flags().StringVar(&capiUserPrefix, "schmilblick", "", "set a schmilblick (use in tests only)") - if err := cmd.Flags().MarkHidden("schmilblick"); err != nil { - log.Fatalf("failed to hide flag: %s", err) - } + _ = cmd.Flags().MarkHidden("schmilblick") return cmd } diff --git a/cmd/crowdsec-cli/decisions_import.go b/cmd/crowdsec-cli/decisions_import.go index 8c36bd5dc92..1df9d1ae4a1 100644 --- a/cmd/crowdsec-cli/decisions_import.go +++ b/cmd/crowdsec-cli/decisions_import.go @@ -46,7 +46,7 @@ func parseDecisionList(content []byte, format string) ([]decisionRaw, error) { } if err := scanner.Err(); err != nil { - return nil, fmt.Errorf("unable to parse values: '%s'", err) + return nil, fmt.Errorf("unable to parse values: '%w'", err) } case "json": log.Infof("Parsing json") @@ -58,7 +58,7 @@ func parseDecisionList(content []byte, format string) ([]decisionRaw, error) { log.Infof("Parsing csv") if err := csvutil.Unmarshal(content, &ret); err != nil { - return nil, fmt.Errorf("unable to parse csv: '%s'", err) + return nil, fmt.Errorf("unable to parse csv: '%w'", err) } default: return nil, fmt.Errorf("invalid format '%s', expected one of 'json', 'csv', 'values'", format) @@ -67,8 +67,7 @@ func parseDecisionList(content []byte, format string) ([]decisionRaw, error) { return ret, nil } - -func (cli *cliDecisions) runImport(cmd *cobra.Command, args []string) error { +func (cli *cliDecisions) runImport(cmd *cobra.Command, args []string) error { flags := cmd.Flags() input, err := flags.GetString("input") @@ -146,13 +145,13 @@ func (cli *cliDecisions) runImport(cmd *cobra.Command, args []string) error { } else { fin, err = os.Open(input) if err != nil { - return fmt.Errorf("unable to open %s: %s", input, err) + return fmt.Errorf("unable to open %s: %w", input, err) } } content, err = io.ReadAll(fin) if err != nil { - return 
fmt.Errorf("unable to read from %s: %s", input, err) + return fmt.Errorf("unable to read from %s: %w", input, err) } decisionsListRaw, err := parseDecisionList(content, format) @@ -243,7 +242,7 @@ func (cli *cliDecisions) newImportCmd() *cobra.Command { Long: "expected format:\n" + "csv : any of duration,reason,scope,type,value, with a header line\n" + "json :" + "`{" + `"duration" : "24h", "reason" : "my_scenario", "scope" : "ip", "type" : "ban", "value" : "x.y.z.z"` + "}`", - Args: cobra.NoArgs, + Args: cobra.NoArgs, DisableAutoGenTag: true, Example: `decisions.csv: duration,scope,value diff --git a/cmd/crowdsec-cli/doc.go b/cmd/crowdsec-cli/doc.go index 4b1d50d1583..db1e642115e 100644 --- a/cmd/crowdsec-cli/doc.go +++ b/cmd/crowdsec-cli/doc.go @@ -24,7 +24,7 @@ func (cli cliDoc) NewCommand(rootCmd *cobra.Command) *cobra.Command { DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { if err := doc.GenMarkdownTreeCustom(rootCmd, "./doc/", cli.filePrepender, cli.linkHandler); err != nil { - return fmt.Errorf("failed to generate cobra doc: %s", err) + return fmt.Errorf("failed to generate cobra doc: %w", err) } return nil }, diff --git a/cmd/crowdsec-cli/flag.go b/cmd/crowdsec-cli/flag.go index e85f33d4467..1780d08e5f7 100644 --- a/cmd/crowdsec-cli/flag.go +++ b/cmd/crowdsec-cli/flag.go @@ -9,21 +9,21 @@ import ( type MachinePassword string func (p *MachinePassword) String() string { - return string(*p) + return string(*p) } func (p *MachinePassword) Set(v string) error { // a password can't be more than 72 characters // due to bcrypt limitations - if len(v) > 72 { - return errors.New("password too long (max 72 characters)") - } + if len(v) > 72 { + return errors.New("password too long (max 72 characters)") + } - *p = MachinePassword(v) + *p = MachinePassword(v) - return nil + return nil } func (p *MachinePassword) Type() string { - return "string" + return "string" } diff --git a/cmd/crowdsec-cli/hub.go b/cmd/crowdsec-cli/hub.go index 
62eb894d8f2..318dd018e06 100644 --- a/cmd/crowdsec-cli/hub.go +++ b/cmd/crowdsec-cli/hub.go @@ -13,7 +13,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -type cliHub struct{ +type cliHub struct { cfg configGetter } diff --git a/cmd/crowdsec-cli/itemcli.go b/cmd/crowdsec-cli/itemcli.go index a72cfa735c5..e1a908f3492 100644 --- a/cmd/crowdsec-cli/itemcli.go +++ b/cmd/crowdsec-cli/itemcli.go @@ -44,8 +44,8 @@ type cliItem struct { func (cli cliItem) NewCommand() *cobra.Command { cmd := &cobra.Command{ - Use: coalesce.String(cli.help.use, fmt.Sprintf("%s [item]...", cli.name)), - Short: coalesce.String(cli.help.short, fmt.Sprintf("Manage hub %s", cli.name)), + Use: coalesce.String(cli.help.use, cli.name+" [item]..."), + Short: coalesce.String(cli.help.short, "Manage hub "+cli.name), Long: cli.help.long, Example: cli.help.example, Args: cobra.MinimumNArgs(1), @@ -106,7 +106,7 @@ func (cli cliItem) newInstallCmd() *cobra.Command { cmd := &cobra.Command{ Use: coalesce.String(cli.installHelp.use, "install [item]..."), - Short: coalesce.String(cli.installHelp.short, fmt.Sprintf("Install given %s", cli.oneOrMore)), + Short: coalesce.String(cli.installHelp.short, "Install given "+cli.oneOrMore), Long: coalesce.String(cli.installHelp.long, fmt.Sprintf("Fetch and install one or more %s from the hub", cli.name)), Example: cli.installHelp.example, Args: cobra.MinimumNArgs(1), @@ -122,7 +122,7 @@ func (cli cliItem) newInstallCmd() *cobra.Command { flags := cmd.Flags() flags.BoolVarP(&downloadOnly, "download-only", "d", false, "Only download packages, don't enable") flags.BoolVar(&force, "force", false, "Force install: overwrite tainted and outdated files") - flags.BoolVar(&ignoreError, "ignore", false, fmt.Sprintf("Ignore errors when installing multiple %s", cli.name)) + flags.BoolVar(&ignoreError, "ignore", false, "Ignore errors when installing multiple "+cli.name) return cmd } @@ -232,8 +232,8 @@ func (cli cliItem) newRemoveCmd() *cobra.Command { cmd := 
&cobra.Command{ Use: coalesce.String(cli.removeHelp.use, "remove [item]..."), - Short: coalesce.String(cli.removeHelp.short, fmt.Sprintf("Remove given %s", cli.oneOrMore)), - Long: coalesce.String(cli.removeHelp.long, fmt.Sprintf("Remove one or more %s", cli.name)), + Short: coalesce.String(cli.removeHelp.short, "Remove given "+cli.oneOrMore), + Long: coalesce.String(cli.removeHelp.long, "Remove one or more "+cli.name), Example: cli.removeHelp.example, Aliases: []string{"delete"}, DisableAutoGenTag: true, @@ -248,7 +248,7 @@ func (cli cliItem) newRemoveCmd() *cobra.Command { flags := cmd.Flags() flags.BoolVar(&purge, "purge", false, "Delete source file too") flags.BoolVar(&force, "force", false, "Force remove: remove tainted and outdated files") - flags.BoolVar(&all, "all", false, fmt.Sprintf("Remove all the %s", cli.name)) + flags.BoolVar(&all, "all", false, "Remove all the "+cli.name) return cmd } @@ -328,7 +328,7 @@ func (cli cliItem) newUpgradeCmd() *cobra.Command { cmd := &cobra.Command{ Use: coalesce.String(cli.upgradeHelp.use, "upgrade [item]..."), - Short: coalesce.String(cli.upgradeHelp.short, fmt.Sprintf("Upgrade given %s", cli.oneOrMore)), + Short: coalesce.String(cli.upgradeHelp.short, "Upgrade given "+cli.oneOrMore), Long: coalesce.String(cli.upgradeHelp.long, fmt.Sprintf("Fetch and upgrade one or more %s from the hub", cli.name)), Example: cli.upgradeHelp.example, DisableAutoGenTag: true, @@ -341,7 +341,7 @@ func (cli cliItem) newUpgradeCmd() *cobra.Command { } flags := cmd.Flags() - flags.BoolVarP(&all, "all", "a", false, fmt.Sprintf("Upgrade all the %s", cli.name)) + flags.BoolVarP(&all, "all", "a", false, "Upgrade all the "+cli.name) flags.BoolVar(&force, "force", false, "Force upgrade: overwrite tainted and outdated files") return cmd @@ -405,8 +405,8 @@ func (cli cliItem) newInspectCmd() *cobra.Command { cmd := &cobra.Command{ Use: coalesce.String(cli.inspectHelp.use, "inspect [item]..."), - Short: coalesce.String(cli.inspectHelp.short, 
fmt.Sprintf("Inspect given %s", cli.oneOrMore)), - Long: coalesce.String(cli.inspectHelp.long, fmt.Sprintf("Inspect the state of one or more %s", cli.name)), + Short: coalesce.String(cli.inspectHelp.short, "Inspect given "+cli.oneOrMore), + Long: coalesce.String(cli.inspectHelp.long, "Inspect the state of one or more "+cli.name), Example: cli.inspectHelp.example, Args: cobra.MinimumNArgs(1), DisableAutoGenTag: true, @@ -454,8 +454,8 @@ func (cli cliItem) newListCmd() *cobra.Command { cmd := &cobra.Command{ Use: coalesce.String(cli.listHelp.use, "list [item... | -a]"), - Short: coalesce.String(cli.listHelp.short, fmt.Sprintf("List %s", cli.oneOrMore)), - Long: coalesce.String(cli.listHelp.long, fmt.Sprintf("List of installed/available/specified %s", cli.name)), + Short: coalesce.String(cli.listHelp.short, "List "+cli.oneOrMore), + Long: coalesce.String(cli.listHelp.long, "List of installed/available/specified "+cli.name), Example: cli.listHelp.example, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, args []string) error { diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index 622fe50a09d..a712960810a 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -98,14 +98,14 @@ func loadConfigFor(command string) (*csconfig.Config, string, error) { } // initialize is called before the subcommand is executed. 
-func (cli *cliRoot) initialize() { +func (cli *cliRoot) initialize() error { var err error log.SetLevel(cli.wantedLogLevel()) csConfig, mergedConfig, err = loadConfigFor(os.Args[1]) if err != nil { - log.Fatal(err) + return err } // recap of the enabled feature flags, because logging @@ -127,7 +127,7 @@ func (cli *cliRoot) initialize() { } if csConfig.Cscli.Output != "human" && csConfig.Cscli.Output != "json" && csConfig.Cscli.Output != "raw" { - log.Fatalf("output format '%s' not supported: must be one of human, json, raw", csConfig.Cscli.Output) + return fmt.Errorf("output format '%s' not supported: must be one of human, json, raw", csConfig.Cscli.Output) } log.SetFormatter(&log.TextFormatter{DisableTimestamp: true}) @@ -143,9 +143,11 @@ func (cli *cliRoot) initialize() { csConfig.Cscli.Color = cli.outputColor if cli.outputColor != "yes" && cli.outputColor != "no" && cli.outputColor != "auto" { - log.Fatalf("output color %s unknown", cli.outputColor) + return fmt.Errorf("output color '%s' not supported: must be one of yes, no, auto", cli.outputColor) } } + + return nil } // list of valid subcommands for the shell completion @@ -174,17 +176,17 @@ func (cli *cliRoot) colorize(cmd *cobra.Command) { cmd.SetOut(color.Output) } -func (cli *cliRoot) NewCommand() *cobra.Command { +func (cli *cliRoot) NewCommand() (*cobra.Command, error) { // set the formatter asap and worry about level later logFormatter := &log.TextFormatter{TimestampFormat: time.RFC3339, FullTimestamp: true} log.SetFormatter(logFormatter) if err := fflag.RegisterAllFeatures(); err != nil { - log.Fatalf("failed to register features: %s", err) + return nil, fmt.Errorf("failed to register features: %w", err) } if err := csconfig.LoadFeatureFlagsEnv(log.StandardLogger()); err != nil { - log.Fatalf("failed to set feature flags from env: %s", err) + return nil, fmt.Errorf("failed to set feature flags from env: %w", err) } cmd := &cobra.Command{ @@ -217,9 +219,7 @@ It is meant to allow you to manage bans, 
parsers/scenarios/etc, api and generall pflags.BoolVar(&cli.logTrace, "trace", false, "Set logging to trace") pflags.StringVar(&cli.flagBranch, "branch", "", "Override hub branch on github") - if err := pflags.MarkHidden("branch"); err != nil { - log.Fatalf("failed to hide flag: %s", err) - } + _ = pflags.MarkHidden("branch") // Look for "-c /path/to/config.yaml" // This duplicates the logic in cobra, but we need to do it before @@ -233,11 +233,17 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall } if err := csconfig.LoadFeatureFlagsFile(ConfigFilePath, log.StandardLogger()); err != nil { - log.Fatal(err) + return nil, err } if len(os.Args) > 1 { - cobra.OnInitialize(cli.initialize) + cobra.OnInitialize( + func() { + if err := cli.initialize(); err != nil { + log.Fatal(err) + } + }, + ) } cmd.AddCommand(NewCLIDoc().NewCommand(cmd)) @@ -272,11 +278,15 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewCLISetup(cli.cfg).NewCommand()) } - return cmd + return cmd, nil } func main() { - cmd := newCliRoot().NewCommand() + cmd, err := newCliRoot().NewCommand() + if err != nil { + log.Fatal(err) + } + if err := cmd.Execute(); err != nil { log.Fatal(err) } diff --git a/cmd/crowdsec-cli/notifications.go b/cmd/crowdsec-cli/notifications.go index 84dd6b941c9..d678bf0e306 100644 --- a/cmd/crowdsec-cli/notifications.go +++ b/cmd/crowdsec-cli/notifications.go @@ -250,12 +250,15 @@ func (cli *cliNotifications) notificationConfigFilter(cmd *cobra.Command, args [ if err != nil { return nil, cobra.ShellCompDirectiveError } + var ret []string + for k := range ncfgs { if strings.Contains(k, toComplete) && !slices.Contains(args, k) { ret = append(ret, k) } } + return ret, cobra.ShellCompDirectiveNoFileComp } diff --git a/cmd/crowdsec-cli/papi.go b/cmd/crowdsec-cli/papi.go index 558409b2d4d..bea03c12848 100644 --- a/cmd/crowdsec-cli/papi.go +++ b/cmd/crowdsec-cli/papi.go @@ -142,7 +142,7 @@ func (cli 
*cliPapi) NewSyncCmd() *cobra.Command { apic.Shutdown() papi.Shutdown() t.Wait() - time.Sleep(5 * time.Second) //FIXME: the push done by apic.Push is run inside a sub goroutine, sleep to make sure it's done + time.Sleep(5 * time.Second) // FIXME: the push done by apic.Push is run inside a sub goroutine, sleep to make sure it's done return nil }, From 7313d491450974f8595db6afe209a2bcccddb70a Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 4 Jun 2024 09:47:25 +0200 Subject: [PATCH 156/318] enable linter: revive(bool-literal-in-expr) (#2983) --- .golangci.yml | 2 -- pkg/acquisition/modules/docker/docker_test.go | 4 ++-- pkg/dumps/parser_dump.go | 2 +- pkg/leakybucket/buckets_test.go | 2 +- pkg/leakybucket/overflows.go | 2 +- pkg/parser/node_test.go | 8 ++++---- pkg/parser/parsing_test.go | 4 ++-- 7 files changed, 11 insertions(+), 13 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 34393fce56e..61278185633 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -120,8 +120,6 @@ linters-settings: disabled: true - name: blank-imports disabled: true - - name: bool-literal-in-expr - disabled: true - name: cognitive-complexity disabled: true - name: comment-spacings diff --git a/pkg/acquisition/modules/docker/docker_test.go b/pkg/acquisition/modules/docker/docker_test.go index c2624229afd..bcf0966a2d1 100644 --- a/pkg/acquisition/modules/docker/docker_test.go +++ b/pkg/acquisition/modules/docker/docker_test.go @@ -228,7 +228,7 @@ container_name_regexp: } func (cli *mockDockerCli) ContainerList(ctx context.Context, options dockerTypes.ContainerListOptions) ([]dockerTypes.Container, error) { - if readLogs == true { + if readLogs { return []dockerTypes.Container{}, nil } @@ -243,7 +243,7 @@ func (cli *mockDockerCli) ContainerList(ctx context.Context, options dockerTypes } func (cli *mockDockerCli) ContainerLogs(ctx context.Context, container string, options dockerTypes.ContainerLogsOptions) (io.ReadCloser, error) { - if 
readLogs == true { + if readLogs { return io.NopCloser(strings.NewReader("")), nil } diff --git a/pkg/dumps/parser_dump.go b/pkg/dumps/parser_dump.go index 7d4e09ecf1a..d43f3cdc1b9 100644 --- a/pkg/dumps/parser_dump.go +++ b/pkg/dumps/parser_dump.go @@ -235,7 +235,7 @@ func (t *tree) displayResults(opts DumpOpts) { case "update": detailsDisplay += fmt.Sprintf("\t%s\t\t%s %s evt.%s : %s -> %s\n", presep, sep, change.Type, strings.Join(change.Path, "."), change.From, yellow(change.To)) - if change.Path[0] == "Whitelisted" && change.To == true { + if change.Path[0] == "Whitelisted" && change.To == true { //nolint:revive whitelisted = true if whitelistReason == "" { diff --git a/pkg/leakybucket/buckets_test.go b/pkg/leakybucket/buckets_test.go index 9e7205e8613..ad0d1a79879 100644 --- a/pkg/leakybucket/buckets_test.go +++ b/pkg/leakybucket/buckets_test.go @@ -88,7 +88,7 @@ func TestBucket(t *testing.T) { // we want to avoid the death of the tomb because all existing buckets have been destroyed. 
func watchTomb(tomb *tomb.Tomb) { for { - if tomb.Alive() == false { + if !tomb.Alive() { log.Warning("Tomb is dead") break } diff --git a/pkg/leakybucket/overflows.go b/pkg/leakybucket/overflows.go index 8092ef35e77..3ee067177ef 100644 --- a/pkg/leakybucket/overflows.go +++ b/pkg/leakybucket/overflows.go @@ -341,7 +341,7 @@ func NewAlert(leaky *Leaky, queue *types.Queue) (types.RuntimeAlert, error) { srcCopy := srcValue newApiAlert.Source = &srcCopy - if v, ok := leaky.BucketConfig.Labels["remediation"]; ok && v == true { + if v, ok := leaky.BucketConfig.Labels["remediation"]; ok && v == true { //nolint:revive newApiAlert.Remediation = true } diff --git a/pkg/parser/node_test.go b/pkg/parser/node_test.go index be12176bff9..76d35a9ffb0 100644 --- a/pkg/parser/node_test.go +++ b/pkg/parser/node_test.go @@ -49,18 +49,18 @@ func TestParserConfigs(t *testing.T) { } for idx := range CfgTests { err := CfgTests[idx].NodeCfg.compile(pctx, EnricherCtx{}) - if CfgTests[idx].Compiles == true && err != nil { + if CfgTests[idx].Compiles && err != nil { t.Fatalf("Compile: (%d/%d) expected valid, got : %s", idx+1, len(CfgTests), err) } - if CfgTests[idx].Compiles == false && err == nil { + if !CfgTests[idx].Compiles && err == nil { t.Fatalf("Compile: (%d/%d) expected error", idx+1, len(CfgTests)) } err = CfgTests[idx].NodeCfg.validate(EnricherCtx{}) - if CfgTests[idx].Valid == true && err != nil { + if CfgTests[idx].Valid && err != nil { t.Fatalf("Valid: (%d/%d) expected valid, got : %s", idx+1, len(CfgTests), err) } - if CfgTests[idx].Valid == false && err == nil { + if !CfgTests[idx].Valid && err == nil { t.Fatalf("Valid: (%d/%d) expected error", idx+1, len(CfgTests)) } } diff --git a/pkg/parser/parsing_test.go b/pkg/parser/parsing_test.go index d97dce8d20f..902a3cccbc7 100644 --- a/pkg/parser/parsing_test.go +++ b/pkg/parser/parsing_test.go @@ -130,7 +130,7 @@ func testOneParser(pctx *UnixParserCtx, ectx EnricherCtx, dir string, b *testing b.ResetTimer() } for n := 0; n < 
count; n++ { - if testFile(tests, *pctx, pnodes) != true { + if !testFile(tests, *pctx, pnodes) { return errors.New("test failed !") } } @@ -308,7 +308,7 @@ reCheck: for ridx, result := range results { for eidx, expected := range testSet.Results { explain, match := matchEvent(expected, result, debug) - if match == true { + if match { log.Infof("expected %d/%d matches result %d/%d", eidx, len(testSet.Results), ridx, len(results)) if len(explain) > 0 { log.Printf("-> %s", explain[len(explain)-1]) From fba4237fbd2fdbe4751b6880b95001af31c2ace0 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 4 Jun 2024 09:48:21 +0200 Subject: [PATCH 157/318] lint: revive/useless-break (#3050) --- cmd/crowdsec/parse.go | 5 +---- pkg/csconfig/simulation.go | 3 +-- pkg/csplugin/broker.go | 3 +-- 3 files changed, 3 insertions(+), 8 deletions(-) diff --git a/cmd/crowdsec/parse.go b/cmd/crowdsec/parse.go index 53c9ee65d4f..39eedfc858e 100644 --- a/cmd/crowdsec/parse.go +++ b/cmd/crowdsec/parse.go @@ -11,12 +11,11 @@ import ( ) func runParse(input chan types.Event, output chan types.Event, parserCTX parser.UnixParserCtx, nodes []parser.Node) error { -LOOP: for { select { case <-parsersTomb.Dying(): log.Infof("Killing parser routines") - break LOOP + return nil case event := <-input: if !event.Process { continue @@ -55,6 +54,4 @@ LOOP: output <- parsed } } - - return nil } diff --git a/pkg/csconfig/simulation.go b/pkg/csconfig/simulation.go index bf121ef56f9..947b47e3c1e 100644 --- a/pkg/csconfig/simulation.go +++ b/pkg/csconfig/simulation.go @@ -26,8 +26,7 @@ func (s *SimulationConfig) IsSimulated(scenario string) bool { for _, excluded := range s.Exclusions { if excluded == scenario { - simulated = !simulated - break + return !simulated } } diff --git a/pkg/csplugin/broker.go b/pkg/csplugin/broker.go index b5c86f224ab..f6629b2609e 100644 --- a/pkg/csplugin/broker.go +++ b/pkg/csplugin/broker.go @@ -103,7 +103,6 @@ func (pb *PluginBroker) Kill() { func 
(pb *PluginBroker) Run(pluginTomb *tomb.Tomb) { //we get signaled via the channel when notifications need to be delivered to plugin (via the watcher) pb.watcher.Start(&tomb.Tomb{}) -loop: for { select { case profileAlert := <-pb.PluginChannel: @@ -137,7 +136,7 @@ loop: case <-pb.watcher.tomb.Dead(): log.Info("killing all plugins") pb.Kill() - break loop + return case pluginName := <-pb.watcher.PluginEvents: // this can be run in goroutine, but then locks will be needed pluginMutex.Lock() From 9101de95d8f88df8a3cc77cdedeb062fa167040e Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 4 Jun 2024 13:19:46 +0200 Subject: [PATCH 158/318] pkg/database: refactor & rename "soft delete" to "expire" (#3025) * pkg/database: replace parameter with explicit methods * merge methods with recusion * rename "soft delete" -> "expire" * lint --- pkg/apiserver/apic.go | 8 +- pkg/apiserver/controllers/v1/decisions.go | 4 +- pkg/apiserver/papi_cmd.go | 29 +++-- pkg/database/alerts.go | 12 +- pkg/database/decisions.go | 147 ++++++++++++++-------- 5 files changed, 124 insertions(+), 76 deletions(-) diff --git a/pkg/apiserver/apic.go b/pkg/apiserver/apic.go index b6c560c0a5d..68dc94367e2 100644 --- a/pkg/apiserver/apic.go +++ b/pkg/apiserver/apic.go @@ -432,9 +432,9 @@ func (a *apic) HandleDeletedDecisions(deletedDecisions []*models.Decision, delet filter["scopes"] = []string{*decision.Scope} } - dbCliRet, _, err := a.dbClient.SoftDeleteDecisionsWithFilter(filter) + dbCliRet, _, err := a.dbClient.ExpireDecisionsWithFilter(filter) if err != nil { - return 0, fmt.Errorf("deleting decisions error: %w", err) + return 0, fmt.Errorf("expiring decisions error: %w", err) } dbCliDel, err := strconv.Atoi(dbCliRet) @@ -464,9 +464,9 @@ func (a *apic) HandleDeletedDecisionsV3(deletedDecisions []*modelscapi.GetDecisi filter["scopes"] = []string{*scope} } - dbCliRet, _, err := a.dbClient.SoftDeleteDecisionsWithFilter(filter) + dbCliRet, _, err := 
a.dbClient.ExpireDecisionsWithFilter(filter) if err != nil { - return 0, fmt.Errorf("deleting decisions error: %w", err) + return 0, fmt.Errorf("expiring decisions error: %w", err) } dbCliDel, err := strconv.Atoi(dbCliRet) diff --git a/pkg/apiserver/controllers/v1/decisions.go b/pkg/apiserver/controllers/v1/decisions.go index 5cc7628458c..543c832095a 100644 --- a/pkg/apiserver/controllers/v1/decisions.go +++ b/pkg/apiserver/controllers/v1/decisions.go @@ -91,7 +91,7 @@ func (c *Controller) DeleteDecisionById(gctx *gin.Context) { return } - nbDeleted, deletedFromDB, err := c.DBClient.SoftDeleteDecisionByID(decisionID) + nbDeleted, deletedFromDB, err := c.DBClient.ExpireDecisionByID(decisionID) if err != nil { c.HandleDBErrors(gctx, err) @@ -113,7 +113,7 @@ func (c *Controller) DeleteDecisionById(gctx *gin.Context) { } func (c *Controller) DeleteDecisions(gctx *gin.Context) { - nbDeleted, deletedFromDB, err := c.DBClient.SoftDeleteDecisionsWithFilter(gctx.Request.URL.Query()) + nbDeleted, deletedFromDB, err := c.DBClient.ExpireDecisionsWithFilter(gctx.Request.URL.Query()) if err != nil { c.HandleDBErrors(gctx, err) diff --git a/pkg/apiserver/papi_cmd.go b/pkg/apiserver/papi_cmd.go index fb76223b9a0..a1137161698 100644 --- a/pkg/apiserver/papi_cmd.go +++ b/pkg/apiserver/papi_cmd.go @@ -63,10 +63,10 @@ func DecisionCmd(message *Message, p *Papi, sync bool) error { filter := make(map[string][]string) filter["uuid"] = UUIDs - _, deletedDecisions, err := p.DBClient.SoftDeleteDecisionsWithFilter(filter) + _, deletedDecisions, err := p.DBClient.ExpireDecisionsWithFilter(filter) if err != nil { - return fmt.Errorf("unable to delete decisions %+v: %w", UUIDs, err) + return fmt.Errorf("unable to expire decisions %+v: %w", UUIDs, err) } decisions := make([]*models.Decision, 0) @@ -130,12 +130,13 @@ func AlertCmd(message *Message, p *Papi, sync bool) error { alert.Scenario = ptr.Of("") alert.Source = &models.Source{} - //if we're setting Source.Scope to types.ConsoleOrigin, it 
messes up the alert's value + // if we're setting Source.Scope to types.ConsoleOrigin, it messes up the alert's value if len(alert.Decisions) >= 1 { alert.Source.Scope = alert.Decisions[0].Scope alert.Source.Value = alert.Decisions[0].Value } else { log.Warningf("No decision found in alert for Polling API (%s : %s)", message.Header.Source.User, message.Header.Message) + alert.Source.Scope = ptr.Of(types.ConsoleOrigin) alert.Source.Value = &message.Header.Source.User } @@ -150,7 +151,7 @@ func AlertCmd(message *Message, p *Papi, sync bool) error { log.Infof("Adding decision for '%s' with UUID: %s", *decision.Value, decision.UUID) } - //use a different method : alert and/or decision might already be partially present in the database + // use a different method: alert and/or decision might already be partially present in the database _, err = p.DBClient.CreateOrUpdateAlert("", alert) if err != nil { log.Errorf("Failed to create alerts in DB: %s", err) @@ -172,31 +173,33 @@ func ManagementCmd(message *Message, p *Papi, sync bool) error { } switch message.Header.OperationCmd { - case "blocklist_unsubscribe": data, err := json.Marshal(message.Data) if err != nil { return err } + unsubscribeMsg := listUnsubscribe{} if err := json.Unmarshal(data, &unsubscribeMsg); err != nil { - return fmt.Errorf("message for '%s' contains bad data format: %s", message.Header.OperationType, err) + return fmt.Errorf("message for '%s' contains bad data format: %w", message.Header.OperationType, err) } + if unsubscribeMsg.Name == "" { return fmt.Errorf("message for '%s' contains bad data format: missing blocklist name", message.Header.OperationType) } + p.Logger.Infof("Received blocklist_unsubscribe command from PAPI, unsubscribing from blocklist %s", unsubscribeMsg.Name) filter := make(map[string][]string) filter["origin"] = []string{types.ListOrigin} filter["scenario"] = []string{unsubscribeMsg.Name} - _, deletedDecisions, err := p.DBClient.SoftDeleteDecisionsWithFilter(filter) + _, 
deletedDecisions, err := p.DBClient.ExpireDecisionsWithFilter(filter) if err != nil { - return fmt.Errorf("unable to delete decisions for list %s : %w", unsubscribeMsg.Name, err) + return fmt.Errorf("unable to expire decisions for list %s : %w", unsubscribeMsg.Name, err) } - p.Logger.Infof("deleted %d decisions for list %s", len(deletedDecisions), unsubscribeMsg.Name) + p.Logger.Infof("deleted %d decisions for list %s", len(deletedDecisions), unsubscribeMsg.Name) case "reauth": p.Logger.Infof("Received reauth command from PAPI, resetting token") p.apiClient.GetClient().Transport.(*apiclient.JWTTransport).ResetToken() @@ -205,19 +208,23 @@ func ManagementCmd(message *Message, p *Papi, sync bool) error { if err != nil { return err } + forcePullMsg := forcePull{} + if err := json.Unmarshal(data, &forcePullMsg); err != nil { - return fmt.Errorf("message for '%s' contains bad data format: %s", message.Header.OperationType, err) + return fmt.Errorf("message for '%s' contains bad data format: %w", message.Header.OperationType, err) } if forcePullMsg.Blocklist == nil { p.Logger.Infof("Received force_pull command from PAPI, pulling community and 3rd-party blocklists") + err = p.apic.PullTop(true) if err != nil { - return fmt.Errorf("failed to force pull operation: %s", err) + return fmt.Errorf("failed to force pull operation: %w", err) } } else { p.Logger.Infof("Received force_pull command from PAPI, pulling blocklist %s", forcePullMsg.Blocklist.Name) + err = p.apic.PullBlocklist(&modelscapi.BlocklistLink{ Name: &forcePullMsg.Blocklist.Name, URL: &forcePullMsg.Blocklist.Url, diff --git a/pkg/database/alerts.go b/pkg/database/alerts.go index 602846eb31a..37e9397709e 100644 --- a/pkg/database/alerts.go +++ b/pkg/database/alerts.go @@ -27,10 +27,10 @@ import ( ) const ( - paginationSize = 100 // used to queryAlert to avoid 'too many SQL variable' - defaultLimit = 100 // default limit of element to returns when query alerts - bulkSize = 50 // bulk size when create alerts - 
maxLockRetries = 10 // how many times to retry a bulk operation when sqlite3.ErrBusy is encountered + paginationSize = 100 // used to queryAlert to avoid 'too many SQL variable' + defaultLimit = 100 // default limit of element to returns when query alerts + alertCreateBulkSize = 50 // bulk size when create alerts + maxLockRetries = 10 // how many times to retry a bulk operation when sqlite3.ErrBusy is encountered ) func formatAlertCN(source models.Source) string { @@ -796,7 +796,7 @@ func (c *Client) CreateAlert(machineID string, alertList []*models.Alert) ([]str c.Log.Debugf("writing %d items", len(alertList)) - alertChunks := slicetools.Chunks(alertList, bulkSize) + alertChunks := slicetools.Chunks(alertList, alertCreateBulkSize) alertIDs := []string{} for _, alertChunk := range alertChunks { @@ -1117,7 +1117,7 @@ func (c *Client) QueryAlertWithFilter(filter map[string][]string) ([]*ent.Alert, if limit == 0 { limit, err = alerts.Count(c.CTX) if err != nil { - return nil, fmt.Errorf("unable to count nb alerts: %s", err) + return nil, fmt.Errorf("unable to count nb alerts: %w", err) } } diff --git a/pkg/database/decisions.go b/pkg/database/decisions.go index 941fc5c7791..294515d603e 100644 --- a/pkg/database/decisions.go +++ b/pkg/database/decisions.go @@ -17,6 +17,8 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) +const decisionDeleteBulkSize = 256 // scientifically proven to be the best value for bulk delete + type DecisionsByScenario struct { Scenario string Count int @@ -109,23 +111,25 @@ func BuildDecisionRequestWithFilter(query *ent.DecisionQuery, filter map[string] query = query.Where(decision.IDGT(id)) } } + query, err = applyStartIpEndIpFilter(query, contains, ip_sz, start_ip, start_sfx, end_ip, end_sfx) if err != nil { return nil, fmt.Errorf("fail to apply StartIpEndIpFilter: %w", err) } + return query, nil } + func (c *Client) QueryAllDecisionsWithFilters(filters map[string][]string) ([]*ent.Decision, error) { query := 
c.Ent.Decision.Query().Where( decision.UntilGT(time.Now().UTC()), ) - //Allow a bouncer to ask for non-deduplicated results + // Allow a bouncer to ask for non-deduplicated results if v, ok := filters["dedup"]; !ok || v[0] != "false" { query = query.Where(longestDecisionForScopeTypeValue) } query, err := BuildDecisionRequestWithFilter(query, filters) - if err != nil { c.Log.Warningf("QueryAllDecisionsWithFilters : %s", err) return []*ent.Decision{}, errors.Wrap(QueryFail, "get all decisions with filters") @@ -138,6 +142,7 @@ func (c *Client) QueryAllDecisionsWithFilters(filters map[string][]string) ([]*e c.Log.Warningf("QueryAllDecisionsWithFilters : %s", err) return []*ent.Decision{}, errors.Wrap(QueryFail, "get all decisions with filters") } + return data, nil } @@ -145,7 +150,7 @@ func (c *Client) QueryExpiredDecisionsWithFilters(filters map[string][]string) ( query := c.Ent.Decision.Query().Where( decision.UntilLT(time.Now().UTC()), ) - //Allow a bouncer to ask for non-deduplicated results + // Allow a bouncer to ask for non-deduplicated results if v, ok := filters["dedup"]; !ok || v[0] != "false" { query = query.Where(longestDecisionForScopeTypeValue) } @@ -158,11 +163,13 @@ func (c *Client) QueryExpiredDecisionsWithFilters(filters map[string][]string) ( c.Log.Warningf("QueryExpiredDecisionsWithFilters : %s", err) return []*ent.Decision{}, errors.Wrap(QueryFail, "get expired decisions with filters") } + data, err := query.All(c.CTX) if err != nil { c.Log.Warningf("QueryExpiredDecisionsWithFilters : %s", err) return []*ent.Decision{}, errors.Wrap(QueryFail, "expired decisions") } + return data, nil } @@ -170,8 +177,8 @@ func (c *Client) QueryDecisionCountByScenario() ([]*DecisionsByScenario, error) query := c.Ent.Decision.Query().Where( decision.UntilGT(time.Now().UTC()), ) - query, err := BuildDecisionRequestWithFilter(query, make(map[string][]string)) + query, err := BuildDecisionRequestWithFilter(query, make(map[string][]string)) if err != nil { 
c.Log.Warningf("QueryDecisionCountByScenario : %s", err) return nil, errors.Wrap(QueryFail, "count all decisions with filters") @@ -180,7 +187,6 @@ func (c *Client) QueryDecisionCountByScenario() ([]*DecisionsByScenario, error) var r []*DecisionsByScenario err = query.GroupBy(decision.FieldScenario, decision.FieldOrigin, decision.FieldType).Aggregate(ent.Count()).Scan(c.CTX, &r) - if err != nil { c.Log.Warningf("QueryDecisionCountByScenario : %s", err) return nil, errors.Wrap(QueryFail, "count all decisions with filters") @@ -253,10 +259,11 @@ func (c *Client) QueryExpiredDecisionsSinceWithFilters(since time.Time, filters decision.UntilLT(time.Now().UTC()), decision.UntilGT(since), ) - //Allow a bouncer to ask for non-deduplicated results + // Allow a bouncer to ask for non-deduplicated results if v, ok := filters["dedup"]; !ok || v[0] != "false" { query = query.Where(longestDecisionForScopeTypeValue) } + query, err := BuildDecisionRequestWithFilter(query, filters) if err != nil { c.Log.Warningf("QueryExpiredDecisionsSinceWithFilters : %s", err) @@ -309,7 +316,7 @@ func (c *Client) DeleteDecisionById(decisionID int) ([]*ent.Decision, error) { return nil, errors.Wrapf(DeleteFail, "decision with id '%d' doesn't exist", decisionID) } - count, err := c.BulkDeleteDecisions(toDelete, false) + count, err := c.DeleteDecisions(toDelete) c.Log.Debugf("deleted %d decisions", count) return toDelete, err @@ -424,7 +431,7 @@ func (c *Client) DeleteDecisionsWithFilter(filter map[string][]string) (string, return "0", nil, errors.Wrap(DeleteFail, "decisions with provided filter") } - count, err := c.BulkDeleteDecisions(toDelete, false) + count, err := c.DeleteDecisions(toDelete) if err != nil { c.Log.Warningf("While deleting decisions : %s", err) return "0", nil, errors.Wrap(DeleteFail, "decisions with provided filter") @@ -433,8 +440,8 @@ func (c *Client) DeleteDecisionsWithFilter(filter map[string][]string) (string, return strconv.Itoa(count), toDelete, nil } -// 
SoftDeleteDecisionsWithFilter updates the expiration time to now() for the decisions matching the filter, and returns the updated items -func (c *Client) SoftDeleteDecisionsWithFilter(filter map[string][]string) (string, []*ent.Decision, error) { +// ExpireDecisionsWithFilter updates the expiration time to now() for the decisions matching the filter, and returns the updated items +func (c *Client) ExpireDecisionsWithFilter(filter map[string][]string) (string, []*ent.Decision, error) { var err error var start_ip, start_sfx, end_ip, end_sfx int64 var ip_sz int @@ -545,64 +552,98 @@ func (c *Client) SoftDeleteDecisionsWithFilter(filter map[string][]string) (stri DecisionsToDelete, err := decisions.All(c.CTX) if err != nil { - c.Log.Warningf("SoftDeleteDecisionsWithFilter : %s", err) - return "0", nil, errors.Wrap(DeleteFail, "soft delete decisions with provided filter") + c.Log.Warningf("ExpireDecisionsWithFilter : %s", err) + return "0", nil, errors.Wrap(DeleteFail, "expire decisions with provided filter") } - count, err := c.BulkDeleteDecisions(DecisionsToDelete, true) + count, err := c.ExpireDecisions(DecisionsToDelete) if err != nil { - return "0", nil, errors.Wrapf(DeleteFail, "soft delete decisions with provided filter : %s", err) + return "0", nil, errors.Wrapf(DeleteFail, "expire decisions with provided filter : %s", err) } return strconv.Itoa(count), DecisionsToDelete, err } -// BulkDeleteDecisions sets the expiration of a bulk of decisions to now() or hard deletes them. 
-// We are doing it this way so we can return impacted decisions for sync with CAPI/PAPI -func (c *Client) BulkDeleteDecisions(decisionsToDelete []*ent.Decision, softDelete bool) (int, error) { - const bulkSize = 256 // scientifically proven to be the best value for bulk delete +func decisionIDs(decisions []*ent.Decision) []int { + ids := make([]int, len(decisions)) + for i, d := range decisions { + ids[i] = d.ID + } - var ( - nbUpdates int - err error - totalUpdates = 0 - ) + return ids +} - idsToDelete := make([]int, len(decisionsToDelete)) - for i, decision := range decisionsToDelete { - idsToDelete[i] = decision.ID +// ExpireDecisions sets the expiration of a list of decisions to now() +// It returns the number of impacted decisions for the CAPI/PAPI +func (c *Client) ExpireDecisions(decisions []*ent.Decision) (int, error) { + if len(decisions) <= decisionDeleteBulkSize { + ids := decisionIDs(decisions) + + rows, err := c.Ent.Decision.Update().Where( + decision.IDIn(ids...), + ).SetUntil(time.Now().UTC()).Save(c.CTX) + if err != nil { + return 0, fmt.Errorf("expire decisions with provided filter: %w", err) + } + + return rows, nil } - for _, chunk := range slicetools.Chunks(idsToDelete, bulkSize) { - if softDelete { - nbUpdates, err = c.Ent.Decision.Update().Where( - decision.IDIn(chunk...), - ).SetUntil(time.Now().UTC()).Save(c.CTX) - if err != nil { - return totalUpdates, fmt.Errorf("soft delete decisions with provided filter: %w", err) - } - } else { - nbUpdates, err = c.Ent.Decision.Delete().Where( - decision.IDIn(chunk...), - ).Exec(c.CTX) - if err != nil { - return totalUpdates, fmt.Errorf("hard delete decisions with provided filter: %w", err) - } + // big batch, let's split it and recurse + + total := 0 + + for _, chunk := range slicetools.Chunks(decisions, decisionDeleteBulkSize) { + rows, err := c.ExpireDecisions(chunk) + if err != nil { + return total, err } - totalUpdates += nbUpdates + total += rows } - return totalUpdates, nil + return total, nil } 
-// SoftDeleteDecisionByID set the expiration of a decision to now() -func (c *Client) SoftDeleteDecisionByID(decisionID int) (int, []*ent.Decision, error) { +// DeleteDecisions removes a list of decisions from the database +// It returns the number of impacted decisions for the CAPI/PAPI +func (c *Client) DeleteDecisions(decisions []*ent.Decision) (int, error) { + if len(decisions) < decisionDeleteBulkSize { + ids := decisionIDs(decisions) + + rows, err := c.Ent.Decision.Delete().Where( + decision.IDIn(ids...), + ).Exec(c.CTX) + if err != nil { + return 0, fmt.Errorf("hard delete decisions with provided filter: %w", err) + } + + return rows, nil + } + + // big batch, let's split it and recurse + + tot := 0 + + for _, chunk := range slicetools.Chunks(decisions, decisionDeleteBulkSize) { + rows, err := c.DeleteDecisions(chunk) + if err != nil { + return tot, err + } + + tot += rows + } + + return tot, nil +} + +// ExpireDecision set the expiration of a decision to now() +func (c *Client) ExpireDecisionByID(decisionID int) (int, []*ent.Decision, error) { toUpdate, err := c.Ent.Decision.Query().Where(decision.IDEQ(decisionID)).All(c.CTX) // XXX: do we want 500 or 404 here? 
if err != nil || len(toUpdate) == 0 { - c.Log.Warningf("SoftDeleteDecisionByID : %v (nb soft deleted: %d)", err, len(toUpdate)) + c.Log.Warningf("ExpireDecisionByID : %v (nb expired: %d)", err, len(toUpdate)) return 0, nil, errors.Wrapf(DeleteFail, "decision with id '%d' doesn't exist", decisionID) } @@ -610,7 +651,8 @@ func (c *Client) SoftDeleteDecisionByID(decisionID int) (int, []*ent.Decision, e return 0, nil, ItemNotFound } - count, err := c.BulkDeleteDecisions(toUpdate, true) + count, err := c.ExpireDecisions(toUpdate) + return count, toUpdate, err } @@ -618,8 +660,8 @@ func (c *Client) CountDecisionsByValue(decisionValue string) (int, error) { var err error var start_ip, start_sfx, end_ip, end_sfx int64 var ip_sz, count int - ip_sz, start_ip, start_sfx, end_ip, end_sfx, err = types.Addr2Ints(decisionValue) + ip_sz, start_ip, start_sfx, end_ip, end_sfx, err = types.Addr2Ints(decisionValue) if err != nil { return 0, errors.Wrapf(InvalidIPOrRange, "unable to convert '%s' to int: %s", decisionValue, err) } @@ -644,10 +686,10 @@ func (c *Client) CountActiveDecisionsByValue(decisionValue string) (int, error) var err error var start_ip, start_sfx, end_ip, end_sfx int64 var ip_sz, count int - ip_sz, start_ip, start_sfx, end_ip, end_sfx, err = types.Addr2Ints(decisionValue) + ip_sz, start_ip, start_sfx, end_ip, end_sfx, err = types.Addr2Ints(decisionValue) if err != nil { - return 0, fmt.Errorf("unable to convert '%s' to int: %s", decisionValue, err) + return 0, fmt.Errorf("unable to convert '%s' to int: %w", decisionValue, err) } contains := true @@ -672,10 +714,10 @@ func (c *Client) GetActiveDecisionsTimeLeftByValue(decisionValue string) (time.D var err error var start_ip, start_sfx, end_ip, end_sfx int64 var ip_sz int - ip_sz, start_ip, start_sfx, end_ip, end_sfx, err = types.Addr2Ints(decisionValue) + ip_sz, start_ip, start_sfx, end_ip, end_sfx, err = types.Addr2Ints(decisionValue) if err != nil { - return 0, fmt.Errorf("unable to convert '%s' to int: %s", 
decisionValue, err) + return 0, fmt.Errorf("unable to convert '%s' to int: %w", decisionValue, err) } contains := true @@ -704,7 +746,6 @@ func (c *Client) GetActiveDecisionsTimeLeftByValue(decisionValue string) (time.D func (c *Client) CountDecisionsSinceByValue(decisionValue string, since time.Time) (int, error) { ip_sz, start_ip, start_sfx, end_ip, end_sfx, err := types.Addr2Ints(decisionValue) - if err != nil { return 0, errors.Wrapf(InvalidIPOrRange, "unable to convert '%s' to int: %s", decisionValue, err) } From d614ecbdcf8dbc88dfbcf505fde0f7de030474f7 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 4 Jun 2024 15:09:19 +0200 Subject: [PATCH 159/318] CI/bats: test remediation api via socket (#3052) --- test/bats/97_ipv4_single.bats | 19 ++++--------- test/bats/97_ipv6_single.bats | 23 ++++++--------- test/bats/98_ipv4_range.bats | 23 ++++++--------- test/bats/98_ipv6_range.bats | 31 ++++++++------------- test/bats/99_lapi-stream-mode-scenario.bats | 29 ++++++++----------- test/bats/99_lapi-stream-mode-scopes.bats | 15 +++------- test/bats/99_lapi-stream-mode.bats | 15 +++------- test/lib/setup_file.sh | 14 ++++++++++ 8 files changed, 67 insertions(+), 102 deletions(-) diff --git a/test/bats/97_ipv4_single.bats b/test/bats/97_ipv4_single.bats index 1ada1c4646b..f02d9ebd503 100644 --- a/test/bats/97_ipv4_single.bats +++ b/test/bats/97_ipv4_single.bats @@ -9,8 +9,6 @@ setup_file() { ./instance-crowdsec start API_KEY=$(cscli bouncers add testbouncer -o raw) export API_KEY - CROWDSEC_API_URL="http://localhost:8080" - export CROWDSEC_API_URL } teardown_file() { @@ -22,11 +20,6 @@ setup() { if is_db_mysql; then sleep 0.3; fi } -api() { - URI="$1" - curl -s -H "X-Api-Key: ${API_KEY}" "${CROWDSEC_API_URL}${URI}" -} - #---------- @test "cli - first decisions list: must be empty" { @@ -37,7 +30,7 @@ api() { } @test "API - first decisions list: must be empty" { - rune -0 api '/v1/decisions' + rune -0 lapi-get '/v1/decisions' 
assert_output 'null' } @@ -53,7 +46,7 @@ api() { } @test "API - all decisions" { - rune -0 api '/v1/decisions' + rune -0 lapi-get '/v1/decisions' rune -0 jq -c '[ . | length, .[0].value ]' <(output) assert_output '[1,"1.2.3.4"]' } @@ -67,7 +60,7 @@ api() { } @test "API - decision for 1.2.3.4" { - rune -0 api '/v1/decisions?ip=1.2.3.4' + rune -0 lapi-get '/v1/decisions?ip=1.2.3.4' rune -0 jq -r '.[0].value' <(output) assert_output '1.2.3.4' } @@ -78,7 +71,7 @@ api() { } @test "API - decision for 1.2.3.5" { - rune -0 api '/v1/decisions?ip=1.2.3.5' + rune -0 lapi-get '/v1/decisions?ip=1.2.3.5' assert_output 'null' } @@ -90,7 +83,7 @@ api() { } @test "API - decision for 1.2.3.0/24" { - rune -0 api '/v1/decisions?range=1.2.3.0/24' + rune -0 lapi-get '/v1/decisions?range=1.2.3.0/24' assert_output 'null' } @@ -101,7 +94,7 @@ api() { } @test "API - decisions where IP in 1.2.3.0/24" { - rune -0 api '/v1/decisions?range=1.2.3.0/24&contains=false' + rune -0 lapi-get '/v1/decisions?range=1.2.3.0/24&contains=false' rune -0 jq -r '.[0].value' <(output) assert_output '1.2.3.4' } diff --git a/test/bats/97_ipv6_single.bats b/test/bats/97_ipv6_single.bats index 982976d70ed..3cb1cedd0c2 100644 --- a/test/bats/97_ipv6_single.bats +++ b/test/bats/97_ipv6_single.bats @@ -9,8 +9,6 @@ setup_file() { ./instance-crowdsec start API_KEY=$(cscli bouncers add testbouncer -o raw) export API_KEY - CROWDSEC_API_URL="http://localhost:8080" - export CROWDSEC_API_URL } teardown_file() { @@ -22,11 +20,6 @@ setup() { if is_db_mysql; then sleep 0.5; fi } -api() { - URI="$1" - curl -s -H "X-Api-Key: ${API_KEY}" "${CROWDSEC_API_URL}${URI}" -} - #---------- @test "cli - first decisions list: must be empty" { @@ -48,7 +41,7 @@ api() { } @test "API - all decisions" { - rune -0 api "/v1/decisions" + rune -0 lapi-get "/v1/decisions" rune -0 jq -r '.[].value' <(output) assert_output '1111:2222:3333:4444:5555:6666:7777:8888' } @@ -60,7 +53,7 @@ api() { } @test "API - decisions for ip 
1111:2222:3333:4444:5555:6666:7777:888" { - rune -0 api '/v1/decisions?ip=1111:2222:3333:4444:5555:6666:7777:8888' + rune -0 lapi-get '/v1/decisions?ip=1111:2222:3333:4444:5555:6666:7777:8888' rune -0 jq -r '.[].value' <(output) assert_output '1111:2222:3333:4444:5555:6666:7777:8888' } @@ -71,7 +64,7 @@ api() { } @test "API - decisions for ip 1211:2222:3333:4444:5555:6666:7777:888" { - rune -0 api '/v1/decisions?ip=1211:2222:3333:4444:5555:6666:7777:8888' + rune -0 lapi-get '/v1/decisions?ip=1211:2222:3333:4444:5555:6666:7777:8888' assert_output 'null' } @@ -81,7 +74,7 @@ api() { } @test "API - decisions for ip 1111:2222:3333:4444:5555:6666:7777:8887" { - rune -0 api '/v1/decisions?ip=1111:2222:3333:4444:5555:6666:7777:8887' + rune -0 lapi-get '/v1/decisions?ip=1111:2222:3333:4444:5555:6666:7777:8887' assert_output 'null' } @@ -91,7 +84,7 @@ api() { } @test "API - decisions for range 1111:2222:3333:4444:5555:6666:7777:8888/48" { - rune -0 api '/v1/decisions?range=1111:2222:3333:4444:5555:6666:7777:8888/48' + rune -0 lapi-get '/v1/decisions?range=1111:2222:3333:4444:5555:6666:7777:8888/48' assert_output 'null' } @@ -102,7 +95,7 @@ api() { } @test "API - decisions for ip/range in 1111:2222:3333:4444:5555:6666:7777:8888/48" { - rune -0 api '/v1/decisions?range=1111:2222:3333:4444:5555:6666:7777:8888/48&&contains=false' + rune -0 lapi-get '/v1/decisions?range=1111:2222:3333:4444:5555:6666:7777:8888/48&&contains=false' rune -0 jq -r '.[].value' <(output) assert_output '1111:2222:3333:4444:5555:6666:7777:8888' } @@ -113,7 +106,7 @@ api() { } @test "API - decisions for range 1111:2222:3333:4444:5555:6666:7777:8888/64" { - rune -0 api '/v1/decisions?range=1111:2222:3333:4444:5555:6666:7777:8888/64' + rune -0 lapi-get '/v1/decisions?range=1111:2222:3333:4444:5555:6666:7777:8888/64' assert_output 'null' } @@ -124,7 +117,7 @@ api() { } @test "API - decisions for ip/range in 1111:2222:3333:4444:5555:6666:7777:8888/64" { - rune -0 api 
'/v1/decisions?range=1111:2222:3333:4444:5555:6666:7777:8888/64&&contains=false' + rune -0 lapi-get '/v1/decisions?range=1111:2222:3333:4444:5555:6666:7777:8888/64&&contains=false' rune -0 jq -r '.[].value' <(output) assert_output '1111:2222:3333:4444:5555:6666:7777:8888' } diff --git a/test/bats/98_ipv4_range.bats b/test/bats/98_ipv4_range.bats index b0f6f482944..a58f144b885 100644 --- a/test/bats/98_ipv4_range.bats +++ b/test/bats/98_ipv4_range.bats @@ -9,8 +9,6 @@ setup_file() { ./instance-crowdsec start API_KEY=$(cscli bouncers add testbouncer -o raw) export API_KEY - CROWDSEC_API_URL="http://localhost:8080" - export CROWDSEC_API_URL } teardown_file() { @@ -22,11 +20,6 @@ setup() { if is_db_mysql; then sleep 0.3; fi } -api() { - URI="$1" - curl -s -H "X-Api-Key: ${API_KEY}" "${CROWDSEC_API_URL}${URI}" -} - #---------- @test "cli - first decisions list: must be empty" { @@ -48,7 +41,7 @@ api() { } @test "API - all decisions" { - rune -0 api '/v1/decisions' + rune -0 lapi-get '/v1/decisions' rune -0 jq -r '.[0].value' <(output) assert_output '4.4.4.0/24' } @@ -62,7 +55,7 @@ api() { } @test "API - decisions for ip 4.4.4." { - rune -0 api '/v1/decisions?ip=4.4.4.3' + rune -0 lapi-get '/v1/decisions?ip=4.4.4.3' rune -0 jq -r '.[0].value' <(output) assert_output '4.4.4.0/24' } @@ -73,7 +66,7 @@ api() { } @test "API - decisions for ip contained in 4.4.4." { - rune -0 api '/v1/decisions?ip=4.4.4.4&contains=false' + rune -0 lapi-get '/v1/decisions?ip=4.4.4.4&contains=false' assert_output 'null' } @@ -83,7 +76,7 @@ api() { } @test "API - decisions for ip 5.4.4." 
{ - rune -0 api '/v1/decisions?ip=5.4.4.3' + rune -0 lapi-get '/v1/decisions?ip=5.4.4.3' assert_output 'null' } @@ -93,7 +86,7 @@ api() { } @test "API - decisions for range 4.4.0.0/1" { - rune -0 api '/v1/decisions?range=4.4.0.0/16' + rune -0 lapi-get '/v1/decisions?range=4.4.0.0/16' assert_output 'null' } @@ -104,7 +97,7 @@ api() { } @test "API - decisions for ip/range in 4.4.0.0/1" { - rune -0 api '/v1/decisions?range=4.4.0.0/16&contains=false' + rune -0 lapi-get '/v1/decisions?range=4.4.0.0/16&contains=false' rune -0 jq -r '.[0].value' <(output) assert_output '4.4.4.0/24' } @@ -118,7 +111,7 @@ api() { } @test "API - decisions for range 4.4.4.2/2" { - rune -0 api '/v1/decisions?range=4.4.4.2/28' + rune -0 lapi-get '/v1/decisions?range=4.4.4.2/28' rune -0 jq -r '.[].value' <(output) assert_output '4.4.4.0/24' } @@ -129,6 +122,6 @@ api() { } @test "API - decisions for range 4.4.3.2/2" { - rune -0 api '/v1/decisions?range=4.4.3.2/28' + rune -0 lapi-get '/v1/decisions?range=4.4.3.2/28' assert_output 'null' } diff --git a/test/bats/98_ipv6_range.bats b/test/bats/98_ipv6_range.bats index d3c347583da..065f32b74ba 100644 --- a/test/bats/98_ipv6_range.bats +++ b/test/bats/98_ipv6_range.bats @@ -9,8 +9,6 @@ setup_file() { ./instance-crowdsec start API_KEY=$(cscli bouncers add testbouncer -o raw) export API_KEY - CROWDSEC_API_URL="http://localhost:8080" - export CROWDSEC_API_URL } teardown_file() { @@ -22,11 +20,6 @@ setup() { if is_db_mysql; then sleep 0.3; fi } -api() { - URI="$1" - curl -s -H "X-Api-Key: ${API_KEY}" "${CROWDSEC_API_URL}${URI}" -} - #---------- @test "cli - first decisions list: must be empty" { @@ -48,7 +41,7 @@ api() { } @test "API - all decisions (2)" { - rune -0 api '/v1/decisions' + rune -0 lapi-get '/v1/decisions' rune -0 jq -r '.[].value' <(output) assert_output 'aaaa:2222:3333:4444::/64' } @@ -62,7 +55,7 @@ api() { } @test "API - decisions for ip aaaa:2222:3333:4444:5555:6666:7777:8888" { - rune -0 api 
'/v1/decisions?ip=aaaa:2222:3333:4444:5555:6666:7777:8888' + rune -0 lapi-get '/v1/decisions?ip=aaaa:2222:3333:4444:5555:6666:7777:8888' rune -0 jq -r '.[].value' <(output) assert_output 'aaaa:2222:3333:4444::/64' } @@ -73,7 +66,7 @@ api() { } @test "API - decisions for ip aaaa:2222:3333:4445:5555:6666:7777:8888" { - rune -0 api '/v1/decisions?ip=aaaa:2222:3333:4445:5555:6666:7777:8888' + rune -0 lapi-get '/v1/decisions?ip=aaaa:2222:3333:4445:5555:6666:7777:8888' assert_output 'null' } @@ -83,7 +76,7 @@ api() { } @test "API - decisions for ip aaa1:2222:3333:4444:5555:6666:7777:8887" { - rune -0 api '/v1/decisions?ip=aaa1:2222:3333:4444:5555:6666:7777:8887' + rune -0 lapi-get '/v1/decisions?ip=aaa1:2222:3333:4444:5555:6666:7777:8887' assert_output 'null' } @@ -96,7 +89,7 @@ api() { } @test "API - decisions for range aaaa:2222:3333:4444:5555::/80" { - rune -0 api '/v1/decisions?range=aaaa:2222:3333:4444:5555::/80' + rune -0 lapi-get '/v1/decisions?range=aaaa:2222:3333:4444:5555::/80' rune -0 jq -r '.[].value' <(output) assert_output 'aaaa:2222:3333:4444::/64' } @@ -108,7 +101,7 @@ api() { } @test "API - decisions for range aaaa:2222:3333:4441:5555::/80" { - rune -0 api '/v1/decisions?range=aaaa:2222:3333:4441:5555::/80' + rune -0 lapi-get '/v1/decisions?range=aaaa:2222:3333:4441:5555::/80' assert_output 'null' } @@ -118,7 +111,7 @@ api() { } @test "API - decisions for range aaa1:2222:3333:4444:5555::/80" { - rune -0 api '/v1/decisions?range=aaa1:2222:3333:4444:5555::/80' + rune -0 lapi-get '/v1/decisions?range=aaa1:2222:3333:4444:5555::/80' assert_output 'null' } @@ -130,7 +123,7 @@ api() { } @test "API - decisions for range aaaa:2222:3333:4444:5555:6666:7777:8888/48" { - rune -0 api '/v1/decisions?range=aaaa:2222:3333:4444:5555:6666:7777:8888/48' + rune -0 lapi-get '/v1/decisions?range=aaaa:2222:3333:4444:5555:6666:7777:8888/48' assert_output 'null' } @@ -141,7 +134,7 @@ api() { } @test "API - decisions for ip/range in aaaa:2222:3333:4444:5555:6666:7777:8888/48" { - 
rune -0 api '/v1/decisions?range=aaaa:2222:3333:4444:5555:6666:7777:8888/48&contains=false' + rune -0 lapi-get '/v1/decisions?range=aaaa:2222:3333:4444:5555:6666:7777:8888/48&contains=false' rune -0 jq -r '.[].value' <(output) assert_output 'aaaa:2222:3333:4444::/64' } @@ -152,7 +145,7 @@ api() { } @test "API - decisions for ip/range in aaaa:2222:3333:4445:5555:6666:7777:8888/48" { - rune -0 api '/v1/decisions?range=aaaa:2222:3333:4445:5555:6666:7777:8888/48' + rune -0 lapi-get '/v1/decisions?range=aaaa:2222:3333:4445:5555:6666:7777:8888/48' assert_output 'null' } @@ -170,7 +163,7 @@ api() { } @test "API - decisions for ip in bbbb:db8:0000:0000:0000:6fff:ffff:ffff" { - rune -0 api '/v1/decisions?ip=bbbb:db8:0000:0000:0000:6fff:ffff:ffff' + rune -0 lapi-get '/v1/decisions?ip=bbbb:db8:0000:0000:0000:6fff:ffff:ffff' rune -0 jq -r '.[].value' <(output) assert_output 'bbbb:db8::/81' } @@ -181,7 +174,7 @@ api() { } @test "API - decisions for ip in bbbb:db8:0000:0000:0000:8fff:ffff:ffff" { - rune -0 api '/v1/decisions?ip=bbbb:db8:0000:0000:0000:8fff:ffff:ffff' + rune -0 lapi-get '/v1/decisions?ip=bbbb:db8:0000:0000:0000:8fff:ffff:ffff' assert_output 'null' } diff --git a/test/bats/99_lapi-stream-mode-scenario.bats b/test/bats/99_lapi-stream-mode-scenario.bats index 9b4d562f3c9..1cd44c1ae80 100644 --- a/test/bats/99_lapi-stream-mode-scenario.bats +++ b/test/bats/99_lapi-stream-mode-scenario.bats @@ -9,8 +9,6 @@ setup_file() { ./instance-crowdsec start API_KEY=$(cscli bouncers add testbouncer -o raw) export API_KEY - CROWDSEC_API_URL="http://localhost:8080" - export CROWDSEC_API_URL } teardown_file() { @@ -24,11 +22,6 @@ setup() { #---------- -api() { - URI="$1" - curl -s -H "X-Api-Key:${API_KEY}" "${CROWDSEC_API_URL}${URI}" -} - output_new_decisions() { jq -c '.new | map(select(.origin!="CAPI")) | .[] | del(.id) | (.. | .duration?) 
|= capture("(?[[:digit:]]+h[[:digit:]]+m)").d' <(output) | sort } @@ -62,7 +55,7 @@ output_new_decisions() { } @test "test startup" { - rune -0 api "/v1/decisions/stream?startup=true" + rune -0 lapi-get "/v1/decisions/stream?startup=true" rune -0 output_new_decisions assert_output - <<-EOT {"duration":"2h59m","origin":"test","scenario":"crowdsecurity/test","scope":"Ip","type":"ban","value":"127.0.0.2"} @@ -71,7 +64,7 @@ output_new_decisions() { } @test "test startup with scenarios containing" { - rune -0 api "/v1/decisions/stream?startup=true&scenarios_containing=ssh_bf" + rune -0 lapi-get "/v1/decisions/stream?startup=true&scenarios_containing=ssh_bf" rune -0 output_new_decisions assert_output - <<-EOT {"duration":"2h59m","origin":"another_origin","scenario":"crowdsecurity/ssh_bf","scope":"Ip","type":"ban","value":"127.0.0.1"} @@ -80,7 +73,7 @@ output_new_decisions() { } @test "test startup with multiple scenarios containing" { - rune -0 api "/v1/decisions/stream?startup=true&scenarios_containing=ssh_bf,test" + rune -0 lapi-get "/v1/decisions/stream?startup=true&scenarios_containing=ssh_bf,test" rune -0 output_new_decisions assert_output - <<-EOT {"duration":"2h59m","origin":"another_origin","scenario":"crowdsecurity/ssh_bf","scope":"Ip","type":"ban","value":"127.0.0.1"} @@ -89,12 +82,12 @@ output_new_decisions() { } @test "test startup with unknown scenarios containing" { - rune -0 api "/v1/decisions/stream?startup=true&scenarios_containing=unknown" + rune -0 lapi-get "/v1/decisions/stream?startup=true&scenarios_containing=unknown" assert_output '{"deleted":null,"new":null}' } @test "test startup with scenarios containing and not containing" { - rune -0 api "/v1/decisions/stream?startup=true&scenarios_containing=test&scenarios_not_containing=ssh_bf" + rune -0 lapi-get "/v1/decisions/stream?startup=true&scenarios_containing=test&scenarios_not_containing=ssh_bf" rune -0 output_new_decisions assert_output - <<-EOT 
{"duration":"2h59m","origin":"test","scenario":"crowdsecurity/test","scope":"Ip","type":"ban","value":"127.0.0.2"} @@ -103,7 +96,7 @@ output_new_decisions() { } @test "test startup with scenarios containing and not containing 2" { - rune -0 api "/v1/decisions/stream?startup=true&scenarios_containing=longest&scenarios_not_containing=ssh_bf,test" + rune -0 lapi-get "/v1/decisions/stream?startup=true&scenarios_containing=longest&scenarios_not_containing=ssh_bf,test" rune -0 output_new_decisions assert_output - <<-EOT {"duration":"4h59m","origin":"test","scenario":"crowdsecurity/longest","scope":"Ip","type":"ban","value":"127.0.0.1"} @@ -111,7 +104,7 @@ output_new_decisions() { } @test "test startup with scenarios not containing" { - rune -0 api "/v1/decisions/stream?startup=true&scenarios_not_containing=ssh_bf" + rune -0 lapi-get "/v1/decisions/stream?startup=true&scenarios_not_containing=ssh_bf" rune -0 output_new_decisions assert_output - <<-EOT {"duration":"2h59m","origin":"test","scenario":"crowdsecurity/test","scope":"Ip","type":"ban","value":"127.0.0.2"} @@ -120,7 +113,7 @@ output_new_decisions() { } @test "test startup with multiple scenarios not containing" { - rune -0 api "/v1/decisions/stream?startup=true&scenarios_not_containing=ssh_bf,test" + rune -0 lapi-get "/v1/decisions/stream?startup=true&scenarios_not_containing=ssh_bf,test" rune -0 output_new_decisions assert_output - <<-EOT {"duration":"4h59m","origin":"test","scenario":"crowdsecurity/longest","scope":"Ip","type":"ban","value":"127.0.0.1"} @@ -128,7 +121,7 @@ output_new_decisions() { } @test "test startup with origins parameter" { - rune -0 api "/v1/decisions/stream?startup=true&origins=another_origin" + rune -0 lapi-get "/v1/decisions/stream?startup=true&origins=another_origin" rune -0 output_new_decisions assert_output - <<-EOT {"duration":"1h59m","origin":"another_origin","scenario":"crowdsecurity/test","scope":"Ip","type":"ban","value":"127.0.0.2"} @@ -137,7 +130,7 @@ output_new_decisions() { } 
@test "test startup with multiple origins parameter" { - rune -0 api "/v1/decisions/stream?startup=true&origins=another_origin,test" + rune -0 lapi-get "/v1/decisions/stream?startup=true&origins=another_origin,test" rune -0 output_new_decisions assert_output - <<-EOT {"duration":"2h59m","origin":"test","scenario":"crowdsecurity/test","scope":"Ip","type":"ban","value":"127.0.0.2"} @@ -146,7 +139,7 @@ output_new_decisions() { } @test "test startup with unknown origins" { - rune -0 api "/v1/decisions/stream?startup=true&origins=unknown" + rune -0 lapi-get "/v1/decisions/stream?startup=true&origins=unknown" assert_output '{"deleted":null,"new":null}' } diff --git a/test/bats/99_lapi-stream-mode-scopes.bats b/test/bats/99_lapi-stream-mode-scopes.bats index a1d01c489e6..a9ed494e69c 100644 --- a/test/bats/99_lapi-stream-mode-scopes.bats +++ b/test/bats/99_lapi-stream-mode-scopes.bats @@ -9,8 +9,6 @@ setup_file() { ./instance-crowdsec start API_KEY=$(cscli bouncers add testbouncer -o raw) export API_KEY - CROWDSEC_API_URL="http://localhost:8080" - export CROWDSEC_API_URL } teardown_file() { @@ -23,11 +21,6 @@ setup() { #---------- -api() { - URI="$1" - curl -s -H "X-Api-Key: ${API_KEY}" "${CROWDSEC_API_URL}${URI}" -} - @test "adding decisions for multiple scopes" { rune -0 cscli decisions add -i '1.2.3.6' assert_stderr --partial 'Decision successfully added' @@ -36,28 +29,28 @@ api() { } @test "stream start (implicit ip scope)" { - rune -0 api "/v1/decisions/stream?startup=true" + rune -0 lapi-get "/v1/decisions/stream?startup=true" rune -0 jq -r '.new' <(output) assert_output --partial '1.2.3.6' refute_output --partial 'toto' } @test "stream start (explicit ip scope)" { - rune -0 api "/v1/decisions/stream?startup=true&scopes=ip" + rune -0 lapi-get "/v1/decisions/stream?startup=true&scopes=ip" rune -0 jq -r '.new' <(output) assert_output --partial '1.2.3.6' refute_output --partial 'toto' } @test "stream start (user scope)" { - rune -0 api 
"/v1/decisions/stream?startup=true&scopes=user" + rune -0 lapi-get "/v1/decisions/stream?startup=true&scopes=user" rune -0 jq -r '.new' <(output) refute_output --partial '1.2.3.6' assert_output --partial 'toto' } @test "stream start (user+ip scope)" { - rune -0 api "/v1/decisions/stream?startup=true&scopes=user,ip" + rune -0 lapi-get "/v1/decisions/stream?startup=true&scopes=user,ip" rune -0 jq -r '.new' <(output) assert_output --partial '1.2.3.6' assert_output --partial 'toto' diff --git a/test/bats/99_lapi-stream-mode.bats b/test/bats/99_lapi-stream-mode.bats index 08ddde42c5f..c683a6150f4 100644 --- a/test/bats/99_lapi-stream-mode.bats +++ b/test/bats/99_lapi-stream-mode.bats @@ -9,8 +9,6 @@ setup_file() { ./instance-crowdsec start API_KEY=$(cscli bouncers add testbouncer -o raw) export API_KEY - CROWDSEC_API_URL="http://localhost:8080" - export CROWDSEC_API_URL } teardown_file() { @@ -23,11 +21,6 @@ setup() { #---------- -api() { - URI="$1" - curl -s -H "X-Api-Key: ${API_KEY}" "${CROWDSEC_API_URL}${URI}" -} - @test "adding decisions for multiple ips" { rune -0 cscli decisions add -i '1111:2222:3333:4444:5555:6666:7777:8888' assert_stderr --partial 'Decision successfully added' @@ -38,7 +31,7 @@ api() { } @test "stream start" { - rune -0 api "/v1/decisions/stream?startup=true" + rune -0 lapi-get "/v1/decisions/stream?startup=true" if is_db_mysql; then sleep 3; fi rune -0 jq -r '.new' <(output) assert_output --partial '1111:2222:3333:4444:5555:6666:7777:8888' @@ -49,7 +42,7 @@ api() { @test "stream cont (add)" { rune -0 cscli decisions add -i '1.2.3.5' if is_db_mysql; then sleep 3; fi - rune -0 api "/v1/decisions/stream" + rune -0 lapi-get "/v1/decisions/stream" rune -0 jq -r '.new' <(output) assert_output --partial '1.2.3.5' } @@ -57,13 +50,13 @@ api() { @test "stream cont (del)" { rune -0 cscli decisions delete -i '1.2.3.4' if is_db_mysql; then sleep 3; fi - rune -0 api "/v1/decisions/stream" + rune -0 lapi-get "/v1/decisions/stream" rune -0 jq -r '.deleted' 
<(output) assert_output --partial '1.2.3.4' } @test "stream restart" { - rune -0 api "/v1/decisions/stream?startup=true" + rune -0 lapi-get "/v1/decisions/stream?startup=true" api_out=${output} rune -0 jq -r '.deleted' <(output) assert_output --partial '1.2.3.4' diff --git a/test/lib/setup_file.sh b/test/lib/setup_file.sh index 1aca32fa6d0..3e6db0f12ff 100755 --- a/test/lib/setup_file.sh +++ b/test/lib/setup_file.sh @@ -276,3 +276,17 @@ rune() { run --separate-stderr "$@" } export -f rune + +# call the lapi through unix socket with an API_KEY (authenticates as a bouncer) +lapi-get() { + [[ -z "$1" ]] && { fail "lapi-get: missing path"; } + [[ -z "$API_KEY" ]] && { fail "lapi-get: missing API_KEY"; } + local socket + socket=$(config_get '.api.server.listen_socket') + [[ -z "$socket" ]] && { fail "lapi-get: missing .api.server.listen_socket"; } + + # curl needs a fake hostname when using a unix socket + curl -s -f -H "X-Api-Key: $API_KEY" --unix-socket "$socket" "http://lapi$1" +} +export -f lapi-get + From f0f27ee5cbb13fa192e103238f818aade84640d7 Mon Sep 17 00:00:00 2001 From: Manuel Sabban Date: Tue, 4 Jun 2024 22:39:13 +0200 Subject: [PATCH 160/318] add decently new version of python for centos 7 and 8 for testing (#3053) * add decently new version of python for centos 7 and 8 * update ansible requirements --- test/ansible/provision_dependencies.yml | 47 +++++++++++++++++++++++++ test/ansible/requirements.yml | 2 +- test/ansible/vars/python.yml | 1 + 3 files changed, 49 insertions(+), 1 deletion(-) create mode 100644 test/ansible/vars/python.yml diff --git a/test/ansible/provision_dependencies.yml b/test/ansible/provision_dependencies.yml index bcfe8fccafb..144adf8ca36 100644 --- a/test/ansible/provision_dependencies.yml +++ b/test/ansible/provision_dependencies.yml @@ -1,6 +1,40 @@ # vim: set ft=yaml.ansible: --- +- name: "Fix EOL'd centos Stream 8" + hosts: all + tasks: + - name: "update repositories file" + ansible.builtin.find: + paths: /etc/yum.repos.d + 
patterns: "*.repo" + register: "repo_files" + when: + - ansible_facts.distribution == "CentOS" + - ansible_facts.distribution_major_version == '8' + - name: Replace old text with new text + become: true + ansible.builtin.replace: + path: "{{ item.path }}" + regexp: 'mirrorlist' + replace: '#mirrorlist' + loop: "{{ repo_files.files }}" + when: + - ansible_facts.distribution == "CentOS" + - ansible_facts.distribution_major_version == '8' + - repo_files.matched > 0 + - name: Replace old text with new text + become: true + ansible.builtin.replace: + path: "{{ item.path }}" + regexp: '#baseurl=http://mirror.centos.org' + replace: 'baseurl=https://vault.centos.org' + loop: "{{ repo_files.files }}" + when: + - ansible_facts.distribution == "CentOS" + - ansible_facts.distribution_major_version == '8' + - repo_files.matched > 0 + - name: "Install required packages" hosts: all vars_files: @@ -17,6 +51,19 @@ - crowdsecurity.testing.re2 - crowdsecurity.testing.bats_requirements +- name: "Install recent python" + hosts: all + vars_files: + - vars/python.yml + tasks: + - name: role "crowdsecurity.testing.python3" + ansible.builtin.include_role: + name: crowdsecurity.testing.python3 + when: + - ansible_facts.distribution in ['CentOS', 'OracleLinux'] + - ansible_facts.distribution_major_version == '8' or ansible_facts.distribution_major_version == '7' + + - name: "Install Postgres" hosts: all become: true diff --git a/test/ansible/requirements.yml b/test/ansible/requirements.yml index a780e827f85..d5a9b80f659 100644 --- a/test/ansible/requirements.yml +++ b/test/ansible/requirements.yml @@ -14,7 +14,7 @@ collections: - name: ansible.posix - name: https://github.com/crowdsecurity/ansible-collection-crowdsecurity.testing.git type: git - version: v0.0.5 + version: v0.0.7 # - name: crowdsecurity.testing # source: ../../../crowdsecurity.testing diff --git a/test/ansible/vars/python.yml b/test/ansible/vars/python.yml new file mode 100644 index 00000000000..0cafdcc3d4c --- /dev/null +++ 
b/test/ansible/vars/python.yml @@ -0,0 +1 @@ +python_version: "3.12.3" From 2865b69855bab38f45912c84abf87babfa621095 Mon Sep 17 00:00:00 2001 From: Manuel Sabban Date: Wed, 5 Jun 2024 08:26:32 +0200 Subject: [PATCH 161/318] fix tests in 01_crowdsec.bats for packages testing (#3054) --- test/bats/01_crowdsec.bats | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/bats/01_crowdsec.bats b/test/bats/01_crowdsec.bats index 7a449ebd047..83072b0f159 100644 --- a/test/bats/01_crowdsec.bats +++ b/test/bats/01_crowdsec.bats @@ -152,7 +152,7 @@ teardown() { config_set '.crowdsec_service.acquisition_path=""' ACQUIS_DIR=$(config_get '.crowdsec_service.acquisition_dir') - rm -f "$ACQUIS_DIR" + rm -rf "$ACQUIS_DIR" config_set '.common.log_media="stdout"' rune -1 wait-for "$CROWDSEC" @@ -167,7 +167,7 @@ teardown() { config_set '.crowdsec_service.acquisition_path=""' ACQUIS_DIR=$(config_get '.crowdsec_service.acquisition_dir') - rm -f "$ACQUIS_DIR" + rm -rf "$ACQUIS_DIR" config_set '.crowdsec_service.acquisition_dir=""' config_set '.common.log_media="stdout"' From 3dd17b908153192a0c3cc4d825a6cf06cce7670f Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 5 Jun 2024 11:04:54 +0200 Subject: [PATCH 162/318] tests: log.Fatal -> return err (#3056) * tests: log.Fatal -> return err * lint --- pkg/acquisition/modules/kafka/kafka_test.go | 22 +++++++---- pkg/cwhub/cwhub_test.go | 3 +- pkg/exprhelpers/exprlib_test.go | 41 ++++++++++----------- pkg/exprhelpers/jsonextract_test.go | 18 ++++----- pkg/exprhelpers/xml_test.go | 11 ++++-- pkg/leakybucket/buckets_test.go | 32 +++++++++++----- pkg/parser/parsing_test.go | 13 +++---- 7 files changed, 80 insertions(+), 60 deletions(-) diff --git a/pkg/acquisition/modules/kafka/kafka_test.go b/pkg/acquisition/modules/kafka/kafka_test.go index 6eda37a377e..54715a81251 100644 --- a/pkg/acquisition/modules/kafka/kafka_test.go +++ b/pkg/acquisition/modules/kafka/kafka_test.go @@ -71,9 +71,8 @@ 
group_id: crowdsec`, }, } - subLogger := log.WithFields(log.Fields{ - "type": "kafka", - }) + subLogger := log.WithField("type", "kafka") + for _, test := range tests { k := KafkaSource{} err := k.Configure([]byte(test.config), subLogger, configuration.METRICS_NONE) @@ -82,7 +81,6 @@ group_id: crowdsec`, } func writeToKafka(w *kafka.Writer, logs []string) { - for idx, log := range logs { err := w.WriteMessages(context.Background(), kafka.Message{ Key: []byte(strconv.Itoa(idx)), @@ -106,7 +104,9 @@ func createTopic(topic string, broker string) { if err != nil { panic(err) } + var controllerConn *kafka.Conn + controllerConn, err = kafka.Dial("tcp", net.JoinHostPort(controller.Host, strconv.Itoa(controller.Port))) if err != nil { panic(err) @@ -131,6 +131,7 @@ func TestStreamingAcquisition(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } + tests := []struct { name string logs []string @@ -159,13 +160,14 @@ func TestStreamingAcquisition(t *testing.T) { Topic: "crowdsecplaintext", }) if w == nil { - log.Fatalf("Unable to setup a kafka producer") + t.Fatal("Unable to setup a kafka producer") } for _, ts := range tests { ts := ts t.Run(ts.name, func(t *testing.T) { k := KafkaSource{} + err := k.Configure([]byte(` source: kafka brokers: @@ -174,12 +176,14 @@ topic: crowdsecplaintext`), subLogger, configuration.METRICS_NONE) if err != nil { t.Fatalf("could not configure kafka source : %s", err) } + tomb := tomb.Tomb{} out := make(chan types.Event) err = k.StreamingAcquisition(out, &tomb) cstest.AssertErrorContains(t, err, ts.expectedErr) actualLines := 0 + go writeToKafka(w, ts.logs) READLOOP: for { @@ -195,13 +199,13 @@ topic: crowdsecplaintext`), subLogger, configuration.METRICS_NONE) tomb.Wait() }) } - } func TestStreamingAcquisitionWithSSL(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } + tests := []struct { name string logs []string @@ -229,13 +233,14 @@ func TestStreamingAcquisitionWithSSL(t 
*testing.T) { Topic: "crowdsecssl", }) if w2 == nil { - log.Fatalf("Unable to setup a kafka producer") + t.Fatal("Unable to setup a kafka producer") } for _, ts := range tests { ts := ts t.Run(ts.name, func(t *testing.T) { k := KafkaSource{} + err := k.Configure([]byte(` source: kafka brokers: @@ -250,12 +255,14 @@ tls: if err != nil { t.Fatalf("could not configure kafka source : %s", err) } + tomb := tomb.Tomb{} out := make(chan types.Event) err = k.StreamingAcquisition(out, &tomb) cstest.AssertErrorContains(t, err, ts.expectedErr) actualLines := 0 + go writeToKafka(w2, ts.logs) READLOOP: for { @@ -271,5 +278,4 @@ tls: tomb.Wait() }) } - } diff --git a/pkg/cwhub/cwhub_test.go b/pkg/cwhub/cwhub_test.go index 0a1363ebe09..09455fd65a9 100644 --- a/pkg/cwhub/cwhub_test.go +++ b/pkg/cwhub/cwhub_test.go @@ -1,6 +1,7 @@ package cwhub import ( + "fmt" "io" "net/http" "os" @@ -107,7 +108,7 @@ func (t *mockTransport) RoundTrip(req *http.Request) (*http.Response, error) { // FAKE PARSER resp, ok := responseByPath[req.URL.Path] if !ok { - log.Fatalf("unexpected url :/ %s", req.URL.Path) + return nil, fmt.Errorf("unexpected url: %s", req.URL.Path) } response.Body = io.NopCloser(strings.NewReader(resp)) diff --git a/pkg/exprhelpers/exprlib_test.go b/pkg/exprhelpers/exprlib_test.go index 38528083272..687465d9493 100644 --- a/pkg/exprhelpers/exprlib_test.go +++ b/pkg/exprhelpers/exprlib_test.go @@ -2,7 +2,6 @@ package exprhelpers import ( "context" - "fmt" "os" "testing" "time" @@ -22,9 +21,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) -var ( - TestFolder = "tests" -) +const TestFolder = "tests" func getDBClient(t *testing.T) *database.Client { t.Helper() @@ -78,21 +75,21 @@ func TestVisitor(t *testing.T) { name: "debug : can't compile", filter: "static_one.foo.toto == 'lol'", result: false, - err: fmt.Errorf("bad syntax"), + err: errors.New("bad syntax"), env: map[string]interface{}{"static_one": map[string]string{"foo": "bar"}}, }, { name: "debug : can't compile 
#2", filter: "static_one.f!oo.to/to == 'lol'", result: false, - err: fmt.Errorf("bad syntax"), + err: errors.New("bad syntax"), env: map[string]interface{}{"static_one": map[string]string{"foo": "bar"}}, }, { name: "debug : can't compile #3", filter: "", result: false, - err: fmt.Errorf("bad syntax"), + err: errors.New("bad syntax"), env: map[string]interface{}{"static_one": map[string]string{"foo": "bar"}}, }, } @@ -102,13 +99,13 @@ func TestVisitor(t *testing.T) { for _, test := range tests { compiledFilter, err := expr.Compile(test.filter, GetExprOptions(test.env)...) if err != nil && test.err == nil { - log.Fatalf("compile: %s", err) + t.Fatalf("compile: %s", err) } if compiledFilter != nil { result, err := expr.Run(compiledFilter, test.env) if err != nil && test.err == nil { - log.Fatalf("run : %s", err) + t.Fatalf("run: %s", err) } if isOk := assert.Equal(t, test.result, result); !isOk { @@ -193,10 +190,12 @@ func TestDistanceHelper(t *testing.T) { "lat2": test.lat2, "lon2": test.lon2, } + vm, err := expr.Compile(test.expr, GetExprOptions(env)...) if err != nil { t.Fatalf("pattern:%s val:%s NOK %s", test.lat1, test.lon1, err) } + ret, err := expr.Run(vm, env) if test.valid { require.NoError(t, err) @@ -243,12 +242,12 @@ func TestRegexpCacheBehavior(t *testing.T) { func TestRegexpInFile(t *testing.T) { if err := Init(nil); err != nil { - log.Fatal(err) + t.Fatal(err) } err := FileInit(TestFolder, "test_data_re.txt", "regex") if err != nil { - log.Fatal(err) + t.Fatal(err) } tests := []struct { @@ -286,23 +285,23 @@ func TestRegexpInFile(t *testing.T) { for _, test := range tests { compiledFilter, err := expr.Compile(test.filter, GetExprOptions(map[string]interface{}{})...) 
if err != nil { - log.Fatal(err) + t.Fatal(err) } result, err := expr.Run(compiledFilter, map[string]interface{}{}) if err != nil { - log.Fatal(err) + t.Fatal(err) } if isOk := assert.Equal(t, test.result, result); !isOk { - t.Fatalf("test '%s' : NOK", test.name) + t.Fatalf("test '%s': NOK", test.name) } } } func TestFileInit(t *testing.T) { if err := Init(nil); err != nil { - log.Fatal(err) + t.Fatal(err) } tests := []struct { @@ -340,7 +339,7 @@ func TestFileInit(t *testing.T) { for _, test := range tests { err := FileInit(TestFolder, test.filename, test.types) if err != nil { - log.Fatal(err) + t.Fatal(err) } switch test.types { @@ -376,12 +375,12 @@ func TestFileInit(t *testing.T) { func TestFile(t *testing.T) { if err := Init(nil); err != nil { - log.Fatal(err) + t.Fatal(err) } err := FileInit(TestFolder, "test_data.txt", "string") if err != nil { - log.Fatal(err) + t.Fatal(err) } tests := []struct { @@ -419,12 +418,12 @@ func TestFile(t *testing.T) { for _, test := range tests { compiledFilter, err := expr.Compile(test.filter, GetExprOptions(map[string]interface{}{})...) 
if err != nil { - log.Fatal(err) + t.Fatal(err) } result, err := expr.Run(compiledFilter, map[string]interface{}{}) if err != nil { - log.Fatal(err) + t.Fatal(err) } if isOk := assert.Equal(t, test.result, result); !isOk { @@ -1426,7 +1425,7 @@ func TestParseUnixTime(t *testing.T) { func TestIsIp(t *testing.T) { if err := Init(nil); err != nil { - log.Fatal(err) + t.Fatal(err) } tests := []struct { diff --git a/pkg/exprhelpers/jsonextract_test.go b/pkg/exprhelpers/jsonextract_test.go index 1bd45aa2d6a..2ee3ada5ea7 100644 --- a/pkg/exprhelpers/jsonextract_test.go +++ b/pkg/exprhelpers/jsonextract_test.go @@ -3,8 +3,6 @@ package exprhelpers import ( "testing" - log "github.com/sirupsen/logrus" - "github.com/antonmedv/expr" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -12,12 +10,12 @@ import ( func TestJsonExtract(t *testing.T) { if err := Init(nil); err != nil { - log.Fatal(err) + t.Fatal(err) } err := FileInit(TestFolder, "test_data_re.txt", "regex") if err != nil { - log.Fatal(err) + t.Fatal(err) } tests := []struct { @@ -67,12 +65,12 @@ func TestJsonExtract(t *testing.T) { func TestJsonExtractUnescape(t *testing.T) { if err := Init(nil); err != nil { - log.Fatal(err) + t.Fatal(err) } err := FileInit(TestFolder, "test_data_re.txt", "regex") if err != nil { - log.Fatal(err) + t.Fatal(err) } tests := []struct { @@ -115,12 +113,12 @@ func TestJsonExtractUnescape(t *testing.T) { func TestJsonExtractSlice(t *testing.T) { if err := Init(nil); err != nil { - log.Fatal(err) + t.Fatal(err) } err := FileInit(TestFolder, "test_data_re.txt", "regex") if err != nil { - log.Fatal(err) + t.Fatal(err) } tests := []struct { @@ -178,12 +176,12 @@ func TestJsonExtractSlice(t *testing.T) { func TestJsonExtractObject(t *testing.T) { if err := Init(nil); err != nil { - log.Fatal(err) + t.Fatal(err) } err := FileInit(TestFolder, "test_data_re.txt", "regex") if err != nil { - log.Fatal(err) + t.Fatal(err) } tests := []struct { diff --git 
a/pkg/exprhelpers/xml_test.go b/pkg/exprhelpers/xml_test.go index 516387f764b..42823884025 100644 --- a/pkg/exprhelpers/xml_test.go +++ b/pkg/exprhelpers/xml_test.go @@ -9,7 +9,7 @@ import ( func TestXMLGetAttributeValue(t *testing.T) { if err := Init(nil); err != nil { - log.Fatal(err) + t.Fatal(err) } tests := []struct { @@ -58,17 +58,19 @@ func TestXMLGetAttributeValue(t *testing.T) { for _, test := range tests { result, _ := XMLGetAttributeValue(test.xmlString, test.path, test.attribute) + isOk := assert.Equal(t, test.expectResult, result) if !isOk { t.Fatalf("test '%s' failed", test.name) } + log.Printf("test '%s' : OK", test.name) } - } + func TestXMLGetNodeValue(t *testing.T) { if err := Init(nil); err != nil { - log.Fatal(err) + t.Fatal(err) } tests := []struct { @@ -105,11 +107,12 @@ func TestXMLGetNodeValue(t *testing.T) { for _, test := range tests { result, _ := XMLGetNodeValue(test.xmlString, test.path) + isOk := assert.Equal(t, test.expectResult, result) if !isOk { t.Fatalf("test '%s' failed", test.name) } + log.Printf("test '%s' : OK", test.name) } - } diff --git a/pkg/leakybucket/buckets_test.go b/pkg/leakybucket/buckets_test.go index ad0d1a79879..4bb3c96759e 100644 --- a/pkg/leakybucket/buckets_test.go +++ b/pkg/leakybucket/buckets_test.go @@ -52,7 +52,7 @@ func TestBucket(t *testing.T) { err = exprhelpers.Init(nil) if err != nil { - log.Fatalf("exprhelpers init failed: %s", err) + t.Fatalf("exprhelpers init failed: %s", err) } if envSetting != "" { @@ -61,25 +61,31 @@ func TestBucket(t *testing.T) { } } else { wg := new(sync.WaitGroup) + fds, err := os.ReadDir(testdata) if err != nil { t.Fatalf("Unable to read test directory : %s", err) } + for _, fd := range fds { if fd.Name() == "hub" { continue } + fname := filepath.Join(testdata, fd.Name()) log.Infof("Running test on %s", fname) tomb.Go(func() error { wg.Add(1) defer wg.Done() + if err := testOneBucket(t, hub, fname, tomb); err != nil { t.Fatalf("Test '%s' failed : %s", fname, err) } + return 
nil }) } + wg.Wait() } } @@ -92,12 +98,12 @@ func watchTomb(tomb *tomb.Tomb) { log.Warning("Tomb is dead") break } + time.Sleep(100 * time.Millisecond) } } func testOneBucket(t *testing.T, hub *cwhub.Hub, dir string, tomb *tomb.Tomb) error { - var ( holders []BucketFactory @@ -105,9 +111,9 @@ func testOneBucket(t *testing.T, hub *cwhub.Hub, dir string, tomb *tomb.Tomb) er stagecfg string stages []parser.Stagefile err error - buckets *Buckets ) - buckets = NewBuckets() + + buckets := NewBuckets() /*load the scenarios*/ stagecfg = dir + "/scenarios.yaml" @@ -117,51 +123,59 @@ func testOneBucket(t *testing.T, hub *cwhub.Hub, dir string, tomb *tomb.Tomb) er tmpl, err := template.New("test").Parse(string(stagefiles)) if err != nil { - return fmt.Errorf("failed to parse template %s : %s", stagefiles, err) + return fmt.Errorf("failed to parse template %s: %w", stagefiles, err) } + var out bytes.Buffer + err = tmpl.Execute(&out, map[string]string{"TestDirectory": dir}) if err != nil { panic(err) } + if err := yaml.UnmarshalStrict(out.Bytes(), &stages); err != nil { - log.Fatalf("failed unmarshaling %s : %s", stagecfg, err) + t.Fatalf("failed unmarshaling %s : %s", stagecfg, err) } + files := []string{} for _, x := range stages { files = append(files, x.Filename) } cscfg := &csconfig.CrowdsecServiceCfg{} + holders, response, err := LoadBuckets(cscfg, hub, files, tomb, buckets, false) if err != nil { t.Fatalf("failed loading bucket : %s", err) } + tomb.Go(func() error { watchTomb(tomb) return nil }) + if !testFile(t, filepath.Join(dir, "test.json"), filepath.Join(dir, "in-buckets_state.json"), holders, response, buckets) { return fmt.Errorf("tests from %s failed", dir) } + return nil } func testFile(t *testing.T, file string, bs string, holders []BucketFactory, response chan types.Event, buckets *Buckets) bool { - var results []types.Event var dump bool - //should we restore + // should we restore if _, err := os.Stat(bs); err == nil { dump = true + if err := 
LoadBucketsState(bs, buckets, holders); err != nil { t.Fatalf("Failed to load bucket state : %s", err) } } /* now we can load the test files */ - //process the yaml + // process the yaml yamlFile, err := os.Open(file) if err != nil { t.Errorf("yamlFile.Get err #%v ", err) diff --git a/pkg/parser/parsing_test.go b/pkg/parser/parsing_test.go index 902a3cccbc7..ed7c0d3df9a 100644 --- a/pkg/parser/parsing_test.go +++ b/pkg/parser/parsing_test.go @@ -100,11 +100,11 @@ func testOneParser(pctx *UnixParserCtx, ectx EnricherCtx, dir string, b *testing parser_cfg_file := fmt.Sprintf("%s/parsers.yaml", dir) cfg, err := os.ReadFile(parser_cfg_file) if err != nil { - return fmt.Errorf("failed opening %s : %s", parser_cfg_file, err) + return fmt.Errorf("failed opening %s: %w", parser_cfg_file, err) } tmpl, err := template.New("test").Parse(string(cfg)) if err != nil { - return fmt.Errorf("failed to parse template %s : %s", cfg, err) + return fmt.Errorf("failed to parse template %s: %w", cfg, err) } var out bytes.Buffer err = tmpl.Execute(&out, map[string]string{"TestDirectory": dir}) @@ -112,12 +112,12 @@ func testOneParser(pctx *UnixParserCtx, ectx EnricherCtx, dir string, b *testing panic(err) } if err = yaml.UnmarshalStrict(out.Bytes(), &parser_configs); err != nil { - return fmt.Errorf("failed unmarshaling %s : %s", parser_cfg_file, err) + return fmt.Errorf("failed unmarshaling %s: %w", parser_cfg_file, err) } pnodes, err = LoadStages(parser_configs, pctx, ectx) if err != nil { - return fmt.Errorf("unable to load parser config : %s", err) + return fmt.Errorf("unable to load parser config: %w", err) } //TBD: Load post overflows @@ -147,7 +147,7 @@ func prepTests() (*UnixParserCtx, EnricherCtx, error) { err = exprhelpers.Init(nil) if err != nil { - log.Fatalf("exprhelpers init failed: %s", err) + return nil, ectx, fmt.Errorf("exprhelpers init failed: %w", err) } //Load enrichment @@ -158,7 +158,7 @@ func prepTests() (*UnixParserCtx, EnricherCtx, error) { } ectx, err = 
Loadplugin() if err != nil { - log.Fatalf("failed to load plugin geoip : %v", err) + return nil, ectx, fmt.Errorf("failed to load plugin geoip: %v", err) } log.Printf("Loaded -> %+v", ectx) @@ -299,7 +299,6 @@ func testSubSet(testSet TestFile, pctx UnixParserCtx, nodes []Node) (bool, error only the keys of the expected part are checked against result */ if len(testSet.Results) == 0 && len(results) == 0 { - log.Fatal("No results, no tests, abort.") return false, errors.New("no tests, no results") } From 73e03ef556e15139d4ab30309703c688e7806326 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 5 Jun 2024 11:37:57 +0200 Subject: [PATCH 163/318] lint: enable revive/if-return, revive/error-strings (#3057) * lint: enable revive/if-return, revive/error-strings * lint --- .golangci.yml | 4 --- cmd/crowdsec-cli/console.go | 5 +-- cmd/crowdsec-cli/itemcli.go | 6 +--- cmd/crowdsec-cli/lapi.go | 11 ++---- cmd/crowdsec-cli/metrics.go | 6 +--- cmd/crowdsec-cli/notifications.go | 5 +-- cmd/crowdsec-cli/papi.go | 5 +-- cmd/crowdsec-cli/setup.go | 6 +--- cmd/crowdsec/api.go | 5 +-- pkg/csconfig/api.go | 6 +--- pkg/csconfig/fflag.go | 5 +-- pkg/exprhelpers/helpers.go | 42 +++++++++++++++-------- pkg/parser/node.go | 14 ++++---- pkg/parser/parsing_test.go | 57 ++++++++++++++++++++++++------- 14 files changed, 89 insertions(+), 88 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 61278185633..fd237c95bcc 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -140,8 +140,6 @@ linters-settings: disabled: true - name: error-naming disabled: true - - name: error-strings - disabled: true - name: flag-parameter disabled: true - name: function-result-limit @@ -150,8 +148,6 @@ linters-settings: disabled: true - name: get-return disabled: true - - name: if-return - disabled: true - name: increment-decrement disabled: true - name: indent-error-flow diff --git a/cmd/crowdsec-cli/console.go b/cmd/crowdsec-cli/console.go index eab24a5d709..972d43122cc 
100644 --- a/cmd/crowdsec-cli/console.go +++ b/cmd/crowdsec-cli/console.go @@ -51,11 +51,8 @@ func (cli *cliConsole) NewCommand() *cobra.Command { if err := require.CAPI(cfg); err != nil { return err } - if err := require.CAPIRegistered(cfg); err != nil { - return err - } - return nil + return require.CAPIRegistered(cfg) }, } diff --git a/cmd/crowdsec-cli/itemcli.go b/cmd/crowdsec-cli/itemcli.go index e1a908f3492..66a3f4fc051 100644 --- a/cmd/crowdsec-cli/itemcli.go +++ b/cmd/crowdsec-cli/itemcli.go @@ -442,11 +442,7 @@ func (cli cliItem) list(args []string, all bool) error { return err } - if err = listItems(color.Output, []string{cli.name}, items, false, cfg.Cscli.Output); err != nil { - return err - } - - return nil + return listItems(color.Output, []string{cli.name}, items, false, cfg.Cscli.Output) } func (cli cliItem) newListCmd() *cobra.Command { diff --git a/cmd/crowdsec-cli/lapi.go b/cmd/crowdsec-cli/lapi.go index 0d7ebc64431..fcef137e424 100644 --- a/cmd/crowdsec-cli/lapi.go +++ b/cmd/crowdsec-cli/lapi.go @@ -275,11 +275,7 @@ func (cli *cliLapi) addContext(key string, values []string) error { cfg.Crowdsec.ContextToSend[key] = data } - if err := cfg.Crowdsec.DumpContextConfigFile(); err != nil { - return err - } - - return nil + return cfg.Crowdsec.DumpContextConfigFile() } func (cli *cliLapi) newContextAddCmd() *cobra.Command { @@ -307,10 +303,7 @@ cscli lapi context add --value evt.Meta.source_ip --value evt.Meta.target_user } if keyToAdd != "" { - if err := cli.addContext(keyToAdd, valuesToAdd); err != nil { - return err - } - return nil + return cli.addContext(keyToAdd, valuesToAdd) } for _, v := range valuesToAdd { diff --git a/cmd/crowdsec-cli/metrics.go b/cmd/crowdsec-cli/metrics.go index ad2b9ee79d8..7858a7a7b2b 100644 --- a/cmd/crowdsec-cli/metrics.go +++ b/cmd/crowdsec-cli/metrics.go @@ -331,11 +331,7 @@ func (cli *cliMetrics) show(sections []string, url string, noUnit bool) error { } } - if err := ms.Format(color.Output, sections, 
cfg.Cscli.Output, noUnit); err != nil { - return err - } - - return nil + return ms.Format(color.Output, sections, cfg.Cscli.Output, noUnit) } func (cli *cliMetrics) NewCommand() *cobra.Command { diff --git a/cmd/crowdsec-cli/notifications.go b/cmd/crowdsec-cli/notifications.go index d678bf0e306..768d6a1c47e 100644 --- a/cmd/crowdsec-cli/notifications.go +++ b/cmd/crowdsec-cli/notifications.go @@ -66,11 +66,8 @@ func (cli *cliNotifications) NewCommand() *cobra.Command { if err := cfg.LoadAPIClient(); err != nil { return fmt.Errorf("loading api client: %w", err) } - if err := require.Notifications(cfg); err != nil { - return err - } - return nil + return require.Notifications(cfg) }, } diff --git a/cmd/crowdsec-cli/papi.go b/cmd/crowdsec-cli/papi.go index bea03c12848..b8ed0fd7356 100644 --- a/cmd/crowdsec-cli/papi.go +++ b/cmd/crowdsec-cli/papi.go @@ -39,11 +39,8 @@ func (cli *cliPapi) NewCommand() *cobra.Command { if err := require.CAPI(cfg); err != nil { return err } - if err := require.PAPI(cfg); err != nil { - return err - } - return nil + return require.PAPI(cfg) }, } diff --git a/cmd/crowdsec-cli/setup.go b/cmd/crowdsec-cli/setup.go index 8a8c5a6c665..3e12b2465dd 100644 --- a/cmd/crowdsec-cli/setup.go +++ b/cmd/crowdsec-cli/setup.go @@ -320,11 +320,7 @@ func runSetupInstallHub(cmd *cobra.Command, args []string) error { return err } - if err = setup.InstallHubItems(hub, input, dryRun); err != nil { - return err - } - - return nil + return setup.InstallHubItems(hub, input, dryRun) } func runSetupValidate(cmd *cobra.Command, args []string) error { diff --git a/cmd/crowdsec/api.go b/cmd/crowdsec/api.go index 995345a25e4..c57b8d87cff 100644 --- a/cmd/crowdsec/api.go +++ b/cmd/crowdsec/api.go @@ -76,10 +76,7 @@ func serveAPIServer(apiServer *apiserver.APIServer) { <-apiTomb.Dying() // lock until go routine is dying pluginTomb.Kill(nil) log.Infof("serve: shutting down api server") - if err := apiServer.Shutdown(); err != nil { - return err - } - return nil + return 
apiServer.Shutdown() }) <-apiReady } diff --git a/pkg/csconfig/api.go b/pkg/csconfig/api.go index 4d10690735d..267e101cea8 100644 --- a/pkg/csconfig/api.go +++ b/pkg/csconfig/api.go @@ -451,9 +451,5 @@ func (c *Config) LoadAPIClient() error { return errors.New("no API client section in configuration") } - if err := c.API.Client.Load(); err != nil { - return err - } - - return nil + return c.API.Client.Load() } diff --git a/pkg/csconfig/fflag.go b/pkg/csconfig/fflag.go index 7311f9e751a..c86686889eb 100644 --- a/pkg/csconfig/fflag.go +++ b/pkg/csconfig/fflag.go @@ -12,10 +12,7 @@ import ( // LoadFeatureFlagsEnv parses the environment variables to enable feature flags. func LoadFeatureFlagsEnv(logger *log.Logger) error { - if err := fflag.Crowdsec.SetFromEnv(logger); err != nil { - return err - } - return nil + return fflag.Crowdsec.SetFromEnv(logger) } // FeatureFlagsFileLocation returns the path to the feature.yaml file. diff --git a/pkg/exprhelpers/helpers.go b/pkg/exprhelpers/helpers.go index 575425ef83e..e0a3a523c3e 100644 --- a/pkg/exprhelpers/helpers.go +++ b/pkg/exprhelpers/helpers.go @@ -35,9 +35,11 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) -var dataFile map[string][]string -var dataFileRegex map[string][]*regexp.Regexp -var dataFileRe2 map[string][]*re2.Regexp +var ( + dataFile map[string][]string + dataFileRegex map[string][]*regexp.Regexp + dataFileRe2 map[string][]*re2.Regexp +) // This is used to (optionally) cache regexp results for RegexpInFile operations var dataFileRegexCache map[string]gcache.Cache = make(map[string]gcache.Cache) @@ -57,9 +59,11 @@ var exprFunctionOptions []expr.Option var keyValuePattern = regexp.MustCompile(`(?P[^=\s]+)=(?:"(?P[^"\\]*(?:\\.[^"\\]*)*)"|(?P[^=\s]+)|\s*)`) -var geoIPCityReader *geoip2.Reader -var geoIPASNReader *geoip2.Reader -var geoIPRangeReader *maxminddb.Reader +var ( + geoIPCityReader *geoip2.Reader + geoIPASNReader *geoip2.Reader + geoIPRangeReader *maxminddb.Reader +) func GetExprOptions(ctx 
map[string]interface{}) []expr.Option { if len(exprFunctionOptions) == 0 { @@ -72,9 +76,11 @@ func GetExprOptions(ctx map[string]interface{}) []expr.Option { )) } } + ret := []expr.Option{} ret = append(ret, exprFunctionOptions...) ret = append(ret, expr.Env(ctx)) + return ret } @@ -106,9 +112,11 @@ func GeoIPClose() { if geoIPCityReader != nil { geoIPCityReader.Close() } + if geoIPASNReader != nil { geoIPASNReader.Close() } + if geoIPRangeReader != nil { geoIPRangeReader.Close() } @@ -124,16 +132,15 @@ func Init(databaseClient *database.Client) error { } func RegexpCacheInit(filename string, CacheCfg types.DataSource) error { - - //cache is explicitly disabled + // cache is explicitly disabled if CacheCfg.Cache != nil && !*CacheCfg.Cache { return nil } - //cache is implicitly disabled if no cache config is provided + // cache is implicitly disabled if no cache config is provided if CacheCfg.Strategy == nil && CacheCfg.TTL == nil && CacheCfg.Size == nil { return nil } - //cache is enabled + // cache is enabled if CacheCfg.Size == nil { CacheCfg.Size = ptr.Of(50) @@ -144,6 +151,7 @@ func RegexpCacheInit(filename string, CacheCfg types.DataSource) error { if CacheCfg.Strategy == nil { CacheCfg.Strategy = ptr.Of("LRU") } + switch *CacheCfg.Strategy { case "LRU": gc = gc.LRU() @@ -158,14 +166,17 @@ func RegexpCacheInit(filename string, CacheCfg types.DataSource) error { if CacheCfg.TTL != nil { gc.Expiration(*CacheCfg.TTL) } + cache := gc.Build() dataFileRegexCache[filename] = cache + return nil } // UpdateCacheMetrics is called directly by the prom handler func UpdateRegexpCacheMetrics() { RegexpCacheMetrics.Reset() + for name := range dataFileRegexCache { RegexpCacheMetrics.With(prometheus.Labels{"name": name}).Set(float64(dataFileRegexCache[name].Len(true))) } @@ -173,10 +184,12 @@ func UpdateRegexpCacheMetrics() { func FileInit(fileFolder string, filename string, fileType string) error { log.Debugf("init (folder:%s) (file:%s) (type:%s)", fileFolder, filename, 
fileType) + if fileType == "" { log.Debugf("ignored file %s%s because no type specified", fileFolder, filename) return nil } + ok, err := existsInFileMaps(filename, fileType) if ok { log.Debugf("ignored file %s%s because already loaded", fileFolder, filename) @@ -187,6 +200,7 @@ func FileInit(fileFolder string, filename string, fileType string) error { } filepath := filepath.Join(fileFolder, filename) + file, err := os.Open(filepath) if err != nil { return err @@ -201,28 +215,26 @@ func FileInit(fileFolder string, filename string, fileType string) error { if len(scanner.Text()) == 0 { //skip empty lines continue } + switch fileType { case "regex", "regexp": if fflag.Re2RegexpInfileSupport.IsEnabled() { dataFileRe2[filename] = append(dataFileRe2[filename], re2.MustCompile(scanner.Text())) continue } + dataFileRegex[filename] = append(dataFileRegex[filename], regexp.MustCompile(scanner.Text())) case "string": dataFile[filename] = append(dataFile[filename], scanner.Text()) } } - if err := scanner.Err(); err != nil { - return err - } - return nil + return scanner.Err() } // Expr helpers func Distinct(params ...any) (any, error) { - if rt := reflect.TypeOf(params[0]).Kind(); rt != reflect.Slice && rt != reflect.Array { return nil, nil } diff --git a/pkg/parser/node.go b/pkg/parser/node.go index 11ffb8aa7fa..74c9e94a331 100644 --- a/pkg/parser/node.go +++ b/pkg/parser/node.go @@ -65,8 +65,7 @@ type Node struct { } func (n *Node) validate(ectx EnricherCtx) error { - - //stage is being set automagically + // stage is being set automagically if n.Stage == "" { return errors.New("stage needs to be an existing stage") } @@ -244,12 +243,14 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[stri gstr = val } else { clog.Debugf("(%s) target field '%s' doesn't exist in %v", n.rn, n.Grok.TargetField, p.Parsed) + NodeState = false } } else if n.Grok.RunTimeValue != nil { output, err := exprhelpers.Run(n.Grok.RunTimeValue, cachedExprEnv, clog, n.Debug) if 
err != nil { clog.Warningf("failed to run RunTimeValue : %v", err) + NodeState = false } @@ -352,6 +353,7 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[stri if err != nil { clog.Tracef("\tNode (%s) failed : %v", leaf.rn, err) clog.Debugf("Event leaving node : ko") + return false, err } @@ -498,7 +500,7 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error { n.Grok.RunTimeRegexp, err = pctx.Grok.Get(n.Grok.RegexpName) if err != nil { - return fmt.Errorf("unable to find grok '%s' : %v", n.Grok.RegexpName, err) + return fmt.Errorf("unable to find grok '%s': %v", n.Grok.RegexpName, err) } if n.Grok.RunTimeRegexp == nil { @@ -636,9 +638,5 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error { return errors.New("Node is empty") } - if err := n.validate(ectx); err != nil { - return err - } - - return nil + return n.validate(ectx) } diff --git a/pkg/parser/parsing_test.go b/pkg/parser/parsing_test.go index ed7c0d3df9a..3193631f4dd 100644 --- a/pkg/parser/parsing_test.go +++ b/pkg/parser/parsing_test.go @@ -28,13 +28,16 @@ var debug bool = false func TestParser(t *testing.T) { debug = true + log.SetLevel(log.InfoLevel) - var envSetting = os.Getenv("TEST_ONLY") + envSetting := os.Getenv("TEST_ONLY") + pctx, ectx, err := prepTests() if err != nil { t.Fatalf("failed to load env : %s", err) } - //Init the enricher + + // Init the enricher if envSetting != "" { if err := testOneParser(pctx, ectx, envSetting, nil); err != nil { t.Fatalf("Test '%s' failed : %s", envSetting, err) @@ -44,12 +47,15 @@ func TestParser(t *testing.T) { if err != nil { t.Fatalf("Unable to read test directory : %s", err) } + for _, fd := range fds { if !fd.IsDir() { continue } + fname := "./tests/" + fd.Name() log.Infof("Running test on %s", fname) + if err := testOneParser(pctx, ectx, fname, nil); err != nil { t.Fatalf("Test '%s' failed : %s", fname, err) } @@ -59,13 +65,17 @@ func TestParser(t *testing.T) { func BenchmarkParser(t 
*testing.B) { log.Printf("start bench !!!!") + debug = false + log.SetLevel(log.ErrorLevel) + pctx, ectx, err := prepTests() if err != nil { t.Fatalf("failed to load env : %s", err) } - var envSetting = os.Getenv("TEST_ONLY") + + envSetting := os.Getenv("TEST_ONLY") if envSetting != "" { if err := testOneParser(pctx, ectx, envSetting, t); err != nil { @@ -76,12 +86,15 @@ func BenchmarkParser(t *testing.B) { if err != nil { t.Fatalf("Unable to read test directory : %s", err) } + for _, fd := range fds { if !fd.IsDir() { continue } + fname := "./tests/" + fd.Name() log.Infof("Running test on %s", fname) + if err := testOneParser(pctx, ectx, fname, t); err != nil { t.Fatalf("Test '%s' failed : %s", fname, err) } @@ -91,26 +104,32 @@ func BenchmarkParser(t *testing.B) { func testOneParser(pctx *UnixParserCtx, ectx EnricherCtx, dir string, b *testing.B) error { var ( - err error - pnodes []Node - + err error + pnodes []Node parser_configs []Stagefile ) + log.Warningf("testing %s", dir) + parser_cfg_file := fmt.Sprintf("%s/parsers.yaml", dir) + cfg, err := os.ReadFile(parser_cfg_file) if err != nil { return fmt.Errorf("failed opening %s: %w", parser_cfg_file, err) } + tmpl, err := template.New("test").Parse(string(cfg)) if err != nil { return fmt.Errorf("failed to parse template %s: %w", cfg, err) } + var out bytes.Buffer + err = tmpl.Execute(&out, map[string]string{"TestDirectory": dir}) if err != nil { panic(err) } + if err = yaml.UnmarshalStrict(out.Bytes(), &parser_configs); err != nil { return fmt.Errorf("failed unmarshaling %s: %w", parser_cfg_file, err) } @@ -120,20 +139,23 @@ func testOneParser(pctx *UnixParserCtx, ectx EnricherCtx, dir string, b *testing return fmt.Errorf("unable to load parser config: %w", err) } - //TBD: Load post overflows - //func testFile(t *testing.T, file string, pctx UnixParserCtx, nodes []Node) bool { + // TBD: Load post overflows + // func testFile(t *testing.T, file string, pctx UnixParserCtx, nodes []Node) bool { parser_test_file := 
fmt.Sprintf("%s/test.yaml", dir) tests := loadTestFile(parser_test_file) count := 1 + if b != nil { count = b.N b.ResetTimer() } + for n := 0; n < count; n++ { if !testFile(tests, *pctx, pnodes) { - return errors.New("test failed !") + return errors.New("test failed") } } + return nil } @@ -150,27 +172,31 @@ func prepTests() (*UnixParserCtx, EnricherCtx, error) { return nil, ectx, fmt.Errorf("exprhelpers init failed: %w", err) } - //Load enrichment + // Load enrichment datadir := "./test_data/" + err = exprhelpers.GeoIPInit(datadir) if err != nil { log.Fatalf("unable to initialize GeoIP: %s", err) } + ectx, err = Loadplugin() if err != nil { return nil, ectx, fmt.Errorf("failed to load plugin geoip: %v", err) } + log.Printf("Loaded -> %+v", ectx) - //Load the parser patterns + // Load the parser patterns cfgdir := "../../config/" /* this should be refactored to 2 lines :p */ // Init the parser pctx, err = Init(map[string]interface{}{"patterns": cfgdir + string("/patterns/"), "data": "./tests/"}) if err != nil { - return nil, ectx, fmt.Errorf("failed to initialize parser : %v", err) + return nil, ectx, fmt.Errorf("failed to initialize parser: %v", err) } + return pctx, ectx, nil } @@ -179,21 +205,28 @@ func loadTestFile(file string) []TestFile { if err != nil { log.Fatalf("yamlFile.Get err #%v ", err) } + dec := yaml.NewDecoder(yamlFile) dec.SetStrict(true) var testSet []TestFile + for { tf := TestFile{} + err := dec.Decode(&tf) if err != nil { if errors.Is(err, io.EOF) { break } + log.Fatalf("Failed to load testfile '%s' yaml error : %v", file, err) + return nil } + testSet = append(testSet, tf) } + return testSet } From 9e859c0c8c97c3bec6564170ab796847ddf98e24 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 5 Jun 2024 12:01:13 +0200 Subject: [PATCH 164/318] refactor pkg/database, pkg/models (#3022) * pkg/models: Source.String() * pkg/models: Alert.FormatAsStrings() * cscli alerts list: sort remediation keys avoid printing 
"ban: ... captcha: ..." in one line, and "captcha: ... ban: ..." in another * remove unused methods; drop else branch * lint --- cmd/crowdsec-cli/alerts.go | 6 +- pkg/database/alerts.go | 100 +------------------------------- pkg/models/helpers.go | 113 +++++++++++++++++++++++++++++++++---- 3 files changed, 109 insertions(+), 110 deletions(-) diff --git a/cmd/crowdsec-cli/alerts.go b/cmd/crowdsec-cli/alerts.go index 9f4c5093afc..ca99b54f59f 100644 --- a/cmd/crowdsec-cli/alerts.go +++ b/cmd/crowdsec-cli/alerts.go @@ -19,6 +19,8 @@ import ( "github.com/spf13/cobra" "gopkg.in/yaml.v3" + "github.com/crowdsecurity/go-cs-lib/maptools" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/apiclient" "github.com/crowdsecurity/crowdsec/pkg/cwversion" @@ -41,12 +43,12 @@ func DecisionsFromAlert(alert *models.Alert) string { decMap[k] = v + 1 } - for k, v := range decMap { + for _, key := range maptools.SortedKeys(decMap) { if len(ret) > 0 { ret += " " } - ret += fmt.Sprintf("%s:%d", k, v) + ret += fmt.Sprintf("%s:%d", key, decMap[key]) } return ret diff --git a/pkg/database/alerts.go b/pkg/database/alerts.go index 37e9397709e..d9efe4c254b 100644 --- a/pkg/database/alerts.go +++ b/pkg/database/alerts.go @@ -9,7 +9,6 @@ import ( "strings" "time" - "github.com/davecgh/go-spew/spew" "github.com/mattn/go-sqlite3" "github.com/pkg/errors" log "github.com/sirupsen/logrus" @@ -33,101 +32,6 @@ const ( maxLockRetries = 10 // how many times to retry a bulk operation when sqlite3.ErrBusy is encountered ) -func formatAlertCN(source models.Source) string { - cn := source.Cn - - if source.AsNumber != "" { - cn += "/" + source.AsNumber - } - - return cn -} - -func formatAlertSource(alert *models.Alert) string { - if alert.Source == nil || alert.Source.Scope == nil || *alert.Source.Scope == "" { - return "empty source" - } - - if *alert.Source.Scope == types.Ip { - ret := "ip " + *alert.Source.Value - - cn := formatAlertCN(*alert.Source) - if cn 
!= "" { - ret += " (" + cn + ")" - } - - return ret - } - - if *alert.Source.Scope == types.Range { - ret := "range " + *alert.Source.Value - - cn := formatAlertCN(*alert.Source) - if cn != "" { - ret += " (" + cn + ")" - } - - return ret - } - - return *alert.Source.Scope + " " + *alert.Source.Value -} - -func formatAlertAsString(machineID string, alert *models.Alert) []string { - src := formatAlertSource(alert) - - msg := "empty scenario" - if alert.Scenario != nil && *alert.Scenario != "" { - msg = *alert.Scenario - } else if alert.Message != nil && *alert.Message != "" { - msg = *alert.Message - } - - reason := fmt.Sprintf("%s by %s", msg, src) - - if len(alert.Decisions) == 0 { - return []string{fmt.Sprintf("(%s) alert : %s", machineID, reason)} - } - - var retStr []string - - if alert.Decisions[0].Origin != nil && *alert.Decisions[0].Origin == types.CscliImportOrigin { - return []string{fmt.Sprintf("(%s) alert : %s", machineID, reason)} - } - - for i, decisionItem := range alert.Decisions { - decision := "" - if alert.Simulated != nil && *alert.Simulated { - decision = "(simulated alert)" - } else if decisionItem.Simulated != nil && *decisionItem.Simulated { - decision = "(simulated decision)" - } - - if log.GetLevel() >= log.DebugLevel { - /*spew is expensive*/ - log.Debugf("%s", spew.Sdump(decisionItem)) - } - - if len(alert.Decisions) > 1 { - reason = fmt.Sprintf("%s for %d/%d decisions", msg, i+1, len(alert.Decisions)) - } - - var machineIDOrigin string - if machineID == "" { - machineIDOrigin = *decisionItem.Origin - } else { - machineIDOrigin = fmt.Sprintf("%s/%s", machineID, *decisionItem.Origin) - } - - decision += fmt.Sprintf("%s %s on %s %s", *decisionItem.Duration, - *decisionItem.Type, *decisionItem.Scope, *decisionItem.Value) - retStr = append(retStr, - fmt.Sprintf("(%s) %s : %s", machineIDOrigin, reason, decision)) - } - - return retStr -} - // CreateOrUpdateAlert is specific to PAPI : It checks if alert already exists, otherwise inserts it // 
if alert already exists, it checks it associated decisions already exists // if some associated decisions are missing (ie. previous insert ended up in error) it inserts them @@ -562,8 +466,9 @@ func (c *Client) createAlertChunk(machineID string, owner *ent.Machine, alerts [ stopAtTime = time.Now().UTC() } + /*display proper alert in logs*/ - for _, disp := range formatAlertAsString(machineID, alertItem) { + for _, disp := range alertItem.FormatAsStrings(machineID, log.StandardLogger()) { c.Log.Info(disp) } @@ -649,6 +554,7 @@ func (c *Client) createAlertChunk(machineID string, owner *ent.Machine, alerts [ if len(metaItem.Value) > 4095 { c.Log.Warningf("truncated meta %s : value too long", metaItem.Key) + value = value[:4095] } diff --git a/pkg/models/helpers.go b/pkg/models/helpers.go index 8c082550d48..5bc3f2a28b3 100644 --- a/pkg/models/helpers.go +++ b/pkg/models/helpers.go @@ -1,27 +1,33 @@ package models -func (a *Alert) HasRemediation() bool { - return true -} +import ( + "fmt" + + "github.com/davecgh/go-spew/spew" + log "github.com/sirupsen/logrus" +) + +const ( + // these are duplicated from pkg/types + // TODO XXX: de-duplicate + Ip = "Ip" + Range = "Range" + CscliImportOrigin = "cscli-import" +) func (a *Alert) GetScope() string { - if a.Source.Scope == nil { - return "" - } - return *a.Source.Scope + return a.Source.GetScope() } func (a *Alert) GetValue() string { - if a.Source.Value == nil { - return "" - } - return *a.Source.Value + return a.Source.GetValue() } func (a *Alert) GetScenario() string { if a.Scenario == nil { return "" } + return *a.Scenario } @@ -29,6 +35,7 @@ func (a *Alert) GetEventsCount() int32 { if a.EventsCount == nil { return 0 } + return *a.EventsCount } @@ -38,6 +45,7 @@ func (e *Event) GetMeta(key string) string { return meta.Value } } + return "" } @@ -47,6 +55,7 @@ func (a *Alert) GetMeta(key string) string { return meta.Value } } + return "" } @@ -54,6 +63,7 @@ func (s Source) GetValue() string { if s.Value == nil { return "" 
} + return *s.Value } @@ -61,6 +71,7 @@ func (s Source) GetScope() string { if s.Scope == nil { return "" } + return *s.Scope } @@ -69,8 +80,88 @@ func (s Source) GetAsNumberName() string { if s.AsNumber != "0" { ret += s.AsNumber } + if s.AsName != "" { ret += " " + s.AsName } + return ret } + +func (s *Source) String() string { + if s == nil || s.Scope == nil || *s.Scope == "" { + return "empty source" + } + + cn := s.Cn + + if s.AsNumber != "" { + cn += "/" + s.AsNumber + } + + if cn != "" { + cn = " (" + cn + ")" + } + + switch *s.Scope { + case Ip: + return "ip " + *s.Value + cn + case Range: + return "range " + *s.Value + cn + default: + return *s.Scope + " " + *s.Value + } +} + +func (a *Alert) FormatAsStrings(machineID string, logger *log.Logger) []string { + src := a.Source.String() + + msg := "empty scenario" + if a.Scenario != nil && *a.Scenario != "" { + msg = *a.Scenario + } else if a.Message != nil && *a.Message != "" { + msg = *a.Message + } + + reason := fmt.Sprintf("%s by %s", msg, src) + + if len(a.Decisions) == 0 { + return []string{fmt.Sprintf("(%s) alert : %s", machineID, reason)} + } + + var retStr []string + + if a.Decisions[0].Origin != nil && *a.Decisions[0].Origin == CscliImportOrigin { + return []string{fmt.Sprintf("(%s) alert : %s", machineID, reason)} + } + + for i, decisionItem := range a.Decisions { + decision := "" + if a.Simulated != nil && *a.Simulated { + decision = "(simulated alert)" + } else if decisionItem.Simulated != nil && *decisionItem.Simulated { + decision = "(simulated decision)" + } + + if logger.GetLevel() >= log.DebugLevel { + /*spew is expensive*/ + logger.Debug(spew.Sdump(decisionItem)) + } + + if len(a.Decisions) > 1 { + reason = fmt.Sprintf("%s for %d/%d decisions", msg, i+1, len(a.Decisions)) + } + + origin := *decisionItem.Origin + if machineID != "" { + origin = machineID + "/" + origin + } + + decision += fmt.Sprintf("%s %s on %s %s", *decisionItem.Duration, + *decisionItem.Type, *decisionItem.Scope, 
*decisionItem.Value) + retStr = append(retStr, + fmt.Sprintf("(%s) %s : %s", origin, reason, decision)) + } + + return retStr +} From 114a966129caa2962ada88f3368a27859f382a86 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 6 Jun 2024 10:21:03 +0200 Subject: [PATCH 165/318] refactor "cscli decisions" (#3061) * cscli decisions: extract method add() * cscli decisions: extract method delete() * cscli lapi: avoid apiclient global * cscli capi: avoid apiclient global * cscli decisions: error cleanup, avoid global for api client * update test * cscli alerts: extract method delete() * lint --- cmd/crowdsec-cli/alerts.go | 111 ++++----- cmd/crowdsec-cli/capi.go | 4 +- cmd/crowdsec-cli/decisions.go | 351 ++++++++++++++------------- cmd/crowdsec-cli/decisions_import.go | 6 +- cmd/crowdsec-cli/lapi.go | 7 +- test/bats/90_decisions.bats | 1 - 6 files changed, 252 insertions(+), 228 deletions(-) diff --git a/cmd/crowdsec-cli/alerts.go b/cmd/crowdsec-cli/alerts.go index ca99b54f59f..e1b070ab0fc 100644 --- a/cmd/crowdsec-cli/alerts.go +++ b/cmd/crowdsec-cli/alerts.go @@ -204,7 +204,7 @@ func (cli *cliAlerts) NewCommand() *cobra.Command { } apiURL, err := url.Parse(cfg.API.Client.Credentials.URL) if err != nil { - return fmt.Errorf("parsing api url %s: %w", apiURL, err) + return fmt.Errorf("parsing api url: %w", err) } cli.client, err = apiclient.NewClient(&apiclient.Config{ @@ -215,7 +215,7 @@ func (cli *cliAlerts) NewCommand() *cobra.Command { VersionPrefix: "v1", }) if err != nil { - return fmt.Errorf("new api client: %w", err) + return fmt.Errorf("creating api client: %w", err) } return nil @@ -370,6 +370,60 @@ cscli alerts list --type ban`, return cmd } +func (cli *cliAlerts) delete(alertDeleteFilter apiclient.AlertsDeleteOpts, ActiveDecision *bool, AlertDeleteAll bool, delAlertByID string, contained *bool) error { + var err error + + if !AlertDeleteAll { + if err = manageCliDecisionAlerts(alertDeleteFilter.IPEquals, 
alertDeleteFilter.RangeEquals, + alertDeleteFilter.ScopeEquals, alertDeleteFilter.ValueEquals); err != nil { + return err + } + if ActiveDecision != nil { + alertDeleteFilter.ActiveDecisionEquals = ActiveDecision + } + + if *alertDeleteFilter.ScopeEquals == "" { + alertDeleteFilter.ScopeEquals = nil + } + if *alertDeleteFilter.ValueEquals == "" { + alertDeleteFilter.ValueEquals = nil + } + if *alertDeleteFilter.ScenarioEquals == "" { + alertDeleteFilter.ScenarioEquals = nil + } + if *alertDeleteFilter.IPEquals == "" { + alertDeleteFilter.IPEquals = nil + } + if *alertDeleteFilter.RangeEquals == "" { + alertDeleteFilter.RangeEquals = nil + } + if contained != nil && *contained { + alertDeleteFilter.Contains = new(bool) + } + limit := 0 + alertDeleteFilter.Limit = &limit + } else { + limit := 0 + alertDeleteFilter = apiclient.AlertsDeleteOpts{Limit: &limit} + } + + var alerts *models.DeleteAlertsResponse + if delAlertByID == "" { + alerts, _, err = cli.client.Alerts.Delete(context.Background(), alertDeleteFilter) + if err != nil { + return fmt.Errorf("unable to delete alerts: %w", err) + } + } else { + alerts, _, err = cli.client.Alerts.DeleteOne(context.Background(), delAlertByID) + if err != nil { + return fmt.Errorf("unable to delete alert: %w", err) + } + } + log.Infof("%s alert(s) deleted", alerts.NbDeleted) + + return nil +} + func (cli *cliAlerts) NewDeleteCmd() *cobra.Command { var ( ActiveDecision *bool @@ -411,58 +465,7 @@ cscli alerts delete -s crowdsecurity/ssh-bf"`, return nil }, RunE: func(cmd *cobra.Command, _ []string) error { - var err error - - if !AlertDeleteAll { - if err = manageCliDecisionAlerts(alertDeleteFilter.IPEquals, alertDeleteFilter.RangeEquals, - alertDeleteFilter.ScopeEquals, alertDeleteFilter.ValueEquals); err != nil { - printHelp(cmd) - return err - } - if ActiveDecision != nil { - alertDeleteFilter.ActiveDecisionEquals = ActiveDecision - } - - if *alertDeleteFilter.ScopeEquals == "" { - alertDeleteFilter.ScopeEquals = nil - } - if 
*alertDeleteFilter.ValueEquals == "" { - alertDeleteFilter.ValueEquals = nil - } - if *alertDeleteFilter.ScenarioEquals == "" { - alertDeleteFilter.ScenarioEquals = nil - } - if *alertDeleteFilter.IPEquals == "" { - alertDeleteFilter.IPEquals = nil - } - if *alertDeleteFilter.RangeEquals == "" { - alertDeleteFilter.RangeEquals = nil - } - if contained != nil && *contained { - alertDeleteFilter.Contains = new(bool) - } - limit := 0 - alertDeleteFilter.Limit = &limit - } else { - limit := 0 - alertDeleteFilter = apiclient.AlertsDeleteOpts{Limit: &limit} - } - - var alerts *models.DeleteAlertsResponse - if delAlertByID == "" { - alerts, _, err = cli.client.Alerts.Delete(context.Background(), alertDeleteFilter) - if err != nil { - return fmt.Errorf("unable to delete alerts: %w", err) - } - } else { - alerts, _, err = cli.client.Alerts.DeleteOne(context.Background(), delAlertByID) - if err != nil { - return fmt.Errorf("unable to delete alert: %w", err) - } - } - log.Infof("%s alert(s) deleted", alerts.NbDeleted) - - return nil + return cli.delete(alertDeleteFilter, ActiveDecision, AlertDeleteAll, delAlertByID, contained) }, } diff --git a/cmd/crowdsec-cli/capi.go b/cmd/crowdsec-cli/capi.go index 7b1613c5463..6933d355071 100644 --- a/cmd/crowdsec-cli/capi.go +++ b/cmd/crowdsec-cli/capi.go @@ -164,7 +164,7 @@ func QueryCAPIStatus(hub *cwhub.Hub, credURL string, login string, password stri return errors.New("no scenarios installed, abort") } - Client, err = apiclient.NewDefaultClient(apiURL, + client, err := apiclient.NewDefaultClient(apiURL, CAPIURLPrefix, cwversion.UserAgent(), nil) @@ -180,7 +180,7 @@ func QueryCAPIStatus(hub *cwhub.Hub, credURL string, login string, password stri Scenarios: scenarios, } - _, _, err = Client.Auth.AuthenticateWatcher(context.Background(), t) + _, _, err = client.Auth.AuthenticateWatcher(context.Background(), t) if err != nil { return err } diff --git a/cmd/crowdsec-cli/decisions.go b/cmd/crowdsec-cli/decisions.go index 
de3bf73dca2..df35fb9bb18 100644 --- a/cmd/crowdsec-cli/decisions.go +++ b/cmd/crowdsec-cli/decisions.go @@ -23,8 +23,6 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) -var Client *apiclient.ApiClient - func (cli *cliDecisions) decisionsToTable(alerts *models.GetAlertsResponse, printMachine bool) error { /*here we cheat a bit : to make it more readable for the user, we dedup some entries*/ spamLimit := make(map[string]bool) @@ -117,7 +115,8 @@ func (cli *cliDecisions) decisionsToTable(alerts *models.GetAlertsResponse, prin } type cliDecisions struct { - cfg configGetter + client *apiclient.ApiClient + cfg configGetter } func NewCLIDecisions(cfg configGetter) *cliDecisions { @@ -141,16 +140,16 @@ func (cli *cliDecisions) NewCommand() *cobra.Command { if err := cfg.LoadAPIClient(); err != nil { return fmt.Errorf("loading api client: %w", err) } - password := strfmt.Password(cfg.API.Client.Credentials.Password) - apiurl, err := url.Parse(cfg.API.Client.Credentials.URL) + apiURL, err := url.Parse(cfg.API.Client.Credentials.URL) if err != nil { - return fmt.Errorf("parsing api url %s: %w", cfg.API.Client.Credentials.URL, err) + return fmt.Errorf("parsing api url: %w", err) } - Client, err = apiclient.NewClient(&apiclient.Config{ + + cli.client, err = apiclient.NewClient(&apiclient.Config{ MachineID: cfg.API.Client.Credentials.Login, - Password: password, + Password: strfmt.Password(cfg.API.Client.Credentials.Password), UserAgent: cwversion.UserAgent(), - URL: apiurl, + URL: apiURL, VersionPrefix: "v1", }) if err != nil { @@ -247,7 +246,7 @@ func (cli *cliDecisions) list(filter apiclient.AlertsListOpts, NoSimu *bool, con filter.Contains = new(bool) } - alerts, _, err := Client.Alerts.List(context.Background(), filter) + alerts, _, err := cli.client.Alerts.List(context.Background(), filter) if err != nil { return fmt.Errorf("unable to retrieve decisions: %w", err) } @@ -294,25 +293,103 @@ cscli decisions list --origin lists --scenario list_name return 
cli.list(filter, NoSimu, contained, printMachine) }, } - cmd.Flags().SortFlags = false - cmd.Flags().BoolVarP(filter.IncludeCAPI, "all", "a", false, "Include decisions from Central API") - cmd.Flags().StringVar(filter.Since, "since", "", "restrict to alerts newer than since (ie. 4h, 30d)") - cmd.Flags().StringVar(filter.Until, "until", "", "restrict to alerts older than until (ie. 4h, 30d)") - cmd.Flags().StringVarP(filter.TypeEquals, "type", "t", "", "restrict to this decision type (ie. ban,captcha)") - cmd.Flags().StringVar(filter.ScopeEquals, "scope", "", "restrict to this scope (ie. ip,range,session)") - cmd.Flags().StringVar(filter.OriginEquals, "origin", "", fmt.Sprintf("the value to match for the specified origin (%s ...)", strings.Join(types.GetOrigins(), ","))) - cmd.Flags().StringVarP(filter.ValueEquals, "value", "v", "", "restrict to this value (ie. 1.2.3.4,userName)") - cmd.Flags().StringVarP(filter.ScenarioEquals, "scenario", "s", "", "restrict to this scenario (ie. crowdsecurity/ssh-bf)") - cmd.Flags().StringVarP(filter.IPEquals, "ip", "i", "", "restrict to alerts from this source ip (shorthand for --scope ip --value )") - cmd.Flags().StringVarP(filter.RangeEquals, "range", "r", "", "restrict to alerts from this source range (shorthand for --scope range --value )") - cmd.Flags().IntVarP(filter.Limit, "limit", "l", 100, "number of alerts to get (use 0 to remove the limit)") - cmd.Flags().BoolVar(NoSimu, "no-simu", false, "exclude decisions in simulation mode") - cmd.Flags().BoolVarP(&printMachine, "machine", "m", false, "print machines that triggered decisions") - cmd.Flags().BoolVar(contained, "contained", false, "query decisions contained by range") + + flags := cmd.Flags() + flags.SortFlags = false + flags.BoolVarP(filter.IncludeCAPI, "all", "a", false, "Include decisions from Central API") + flags.StringVar(filter.Since, "since", "", "restrict to alerts newer than since (ie. 
4h, 30d)") + flags.StringVar(filter.Until, "until", "", "restrict to alerts older than until (ie. 4h, 30d)") + flags.StringVarP(filter.TypeEquals, "type", "t", "", "restrict to this decision type (ie. ban,captcha)") + flags.StringVar(filter.ScopeEquals, "scope", "", "restrict to this scope (ie. ip,range,session)") + flags.StringVar(filter.OriginEquals, "origin", "", fmt.Sprintf("the value to match for the specified origin (%s ...)", strings.Join(types.GetOrigins(), ","))) + flags.StringVarP(filter.ValueEquals, "value", "v", "", "restrict to this value (ie. 1.2.3.4,userName)") + flags.StringVarP(filter.ScenarioEquals, "scenario", "s", "", "restrict to this scenario (ie. crowdsecurity/ssh-bf)") + flags.StringVarP(filter.IPEquals, "ip", "i", "", "restrict to alerts from this source ip (shorthand for --scope ip --value )") + flags.StringVarP(filter.RangeEquals, "range", "r", "", "restrict to alerts from this source range (shorthand for --scope range --value )") + flags.IntVarP(filter.Limit, "limit", "l", 100, "number of alerts to get (use 0 to remove the limit)") + flags.BoolVar(NoSimu, "no-simu", false, "exclude decisions in simulation mode") + flags.BoolVarP(&printMachine, "machine", "m", false, "print machines that triggered decisions") + flags.BoolVar(contained, "contained", false, "query decisions contained by range") return cmd } +func (cli *cliDecisions) add(addIP, addRange, addDuration, addValue, addScope, addReason, addType string) error { + alerts := models.AddAlertsRequest{} + origin := types.CscliOrigin + capacity := int32(0) + leakSpeed := "0" + eventsCount := int32(1) + empty := "" + simulated := false + startAt := time.Now().UTC().Format(time.RFC3339) + stopAt := time.Now().UTC().Format(time.RFC3339) + createdAt := time.Now().UTC().Format(time.RFC3339) + + /*take care of shorthand options*/ + if err := manageCliDecisionAlerts(&addIP, &addRange, &addScope, &addValue); err != nil { + return err + } + + if addIP != "" { + addValue = addIP + addScope = 
types.Ip + } else if addRange != "" { + addValue = addRange + addScope = types.Range + } else if addValue == "" { + return errors.New("missing arguments, a value is required (--ip, --range or --scope and --value)") + } + + if addReason == "" { + addReason = fmt.Sprintf("manual '%s' from '%s'", addType, cli.cfg().API.Client.Credentials.Login) + } + + decision := models.Decision{ + Duration: &addDuration, + Scope: &addScope, + Value: &addValue, + Type: &addType, + Scenario: &addReason, + Origin: &origin, + } + alert := models.Alert{ + Capacity: &capacity, + Decisions: []*models.Decision{&decision}, + Events: []*models.Event{}, + EventsCount: &eventsCount, + Leakspeed: &leakSpeed, + Message: &addReason, + ScenarioHash: &empty, + Scenario: &addReason, + ScenarioVersion: &empty, + Simulated: &simulated, + // setting empty scope/value broke plugins, and it didn't seem to be needed anymore w/ latest papi changes + Source: &models.Source{ + AsName: "", + AsNumber: "", + Cn: "", + IP: addValue, + Range: "", + Scope: &addScope, + Value: &addValue, + }, + StartAt: &startAt, + StopAt: &stopAt, + CreatedAt: createdAt, + } + alerts = append(alerts, &alert) + + _, _, err := cli.client.Alerts.Add(context.Background(), alerts) + if err != nil { + return err + } + + log.Info("Decision successfully added") + + return nil +} + func (cli *cliDecisions) newAddCmd() *cobra.Command { var ( addIP string @@ -336,93 +413,84 @@ cscli decisions add --scope username --value foobar Args: cobra.ExactArgs(0), DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { - var err error - alerts := models.AddAlertsRequest{} - origin := types.CscliOrigin - capacity := int32(0) - leakSpeed := "0" - eventsCount := int32(1) - empty := "" - simulated := false - startAt := time.Now().UTC().Format(time.RFC3339) - stopAt := time.Now().UTC().Format(time.RFC3339) - createdAt := time.Now().UTC().Format(time.RFC3339) - - /*take care of shorthand options*/ - if err = 
manageCliDecisionAlerts(&addIP, &addRange, &addScope, &addValue); err != nil { - return err - } + return cli.add(addIP, addRange, addDuration, addValue, addScope, addReason, addType) + }, + } - if addIP != "" { - addValue = addIP - addScope = types.Ip - } else if addRange != "" { - addValue = addRange - addScope = types.Range - } else if addValue == "" { - printHelp(cmd) - return errors.New("missing arguments, a value is required (--ip, --range or --scope and --value)") - } + flags := cmd.Flags() + flags.SortFlags = false + flags.StringVarP(&addIP, "ip", "i", "", "Source ip (shorthand for --scope ip --value )") + flags.StringVarP(&addRange, "range", "r", "", "Range source ip (shorthand for --scope range --value )") + flags.StringVarP(&addDuration, "duration", "d", "4h", "Decision duration (ie. 1h,4h,30m)") + flags.StringVarP(&addValue, "value", "v", "", "The value (ie. --scope username --value foobar)") + flags.StringVar(&addScope, "scope", types.Ip, "Decision scope (ie. ip,range,username)") + flags.StringVarP(&addReason, "reason", "R", "", "Decision reason (ie. scenario-name)") + flags.StringVarP(&addType, "type", "t", "ban", "Decision type (ie. 
ban,captcha,throttle)") - if addReason == "" { - addReason = fmt.Sprintf("manual '%s' from '%s'", addType, cli.cfg().API.Client.Credentials.Login) - } - decision := models.Decision{ - Duration: &addDuration, - Scope: &addScope, - Value: &addValue, - Type: &addType, - Scenario: &addReason, - Origin: &origin, - } - alert := models.Alert{ - Capacity: &capacity, - Decisions: []*models.Decision{&decision}, - Events: []*models.Event{}, - EventsCount: &eventsCount, - Leakspeed: &leakSpeed, - Message: &addReason, - ScenarioHash: &empty, - Scenario: &addReason, - ScenarioVersion: &empty, - Simulated: &simulated, - // setting empty scope/value broke plugins, and it didn't seem to be needed anymore w/ latest papi changes - Source: &models.Source{ - AsName: empty, - AsNumber: empty, - Cn: empty, - IP: addValue, - Range: "", - Scope: &addScope, - Value: &addValue, - }, - StartAt: &startAt, - StopAt: &stopAt, - CreatedAt: createdAt, - } - alerts = append(alerts, &alert) + return cmd +} - _, _, err = Client.Alerts.Add(context.Background(), alerts) - if err != nil { - return err - } +func (cli *cliDecisions) delete(delFilter apiclient.DecisionsDeleteOpts, delDecisionID string, contained *bool) error { + var err error - log.Info("Decision successfully added") + /*take care of shorthand options*/ + if err = manageCliDecisionAlerts(delFilter.IPEquals, delFilter.RangeEquals, delFilter.ScopeEquals, delFilter.ValueEquals); err != nil { + return err + } - return nil - }, + if *delFilter.ScopeEquals == "" { + delFilter.ScopeEquals = nil } - cmd.Flags().SortFlags = false - cmd.Flags().StringVarP(&addIP, "ip", "i", "", "Source ip (shorthand for --scope ip --value )") - cmd.Flags().StringVarP(&addRange, "range", "r", "", "Range source ip (shorthand for --scope range --value )") - cmd.Flags().StringVarP(&addDuration, "duration", "d", "4h", "Decision duration (ie. 1h,4h,30m)") - cmd.Flags().StringVarP(&addValue, "value", "v", "", "The value (ie. 
--scope username --value foobar)") - cmd.Flags().StringVar(&addScope, "scope", types.Ip, "Decision scope (ie. ip,range,username)") - cmd.Flags().StringVarP(&addReason, "reason", "R", "", "Decision reason (ie. scenario-name)") - cmd.Flags().StringVarP(&addType, "type", "t", "ban", "Decision type (ie. ban,captcha,throttle)") + if *delFilter.OriginEquals == "" { + delFilter.OriginEquals = nil + } - return cmd + if *delFilter.ValueEquals == "" { + delFilter.ValueEquals = nil + } + + if *delFilter.ScenarioEquals == "" { + delFilter.ScenarioEquals = nil + } + + if *delFilter.TypeEquals == "" { + delFilter.TypeEquals = nil + } + + if *delFilter.IPEquals == "" { + delFilter.IPEquals = nil + } + + if *delFilter.RangeEquals == "" { + delFilter.RangeEquals = nil + } + + if contained != nil && *contained { + delFilter.Contains = new(bool) + } + + var decisions *models.DeleteDecisionResponse + + if delDecisionID == "" { + decisions, _, err = cli.client.Decisions.Delete(context.Background(), delFilter) + if err != nil { + return fmt.Errorf("unable to delete decisions: %w", err) + } + } else { + if _, err = strconv.Atoi(delDecisionID); err != nil { + return fmt.Errorf("id '%s' is not an integer: %w", delDecisionID, err) + } + + decisions, _, err = cli.client.Decisions.DeleteOne(context.Background(), delDecisionID) + if err != nil { + return fmt.Errorf("unable to delete decision: %w", err) + } + } + + log.Infof("%s decision(s) deleted", decisions.NbDeleted) + + return nil } func (cli *cliDecisions) newDeleteCmd() *cobra.Command { @@ -462,76 +530,29 @@ cscli decisions delete --origin lists --scenario list_name *delFilter.TypeEquals == "" && *delFilter.IPEquals == "" && *delFilter.RangeEquals == "" && *delFilter.ScenarioEquals == "" && *delFilter.OriginEquals == "" && delDecisionID == "" { - cmd.Usage() + _ = cmd.Usage() return errors.New("at least one filter or --all must be specified") } return nil }, RunE: func(_ *cobra.Command, _ []string) error { - var err error - var decisions 
*models.DeleteDecisionResponse - - /*take care of shorthand options*/ - if err = manageCliDecisionAlerts(delFilter.IPEquals, delFilter.RangeEquals, delFilter.ScopeEquals, delFilter.ValueEquals); err != nil { - return err - } - if *delFilter.ScopeEquals == "" { - delFilter.ScopeEquals = nil - } - if *delFilter.OriginEquals == "" { - delFilter.OriginEquals = nil - } - if *delFilter.ValueEquals == "" { - delFilter.ValueEquals = nil - } - if *delFilter.ScenarioEquals == "" { - delFilter.ScenarioEquals = nil - } - if *delFilter.TypeEquals == "" { - delFilter.TypeEquals = nil - } - if *delFilter.IPEquals == "" { - delFilter.IPEquals = nil - } - if *delFilter.RangeEquals == "" { - delFilter.RangeEquals = nil - } - if contained != nil && *contained { - delFilter.Contains = new(bool) - } - - if delDecisionID == "" { - decisions, _, err = Client.Decisions.Delete(context.Background(), delFilter) - if err != nil { - return fmt.Errorf("unable to delete decisions: %w", err) - } - } else { - if _, err = strconv.Atoi(delDecisionID); err != nil { - return fmt.Errorf("id '%s' is not an integer: %w", delDecisionID, err) - } - decisions, _, err = Client.Decisions.DeleteOne(context.Background(), delDecisionID) - if err != nil { - return fmt.Errorf("unable to delete decision: %w", err) - } - } - log.Infof("%s decision(s) deleted", decisions.NbDeleted) - - return nil + return cli.delete(delFilter, delDecisionID, contained) }, } - cmd.Flags().SortFlags = false - cmd.Flags().StringVarP(delFilter.IPEquals, "ip", "i", "", "Source ip (shorthand for --scope ip --value )") - cmd.Flags().StringVarP(delFilter.RangeEquals, "range", "r", "", "Range source ip (shorthand for --scope range --value )") - cmd.Flags().StringVarP(delFilter.TypeEquals, "type", "t", "", "the decision type (ie. 
ban,captcha)") - cmd.Flags().StringVarP(delFilter.ValueEquals, "value", "v", "", "the value to match for in the specified scope") - cmd.Flags().StringVarP(delFilter.ScenarioEquals, "scenario", "s", "", "the scenario name (ie. crowdsecurity/ssh-bf)") - cmd.Flags().StringVar(delFilter.OriginEquals, "origin", "", fmt.Sprintf("the value to match for the specified origin (%s ...)", strings.Join(types.GetOrigins(), ","))) - - cmd.Flags().StringVar(&delDecisionID, "id", "", "decision id") - cmd.Flags().BoolVar(&delDecisionAll, "all", false, "delete all decisions") - cmd.Flags().BoolVar(contained, "contained", false, "query decisions contained by range") + flags := cmd.Flags() + flags.SortFlags = false + flags.StringVarP(delFilter.IPEquals, "ip", "i", "", "Source ip (shorthand for --scope ip --value )") + flags.StringVarP(delFilter.RangeEquals, "range", "r", "", "Range source ip (shorthand for --scope range --value )") + flags.StringVarP(delFilter.TypeEquals, "type", "t", "", "the decision type (ie. ban,captcha)") + flags.StringVarP(delFilter.ValueEquals, "value", "v", "", "the value to match for in the specified scope") + flags.StringVarP(delFilter.ScenarioEquals, "scenario", "s", "", "the scenario name (ie. 
crowdsecurity/ssh-bf)") + flags.StringVar(delFilter.OriginEquals, "origin", "", fmt.Sprintf("the value to match for the specified origin (%s ...)", strings.Join(types.GetOrigins(), ","))) + + flags.StringVar(&delDecisionID, "id", "", "decision id") + flags.BoolVar(&delDecisionAll, "all", false, "delete all decisions") + flags.BoolVar(contained, "contained", false, "query decisions contained by range") return cmd } diff --git a/cmd/crowdsec-cli/decisions_import.go b/cmd/crowdsec-cli/decisions_import.go index 1df9d1ae4a1..338c1b7fb3e 100644 --- a/cmd/crowdsec-cli/decisions_import.go +++ b/cmd/crowdsec-cli/decisions_import.go @@ -122,7 +122,7 @@ func (cli *cliDecisions) runImport(cmd *cobra.Command, args []string) error { } var ( - content []byte + content []byte fin *os.File ) @@ -224,7 +224,7 @@ func (cli *cliDecisions) runImport(cmd *cobra.Command, args []string) error { Decisions: chunk, } - _, _, err = Client.Alerts.Add(context.Background(), models.AddAlertsRequest{&importAlert}) + _, _, err = cli.client.Alerts.Add(context.Background(), models.AddAlertsRequest{&importAlert}) if err != nil { return err } @@ -273,7 +273,7 @@ $ echo "1.2.3.4" | cscli decisions import -i - --format values flags.Int("batch", 0, "Split import in batches of N decisions") flags.String("format", "", "Input format: 'json', 'csv' or 'values' (each line is a value, no headers)") - cmd.MarkFlagRequired("input") + _ = cmd.MarkFlagRequired("input") return cmd } diff --git a/cmd/crowdsec-cli/lapi.go b/cmd/crowdsec-cli/lapi.go index fcef137e424..c2c92140fd6 100644 --- a/cmd/crowdsec-cli/lapi.go +++ b/cmd/crowdsec-cli/lapi.go @@ -50,7 +50,7 @@ func QueryLAPIStatus(hub *cwhub.Hub, credURL string, login string, password stri return fmt.Errorf("failed to get scenarios: %w", err) } - Client, err = apiclient.NewDefaultClient(apiURL, + client, err := apiclient.NewDefaultClient(apiURL, LAPIURLPrefix, cwversion.UserAgent(), nil) @@ -66,7 +66,7 @@ func QueryLAPIStatus(hub *cwhub.Hub, credURL string, login 
string, password stri Scenarios: scenarios, } - _, _, err = Client.Auth.AuthenticateWatcher(context.Background(), t) + _, _, err = client.Auth.AuthenticateWatcher(context.Background(), t) if err != nil { return err } @@ -322,7 +322,8 @@ cscli lapi context add --value evt.Meta.source_ip --value evt.Meta.target_user flags := cmd.Flags() flags.StringVarP(&keyToAdd, "key", "k", "", "The key of the different values to send") flags.StringSliceVar(&valuesToAdd, "value", []string{}, "The expr fields to associate with the key") - cmd.MarkFlagRequired("value") + + _ = cmd.MarkFlagRequired("value") return cmd } diff --git a/test/bats/90_decisions.bats b/test/bats/90_decisions.bats index f0213a8a236..be6e905356e 100644 --- a/test/bats/90_decisions.bats +++ b/test/bats/90_decisions.bats @@ -31,7 +31,6 @@ teardown() { @test "'decisions add' requires parameters" { rune -1 cscli decisions add - assert_line "Usage:" assert_stderr --partial "missing arguments, a value is required (--ip, --range or --scope and --value)" rune -1 cscli decisions add -o json From 1378e16578dfae6b900823e6c89a62e01066ec47 Mon Sep 17 00:00:00 2001 From: Manuel Sabban Date: Thu, 6 Jun 2024 11:19:05 +0200 Subject: [PATCH 166/318] fix test 01_cscli by avoiding discrepancies in cscli explain (#3059) * fix hub installed items in pacakge tests --- test/bin/remove-all-hub-items | 20 ++++++++++++++++++++ test/lib/config/config-global | 1 + 2 files changed, 21 insertions(+) create mode 100755 test/bin/remove-all-hub-items diff --git a/test/bin/remove-all-hub-items b/test/bin/remove-all-hub-items new file mode 100755 index 00000000000..981602b775a --- /dev/null +++ b/test/bin/remove-all-hub-items @@ -0,0 +1,20 @@ +#!/usr/bin/env bash + +set -eu + +# shellcheck disable=SC1007 +THIS_DIR=$(CDPATH= cd -- "$(dirname -- "$0")" && pwd) +# shellcheck disable=SC1091 +. "${THIS_DIR}/../.environment.sh" + +# pre-download everything but don't install anything + +echo "Pre-downloading Hub content..." 
+ +types=$("$CSCLI" hub types -o raw) + +for itemtype in $types; do + "$CSCLI" "$itemtype" remove --all --force +done + +echo " done." diff --git a/test/lib/config/config-global b/test/lib/config/config-global index 0caf0591f7d..f77fb3e27bc 100755 --- a/test/lib/config/config-global +++ b/test/lib/config/config-global @@ -71,6 +71,7 @@ make_init_data() { ./instance-db setup ./bin/preload-hub-items + ./bin/remove-all-hub-items # when installed packages are always using sqlite, so no need to regenerate # local credz for sqlite From b38b959f71daea46bd6e8037e4c73a6f4df7919d Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 6 Jun 2024 14:51:49 +0200 Subject: [PATCH 167/318] enable linter "stylecheck" (#3060) * enable linter "stylecheck" * stylecheck: omit redundant types --- .golangci.yml | 11 ++++++++++- pkg/exprhelpers/debugger.go | 9 ++++----- pkg/exprhelpers/helpers.go | 4 ++-- 3 files changed, 16 insertions(+), 8 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index fd237c95bcc..5565ee7466e 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -111,6 +111,15 @@ linters-settings: - pkg: "gopkg.in/yaml.v2" desc: "yaml.v2 is deprecated for new code in favor of yaml.v3" + stylecheck: + checks: + - all + - -ST1003 # should not use underscores in Go names; ... + - -ST1005 # error strings should not be capitalized + - -ST1012 # error var ... should have name of the form ErrFoo + - -ST1016 # methods on the same type should have the same receiver name + - -ST1022 # comment on exported var ... should be of the form ... + revive: ignore-generated-header: true severity: error @@ -289,6 +298,7 @@ linters: # - spancheck # Checks for mistakes with OpenTelemetry/Census spans. # - sqlclosecheck # Checks that sql.Rows, sql.Stmt, sqlx.NamedStmt, pgx.Query are closed. # - staticcheck # (megacheck): It's a set of rules from staticcheck. It's not the same thing as the staticcheck binary. 
The author of staticcheck doesn't support or approve the use of staticcheck as a library inside golangci-lint. + # - stylecheck # Stylecheck is a replacement for golint # - tenv # tenv is analyzer that detects using os.Setenv instead of t.Setenv since Go1.17 # - testableexamples # linter checks if examples are testable (have an expected output) # - testifylint # Checks usage of github.com/stretchr/testify. @@ -358,7 +368,6 @@ linters: - forbidigo # Forbids identifiers - gochecknoglobals # Check that no global variables exist. - goconst # Finds repeated strings that could be replaced by a constant - - stylecheck # Stylecheck is a replacement for golint - tagliatelle # Checks the struct tags. - varnamelen # checks that the length of a variable's name matches its scope diff --git a/pkg/exprhelpers/debugger.go b/pkg/exprhelpers/debugger.go index 432bb737eae..5ab2fc48c70 100644 --- a/pkg/exprhelpers/debugger.go +++ b/pkg/exprhelpers/debugger.go @@ -379,13 +379,12 @@ func DisplayExprDebug(program *vm.Program, outputs []OpOutput, logger *log.Entry // TBD: Based on the level of the logger (ie. trace vs debug) we could decide to add more low level instructions (pop, push, etc.) 
func RunWithDebug(program *vm.Program, env interface{}, logger *log.Entry) ([]OpOutput, any, error) { - - var outputs []OpOutput = []OpOutput{} - var buf strings.Builder - var erp ExprRuntimeDebug = ExprRuntimeDebug{ + outputs := []OpOutput{} + erp := ExprRuntimeDebug{ Logger: logger, } - var debugErr chan error = make(chan error) + debugErr := make(chan error) + var buf strings.Builder vm := vm.Debug() done := false program.Opcodes(&buf) diff --git a/pkg/exprhelpers/helpers.go b/pkg/exprhelpers/helpers.go index e0a3a523c3e..5c041aa2886 100644 --- a/pkg/exprhelpers/helpers.go +++ b/pkg/exprhelpers/helpers.go @@ -243,8 +243,8 @@ func Distinct(params ...any) (any, error) { return []interface{}{}, nil } - var exists map[any]bool = make(map[any]bool) - var ret []interface{} = make([]interface{}, 0) + exists := make(map[any]bool) + ret := make([]interface{}, 0) for _, val := range array { if _, ok := exists[val]; !ok { From 7d75290a0f7deac6eca11c93a87631b2265a2ce4 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 6 Jun 2024 14:53:30 +0200 Subject: [PATCH 168/318] Use cmp.Or from go 1.22 (#3058) --- cmd/crowdsec-cli/itemcli.go | 37 ++++++++++++++++++------------------- 1 file changed, 18 insertions(+), 19 deletions(-) diff --git a/cmd/crowdsec-cli/itemcli.go b/cmd/crowdsec-cli/itemcli.go index 66a3f4fc051..367e22f2962 100644 --- a/cmd/crowdsec-cli/itemcli.go +++ b/cmd/crowdsec-cli/itemcli.go @@ -1,6 +1,7 @@ package main import ( + "cmp" "errors" "fmt" "os" @@ -13,8 +14,6 @@ import ( log "github.com/sirupsen/logrus" "github.com/spf13/cobra" - "github.com/crowdsecurity/go-cs-lib/coalesce" - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) @@ -44,8 +43,8 @@ type cliItem struct { func (cli cliItem) NewCommand() *cobra.Command { cmd := &cobra.Command{ - Use: coalesce.String(cli.help.use, cli.name+" [item]..."), - Short: coalesce.String(cli.help.short, "Manage hub "+cli.name), + 
Use: cmp.Or(cli.help.use, cli.name+" [item]..."), + Short: cmp.Or(cli.help.short, "Manage hub "+cli.name), Long: cli.help.long, Example: cli.help.example, Args: cobra.MinimumNArgs(1), @@ -105,9 +104,9 @@ func (cli cliItem) newInstallCmd() *cobra.Command { ) cmd := &cobra.Command{ - Use: coalesce.String(cli.installHelp.use, "install [item]..."), - Short: coalesce.String(cli.installHelp.short, "Install given "+cli.oneOrMore), - Long: coalesce.String(cli.installHelp.long, fmt.Sprintf("Fetch and install one or more %s from the hub", cli.name)), + Use: cmp.Or(cli.installHelp.use, "install [item]..."), + Short: cmp.Or(cli.installHelp.short, "Install given "+cli.oneOrMore), + Long: cmp.Or(cli.installHelp.long, fmt.Sprintf("Fetch and install one or more %s from the hub", cli.name)), Example: cli.installHelp.example, Args: cobra.MinimumNArgs(1), DisableAutoGenTag: true, @@ -231,9 +230,9 @@ func (cli cliItem) newRemoveCmd() *cobra.Command { ) cmd := &cobra.Command{ - Use: coalesce.String(cli.removeHelp.use, "remove [item]..."), - Short: coalesce.String(cli.removeHelp.short, "Remove given "+cli.oneOrMore), - Long: coalesce.String(cli.removeHelp.long, "Remove one or more "+cli.name), + Use: cmp.Or(cli.removeHelp.use, "remove [item]..."), + Short: cmp.Or(cli.removeHelp.short, "Remove given "+cli.oneOrMore), + Long: cmp.Or(cli.removeHelp.long, "Remove one or more "+cli.name), Example: cli.removeHelp.example, Aliases: []string{"delete"}, DisableAutoGenTag: true, @@ -327,9 +326,9 @@ func (cli cliItem) newUpgradeCmd() *cobra.Command { ) cmd := &cobra.Command{ - Use: coalesce.String(cli.upgradeHelp.use, "upgrade [item]..."), - Short: coalesce.String(cli.upgradeHelp.short, "Upgrade given "+cli.oneOrMore), - Long: coalesce.String(cli.upgradeHelp.long, fmt.Sprintf("Fetch and upgrade one or more %s from the hub", cli.name)), + Use: cmp.Or(cli.upgradeHelp.use, "upgrade [item]..."), + Short: cmp.Or(cli.upgradeHelp.short, "Upgrade given "+cli.oneOrMore), + Long: 
cmp.Or(cli.upgradeHelp.long, fmt.Sprintf("Fetch and upgrade one or more %s from the hub", cli.name)), Example: cli.upgradeHelp.example, DisableAutoGenTag: true, ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { @@ -404,9 +403,9 @@ func (cli cliItem) newInspectCmd() *cobra.Command { ) cmd := &cobra.Command{ - Use: coalesce.String(cli.inspectHelp.use, "inspect [item]..."), - Short: coalesce.String(cli.inspectHelp.short, "Inspect given "+cli.oneOrMore), - Long: coalesce.String(cli.inspectHelp.long, "Inspect the state of one or more "+cli.name), + Use: cmp.Or(cli.inspectHelp.use, "inspect [item]..."), + Short: cmp.Or(cli.inspectHelp.short, "Inspect given "+cli.oneOrMore), + Long: cmp.Or(cli.inspectHelp.long, "Inspect the state of one or more "+cli.name), Example: cli.inspectHelp.example, Args: cobra.MinimumNArgs(1), DisableAutoGenTag: true, @@ -449,9 +448,9 @@ func (cli cliItem) newListCmd() *cobra.Command { var all bool cmd := &cobra.Command{ - Use: coalesce.String(cli.listHelp.use, "list [item... | -a]"), - Short: coalesce.String(cli.listHelp.short, "List "+cli.oneOrMore), - Long: coalesce.String(cli.listHelp.long, "List of installed/available/specified "+cli.name), + Use: cmp.Or(cli.listHelp.use, "list [item... 
| -a]"), + Short: cmp.Or(cli.listHelp.short, "List "+cli.oneOrMore), + Long: cmp.Or(cli.listHelp.long, "List of installed/available/specified "+cli.name), Example: cli.listHelp.example, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, args []string) error { From 6ef2396c91110bf52e7e2a1900993ea189eaf188 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 6 Jun 2024 16:03:32 +0200 Subject: [PATCH 169/318] enable linter: revive (blank-imports) (#3062) * lint: revive (blank-imports) require a comment to justify blank imports * typo --- .golangci.yml | 2 -- pkg/acquisition/modules/appsec/appsec_runner.go | 1 + pkg/database/database.go | 1 + pkg/parser/stage.go | 1 + 4 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 5565ee7466e..ba54bfbbd89 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -127,8 +127,6 @@ linters-settings: rules: - name: add-constant disabled: true - - name: blank-imports - disabled: true - name: cognitive-complexity disabled: true - name: comment-spacings diff --git a/pkg/acquisition/modules/appsec/appsec_runner.go b/pkg/acquisition/modules/appsec/appsec_runner.go index cc7264aa2c8..c015db74d82 100644 --- a/pkg/acquisition/modules/appsec/appsec_runner.go +++ b/pkg/acquisition/modules/appsec/appsec_runner.go @@ -14,6 +14,7 @@ import ( log "github.com/sirupsen/logrus" "gopkg.in/tomb.v2" + // load body processors via init() _ "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/appsec/bodyprocessors" ) diff --git a/pkg/database/database.go b/pkg/database/database.go index 357077e7d6f..fc89aa5cd4d 100644 --- a/pkg/database/database.go +++ b/pkg/database/database.go @@ -8,6 +8,7 @@ import ( "os" entsql "entgo.io/ent/dialect/sql" + // load database backends _ "github.com/go-sql-driver/mysql" _ "github.com/jackc/pgx/v4/stdlib" _ "github.com/mattn/go-sqlite3" diff --git a/pkg/parser/stage.go b/pkg/parser/stage.go index 1eac2b83ede..fe538023b61 100644 --- 
a/pkg/parser/stage.go +++ b/pkg/parser/stage.go @@ -10,6 +10,7 @@ import ( "errors" "fmt" "io" + // enable profiling _ "net/http/pprof" "os" "sort" From 550d422d2399a81615e7073127ae813ca9a3b7fd Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 7 Jun 2024 12:01:08 +0200 Subject: [PATCH 170/318] lint revive(deep-exit): refactor cmd/crowdsec (#3063) * lint revive(deep-exit): refactor cmd/crowdsec * lint --- .golangci.yml | 4 ++ cmd/crowdsec/crowdsec.go | 103 ++++----------------------------------- cmd/crowdsec/dump.go | 56 +++++++++++++++++++++ cmd/crowdsec/serve.go | 3 +- 4 files changed, 72 insertions(+), 94 deletions(-) create mode 100644 cmd/crowdsec/dump.go diff --git a/.golangci.yml b/.golangci.yml index ba54bfbbd89..c096ef5f88a 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -497,6 +497,10 @@ issues: - revive path: pkg/metabase/metabase.go + - linters: + - revive + path: pkg/metabase/container.go + - linters: - revive path: cmd/crowdsec-cli/copyfile.go diff --git a/cmd/crowdsec/crowdsec.go b/cmd/crowdsec/crowdsec.go index d226e3a5796..0fd269b3537 100644 --- a/cmd/crowdsec/crowdsec.go +++ b/cmd/crowdsec/crowdsec.go @@ -4,12 +4,10 @@ import ( "context" "fmt" "os" - "path/filepath" "sync" "time" log "github.com/sirupsen/logrus" - "gopkg.in/yaml.v3" "github.com/crowdsecurity/go-cs-lib/trace" @@ -34,9 +32,8 @@ func initCrowdsec(cConfig *csconfig.Config, hub *cwhub.Hub) (*parser.Parsers, [] } err = exprhelpers.GeoIPInit(hub.GetDataDir()) - if err != nil { - //GeoIP databases are not mandatory, do not make crowdsec fail if they are not present + // GeoIP databases are not mandatory, do not make crowdsec fail if they are not present log.Warnf("unable to initialize GeoIP: %s", err) } @@ -79,7 +76,6 @@ func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.H if err := runParse(inputLineChan, inputEventChan, *parsers.Ctx, parsers.Nodes); err != nil { // this error will never happen as parser.Parse is not 
able to return errors - log.Fatalf("starting parse error : %s", err) return err } @@ -96,7 +92,7 @@ func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.H bucketsTomb.Go(func() error { bucketWg.Add(1) - /*restore previous state as well if present*/ + // restore previous state as well if present if cConfig.Crowdsec.BucketStateFile != "" { log.Warningf("Restoring buckets state from %s", cConfig.Crowdsec.BucketStateFile) @@ -109,12 +105,7 @@ func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.H bucketsTomb.Go(func() error { defer trace.CatchPanic("crowdsec/runPour") - if err := runPour(inputEventChan, holders, buckets, cConfig); err != nil { - log.Fatalf("starting pour error : %s", err) - return err - } - - return nil + return runPour(inputEventChan, holders, buckets, cConfig) }) } bucketWg.Done() @@ -140,12 +131,7 @@ func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.H outputsTomb.Go(func() error { defer trace.CatchPanic("crowdsec/runOutput") - if err := runOutput(inputEventChan, outputEventChan, buckets, *parsers.Povfwctx, parsers.Povfwnodes, apiClient); err != nil { - log.Fatalf("starting outputs error : %s", err) - return err - } - - return nil + return runOutput(inputEventChan, outputEventChan, buckets, *parsers.Povfwctx, parsers.Povfwnodes, apiClient) }) } outputWg.Done() @@ -190,7 +176,7 @@ func serveCrowdsec(parsers *parser.Parsers, cConfig *csconfig.Config, hub *cwhub } }() - /*we should stop in two cases : + /* we should stop in two cases : - crowdsecTomb has been Killed() : it might be shutdown or reload, so stop - acquisTomb is dead, it means that we were in "cat" mode and files are done reading, quit */ @@ -198,15 +184,15 @@ func serveCrowdsec(parsers *parser.Parsers, cConfig *csconfig.Config, hub *cwhub log.Debugf("Shutting down crowdsec routines") if err := ShutdownCrowdsecRoutines(); err != nil { - log.Fatalf("unable to shutdown crowdsec routines: %s", err) + return 
fmt.Errorf("unable to shutdown crowdsec routines: %w", err) } log.Debugf("everything is dead, return crowdsecTomb") if dumpStates { - dumpParserState() - dumpOverflowState() - dumpBucketsPour() + if err := dumpAllStates(); err != nil { + log.Fatal(err) + } os.Exit(0) } @@ -214,80 +200,11 @@ func serveCrowdsec(parsers *parser.Parsers, cConfig *csconfig.Config, hub *cwhub }) } -func dumpBucketsPour() { - fd, err := os.OpenFile(filepath.Join(parser.DumpFolder, "bucketpour-dump.yaml"), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o666) - if err != nil { - log.Fatalf("open: %s", err) - } - - out, err := yaml.Marshal(leaky.BucketPourCache) - if err != nil { - log.Fatalf("marshal: %s", err) - } - - b, err := fd.Write(out) - if err != nil { - log.Fatalf("write: %s", err) - } - - log.Tracef("wrote %d bytes", b) - - if err := fd.Close(); err != nil { - log.Fatalf(" close: %s", err) - } -} - -func dumpParserState() { - fd, err := os.OpenFile(filepath.Join(parser.DumpFolder, "parser-dump.yaml"), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o666) - if err != nil { - log.Fatalf("open: %s", err) - } - - out, err := yaml.Marshal(parser.StageParseCache) - if err != nil { - log.Fatalf("marshal: %s", err) - } - - b, err := fd.Write(out) - if err != nil { - log.Fatalf("write: %s", err) - } - - log.Tracef("wrote %d bytes", b) - - if err := fd.Close(); err != nil { - log.Fatalf(" close: %s", err) - } -} - -func dumpOverflowState() { - fd, err := os.OpenFile(filepath.Join(parser.DumpFolder, "bucket-dump.yaml"), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o666) - if err != nil { - log.Fatalf("open: %s", err) - } - - out, err := yaml.Marshal(bucketOverflows) - if err != nil { - log.Fatalf("marshal: %s", err) - } - - b, err := fd.Write(out) - if err != nil { - log.Fatalf("write: %s", err) - } - - log.Tracef("wrote %d bytes", b) - - if err := fd.Close(); err != nil { - log.Fatalf(" close: %s", err) - } -} - func waitOnTomb() { for { select { case <-acquisTomb.Dead(): - /*if it's acquisition dying it means that we 
were in "cat" mode. + /* if it's acquisition dying it means that we were in "cat" mode. while shutting down, we need to give time for all buckets to process in flight data*/ log.Info("Acquisition is finished, shutting down") /* diff --git a/cmd/crowdsec/dump.go b/cmd/crowdsec/dump.go new file mode 100644 index 00000000000..33c65878b11 --- /dev/null +++ b/cmd/crowdsec/dump.go @@ -0,0 +1,56 @@ +package main + +import ( + "fmt" + "os" + "path/filepath" + + log "github.com/sirupsen/logrus" + "gopkg.in/yaml.v3" + + leaky "github.com/crowdsecurity/crowdsec/pkg/leakybucket" + "github.com/crowdsecurity/crowdsec/pkg/parser" +) + +func dumpAllStates() error { + log.Debugf("Dumping parser+bucket states to %s", parser.DumpFolder) + + if err := dumpState( + filepath.Join(parser.DumpFolder, "parser-dump.yaml"), + parser.StageParseCache, + ); err != nil { + return fmt.Errorf("while dumping parser state: %w", err) + } + + if err := dumpState( + filepath.Join(parser.DumpFolder, "bucket-dump.yaml"), + bucketOverflows, + ); err != nil { + return fmt.Errorf("while dumping bucket overflow state: %w", err) + } + + if err := dumpState( + filepath.Join(parser.DumpFolder, "bucketpour-dump.yaml"), + leaky.BucketPourCache, + ); err != nil { + return fmt.Errorf("while dumping bucket pour state: %w", err) + } + + return nil +} + +func dumpState(destPath string, obj any) error { + dir := filepath.Dir(destPath) + + err := os.MkdirAll(dir, 0o755) + if err != nil { + return err + } + + out, err := yaml.Marshal(obj) + if err != nil { + return err + } + + return os.WriteFile(destPath, out, 0o666) +} diff --git a/cmd/crowdsec/serve.go b/cmd/crowdsec/serve.go index 497215d74a1..da79e50c427 100644 --- a/cmd/crowdsec/serve.go +++ b/cmd/crowdsec/serve.go @@ -390,7 +390,8 @@ func Serve(cConfig *csconfig.Config, agentReady chan bool) error { if flags.TestMode { log.Infof("Configuration test done") pluginBroker.Kill() - os.Exit(0) + + return nil } if cConfig.Common != nil && cConfig.Common.Daemonize { From 
d3974894fc8976f385ae4768420e739a3ebf82ee Mon Sep 17 00:00:00 2001 From: MazzMa Date: Fri, 7 Jun 2024 15:58:16 +0200 Subject: [PATCH 171/318] add: go reference badge (#3066) --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 6428c3a8053..a900f0ee514 100644 --- a/README.md +++ b/README.md @@ -12,6 +12,7 @@ +Go Reference From 72b6da99258afb781430642d0c72c294c6c9888c Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 7 Jun 2024 16:53:23 +0200 Subject: [PATCH 172/318] enable linter: revive (early-return) (#3051) * enable linter: revive (early-return) * lint --- .golangci.yml | 4 +--- pkg/acquisition/modules/s3/s3.go | 22 +++++++++---------- pkg/apiserver/papi.go | 17 ++++++++------- pkg/appsec/appsec_rule/modsecurity.go | 31 +++++++++++++-------------- pkg/appsec/coraza_logger.go | 11 +++++----- pkg/csplugin/hclog_adapter.go | 5 ++--- pkg/exprhelpers/crowdsec_cti.go | 7 +++--- pkg/parser/parsing_test.go | 27 +++++++++++------------ 8 files changed, 58 insertions(+), 66 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index c096ef5f88a..1ec386183e1 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -102,7 +102,6 @@ linters-settings: - "!**/pkg/csplugin/broker.go" - "!**/pkg/leakybucket/buckets_test.go" - "!**/pkg/leakybucket/manager_load.go" - - "!**/pkg/metabase/metabase.go" - "!**/pkg/parser/node.go" - "!**/pkg/parser/node_test.go" - "!**/pkg/parser/parsing_test.go" @@ -139,8 +138,6 @@ linters-settings: disabled: true - name: defer disabled: true - - name: early-return - disabled: true - name: empty-block disabled: true - name: empty-lines @@ -382,6 +379,7 @@ issues: exclude-dirs: - pkg/time/rate + - pkg/metabase exclude-files: - pkg/yamlpatch/merge.go diff --git a/pkg/acquisition/modules/s3/s3.go b/pkg/acquisition/modules/s3/s3.go index 98b2e685cd0..d1bf881079a 100644 --- a/pkg/acquisition/modules/s3/s3.go +++ b/pkg/acquisition/modules/s3/s3.go @@ -131,7 +131,6 @@ func (s *S3Source) 
newS3Client() error { } sess, err := session.NewSessionWithOptions(options) - if err != nil { return fmt.Errorf("failed to create aws session: %w", err) } @@ -146,7 +145,7 @@ func (s *S3Source) newS3Client() error { s.s3Client = s3.New(sess, config) if s.s3Client == nil { - return fmt.Errorf("failed to create S3 client") + return errors.New("failed to create S3 client") } return nil @@ -167,7 +166,7 @@ func (s *S3Source) newSQSClient() error { } if sess == nil { - return fmt.Errorf("failed to create aws session") + return errors.New("failed to create aws session") } config := aws.NewConfig() if s.Config.AwsRegion != "" { @@ -178,7 +177,7 @@ func (s *S3Source) newSQSClient() error { } s.sqsClient = sqs.New(sess, config) if s.sqsClient == nil { - return fmt.Errorf("failed to create SQS client") + return errors.New("failed to create SQS client") } return nil } @@ -251,16 +250,15 @@ func (s *S3Source) listPoll() error { continue } for i := len(bucketObjects) - 1; i >= 0; i-- { - if bucketObjects[i].LastModified.After(lastObjectDate) { - newObject = true - logger.Debugf("Found new object %s", *bucketObjects[i].Key) - s.readerChan <- S3Object{ - Bucket: s.Config.BucketName, - Key: *bucketObjects[i].Key, - } - } else { + if !bucketObjects[i].LastModified.After(lastObjectDate) { break } + newObject = true + logger.Debugf("Found new object %s", *bucketObjects[i].Key) + s.readerChan <- S3Object{ + Bucket: s.Config.BucketName, + Key: *bucketObjects[i].Key, + } } if newObject { lastObjectDate = *bucketObjects[len(bucketObjects)-1].LastModified diff --git a/pkg/apiserver/papi.go b/pkg/apiserver/papi.go index 169f1441df4..0d0fd0ecd42 100644 --- a/pkg/apiserver/papi.go +++ b/pkg/apiserver/papi.go @@ -141,17 +141,18 @@ func (p *Papi) handleEvent(event longpollclient.Event, sync bool) error { return errors.New("no source user in header message, skipping") } - if operationFunc, ok := operationMap[message.Header.OperationType]; ok { - logger.Debugf("Calling operation '%s'", 
message.Header.OperationType) - - err := operationFunc(message, p, sync) - if err != nil { - return fmt.Errorf("'%s %s failed: %w", message.Header.OperationType, message.Header.OperationCmd, err) - } - } else { + operationFunc, ok := operationMap[message.Header.OperationType] + if !ok { return fmt.Errorf("operation '%s' unknown, continue", message.Header.OperationType) } + logger.Debugf("Calling operation '%s'", message.Header.OperationType) + + err := operationFunc(message, p, sync) + if err != nil { + return fmt.Errorf("'%s %s failed: %w", message.Header.OperationType, message.Header.OperationCmd, err) + } + return nil } diff --git a/pkg/appsec/appsec_rule/modsecurity.go b/pkg/appsec/appsec_rule/modsecurity.go index 03a840cf436..8f58a9589ca 100644 --- a/pkg/appsec/appsec_rule/modsecurity.go +++ b/pkg/appsec/appsec_rule/modsecurity.go @@ -1,6 +1,7 @@ package appsec_rule import ( + "errors" "fmt" "hash/fnv" "strings" @@ -67,9 +68,7 @@ var bodyTypeMatch map[string]string = map[string]string{ } func (m *ModsecurityRule) Build(rule *CustomRule, appsecRuleName string) (string, []uint32, error) { - rules, err := m.buildRules(rule, appsecRuleName, false, 0, 0) - if err != nil { return "", nil, err } @@ -99,7 +98,7 @@ func (m *ModsecurityRule) buildRules(rule *CustomRule, appsecRuleName string, an ret := make([]string, 0) if len(rule.And) != 0 && len(rule.Or) != 0 { - return nil, fmt.Errorf("cannot have both 'and' and 'or' in the same rule") + return nil, errors.New("cannot have both 'and' and 'or' in the same rule") } if rule.And != nil { @@ -166,15 +165,15 @@ func (m *ModsecurityRule) buildRules(rule *CustomRule, appsecRuleName string, an r.WriteByte(' ') if rule.Match.Type != "" { - if match, ok := matchMap[rule.Match.Type]; ok { - prefix := "" - if rule.Match.Not { - prefix = "!" 
- } - r.WriteString(fmt.Sprintf(`"%s%s %s"`, prefix, match, rule.Match.Value)) - } else { + match, ok := matchMap[rule.Match.Type] + if !ok { return nil, fmt.Errorf("unknown match type '%s'", rule.Match.Type) } + prefix := "" + if rule.Match.Not { + prefix = "!" + } + r.WriteString(fmt.Sprintf(`"%s%s %s"`, prefix, match, rule.Match.Value)) } //Should phase:2 be configurable? @@ -186,20 +185,20 @@ func (m *ModsecurityRule) buildRules(rule *CustomRule, appsecRuleName string, an continue } r.WriteByte(',') - if mappedTransform, ok := transformMap[transform]; ok { - r.WriteString(mappedTransform) - } else { + mappedTransform, ok := transformMap[transform] + if !ok { return nil, fmt.Errorf("unknown transform '%s'", transform) } + r.WriteString(mappedTransform) } } if rule.BodyType != "" { - if mappedBodyType, ok := bodyTypeMatch[rule.BodyType]; ok { - r.WriteString(fmt.Sprintf(",ctl:requestBodyProcessor=%s", mappedBodyType)) - } else { + mappedBodyType, ok := bodyTypeMatch[rule.BodyType] + if !ok { return nil, fmt.Errorf("unknown body type '%s'", rule.BodyType) } + r.WriteString(fmt.Sprintf(",ctl:requestBodyProcessor=%s", mappedBodyType)) } if and { diff --git a/pkg/appsec/coraza_logger.go b/pkg/appsec/coraza_logger.go index 7229f038b92..2b7f85d4e46 100644 --- a/pkg/appsec/coraza_logger.go +++ b/pkg/appsec/coraza_logger.go @@ -90,14 +90,13 @@ func (e *crzLogEvent) Bool(key string, b bool) dbg.Event { func (e *crzLogEvent) Int(key string, i int) dbg.Event { if e.muted { - // this allows us to have per-rule debug logging - if key == "rule_id" && GetRuleDebug(i) { - e.muted = false - e.fields = map[string]interface{}{} - e.level = log.DebugLevel - } else { + if key != "rule_id" || !GetRuleDebug(i) { return e } + // this allows us to have per-rule debug logging + e.muted = false + e.fields = map[string]interface{}{} + e.level = log.DebugLevel } e.fields[key] = i diff --git a/pkg/csplugin/hclog_adapter.go b/pkg/csplugin/hclog_adapter.go index 58190684ebc..44a22463709 100644 
--- a/pkg/csplugin/hclog_adapter.go +++ b/pkg/csplugin/hclog_adapter.go @@ -221,11 +221,10 @@ func merge(dst map[string]interface{}, k, v interface{}) { func safeString(str fmt.Stringer) (s string) { defer func() { if panicVal := recover(); panicVal != nil { - if v := reflect.ValueOf(str); v.Kind() == reflect.Ptr && v.IsNil() { - s = "NULL" - } else { + if v := reflect.ValueOf(str); v.Kind() != reflect.Ptr || !v.IsNil() { panic(panicVal) } + s = "NULL" } }() diff --git a/pkg/exprhelpers/crowdsec_cti.go b/pkg/exprhelpers/crowdsec_cti.go index 59a239722e3..268979ee243 100644 --- a/pkg/exprhelpers/crowdsec_cti.go +++ b/pkg/exprhelpers/crowdsec_cti.go @@ -86,12 +86,11 @@ func CrowdsecCTI(params ...any) (any, error) { if val, err := CTICache.Get(ip); err == nil && val != nil { ctiClient.Logger.Debugf("cti cache fetch for %s", ip) ret, ok := val.(*cticlient.SmokeItem) - if !ok { - ctiClient.Logger.Warningf("CrowdsecCTI: invalid type in cache, removing") - CTICache.Remove(ip) - } else { + if ok { return ret, nil } + ctiClient.Logger.Warningf("CrowdsecCTI: invalid type in cache, removing") + CTICache.Remove(ip) } if !CTIBackOffUntil.IsZero() && time.Now().Before(CTIBackOffUntil) { diff --git a/pkg/parser/parsing_test.go b/pkg/parser/parsing_test.go index 3193631f4dd..f142e1bc29a 100644 --- a/pkg/parser/parsing_test.go +++ b/pkg/parser/parsing_test.go @@ -278,26 +278,25 @@ func matchEvent(expected types.Event, out types.Event, debug bool) ([]string, bo for mapIdx := 0; mapIdx < len(expectMaps); mapIdx++ { for expKey, expVal := range expectMaps[mapIdx] { - if outVal, ok := outMaps[mapIdx][expKey]; ok { - if outVal == expVal { //ok entry - if debug { - retInfo = append(retInfo, fmt.Sprintf("ok %s[%s] %s == %s", outLabels[mapIdx], expKey, expVal, outVal)) - } - valid = true - } else { //mismatch entry - if debug { - retInfo = append(retInfo, fmt.Sprintf("mismatch %s[%s] %s != %s", outLabels[mapIdx], expKey, expVal, outVal)) - } - valid = false - goto checkFinished - } - } else 
{ //missing entry + outVal, ok := outMaps[mapIdx][expKey] + if !ok { if debug { retInfo = append(retInfo, fmt.Sprintf("missing entry %s[%s]", outLabels[mapIdx], expKey)) } valid = false goto checkFinished } + if outVal != expVal { //ok entry + if debug { + retInfo = append(retInfo, fmt.Sprintf("mismatch %s[%s] %s != %s", outLabels[mapIdx], expKey, expVal, outVal)) + } + valid = false + goto checkFinished + } + if debug { + retInfo = append(retInfo, fmt.Sprintf("ok %s[%s] %s == %s", outLabels[mapIdx], expKey, expVal, outVal)) + } + valid = true } } checkFinished: From cad760e605b4f6140b4cdee8665a0951c5b212cd Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 7 Jun 2024 16:56:02 +0200 Subject: [PATCH 173/318] pkg/cwhub: use explicit context for version check, index update (#3064) * cscli refactor: explicit context for hub (version lookup) * change order of version checks * pkg/cwhub: split NewHub() + Update() + Load() * cscli refactor: explicit context for hub (index update) * updated pkg/cwhub docs * lint --- cmd/crowdsec-cli/config_restore.go | 13 +++--- cmd/crowdsec-cli/hub.go | 27 ++++++++---- cmd/crowdsec-cli/itemcli.go | 25 +++++------ cmd/crowdsec-cli/require/branch.go | 66 ++++++++++++++++++++++++----- cmd/crowdsec-cli/require/require.go | 13 ++++-- cmd/crowdsec-cli/setup.go | 2 +- cmd/crowdsec/serve.go | 16 +++++-- pkg/cwhub/cwhub_test.go | 11 ++++- pkg/cwhub/doc.go | 29 ++++++++++--- pkg/cwhub/hub.go | 41 ++++++++++-------- pkg/cwhub/hub_test.go | 15 +++++-- pkg/cwhub/itemupgrade_test.go | 38 +++++++++++++---- pkg/cwhub/remote.go | 4 +- pkg/cwversion/version.go | 28 +----------- pkg/hubtest/hubtest.go | 16 +++++-- pkg/hubtest/hubtest_item.go | 8 +++- pkg/leakybucket/buckets_test.go | 14 ++++-- test/bats/20_hub_items.bats | 2 +- 18 files changed, 244 insertions(+), 124 deletions(-) diff --git a/cmd/crowdsec-cli/config_restore.go b/cmd/crowdsec-cli/config_restore.go index ee7179b73c5..6147a7518c2 100644 --- 
a/cmd/crowdsec-cli/config_restore.go +++ b/cmd/crowdsec-cli/config_restore.go @@ -1,6 +1,7 @@ package main import ( + "context" "encoding/json" "fmt" "os" @@ -13,10 +14,10 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -func (cli *cliConfig) restoreHub(dirPath string) error { +func (cli *cliConfig) restoreHub(ctx context.Context, dirPath string) error { cfg := cli.cfg() - hub, err := require.Hub(cfg, require.RemoteHub(cfg), nil) + hub, err := require.Hub(cfg, require.RemoteHub(ctx, cfg), nil) if err != nil { return err } @@ -126,7 +127,7 @@ func (cli *cliConfig) restoreHub(dirPath string) error { - Tainted/local/out-of-date scenarios, parsers, postoverflows and collections - Acquisition files (acquis.yaml, acquis.d/*.yaml) */ -func (cli *cliConfig) restore(dirPath string) error { +func (cli *cliConfig) restore(ctx context.Context, dirPath string) error { var err error cfg := cli.cfg() @@ -237,7 +238,7 @@ func (cli *cliConfig) restore(dirPath string) error { } } - if err = cli.restoreHub(dirPath); err != nil { + if err = cli.restoreHub(ctx, dirPath); err != nil { return fmt.Errorf("failed to restore hub config: %w", err) } @@ -258,10 +259,10 @@ func (cli *cliConfig) newRestoreCmd() *cobra.Command { - Backup of API credentials (local API and online API)`, Args: cobra.ExactArgs(1), DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, args []string) error { + RunE: func(cmd *cobra.Command, args []string) error { dirPath := args[0] - if err := cli.restore(dirPath); err != nil { + if err := cli.restore(cmd.Context(), dirPath); err != nil { return fmt.Errorf("failed to restore config from %s: %w", dirPath, err) } diff --git a/cmd/crowdsec-cli/hub.go b/cmd/crowdsec-cli/hub.go index 318dd018e06..cf9f7f282a4 100644 --- a/cmd/crowdsec-cli/hub.go +++ b/cmd/crowdsec-cli/hub.go @@ -1,6 +1,7 @@ package main import ( + "context" "encoding/json" "fmt" @@ -98,16 +99,24 @@ func (cli *cliHub) newListCmd() *cobra.Command { return cmd } -func (cli *cliHub) update() error 
{ +func (cli *cliHub) update(ctx context.Context) error { local := cli.cfg().Hub - remote := require.RemoteHub(cli.cfg()) + remote := require.RemoteHub(ctx, cli.cfg()) // don't use require.Hub because if there is no index file, it would fail - hub, err := cwhub.NewHub(local, remote, true, log.StandardLogger()) + hub, err := cwhub.NewHub(local, remote, log.StandardLogger()) if err != nil { + return err + } + + if err := hub.Update(ctx); err != nil { return fmt.Errorf("failed to update hub: %w", err) } + if err := hub.Load(); err != nil { + return fmt.Errorf("failed to load hub: %w", err) + } + for _, v := range hub.Warnings { log.Info(v) } @@ -124,16 +133,16 @@ Fetches the .index.json file from the hub, containing the list of available conf `, Args: cobra.ExactArgs(0), DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, _ []string) error { - return cli.update() + RunE: func(cmd *cobra.Command, _ []string) error { + return cli.update(cmd.Context()) }, } return cmd } -func (cli *cliHub) upgrade(force bool) error { - hub, err := require.Hub(cli.cfg(), require.RemoteHub(cli.cfg()), log.StandardLogger()) +func (cli *cliHub) upgrade(ctx context.Context, force bool) error { + hub, err := require.Hub(cli.cfg(), require.RemoteHub(ctx, cli.cfg()), log.StandardLogger()) if err != nil { return err } @@ -176,8 +185,8 @@ Upgrade all configs installed from Crowdsec Hub. 
Run 'sudo cscli hub update' if `, Args: cobra.ExactArgs(0), DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, _ []string) error { - return cli.upgrade(force) + RunE: func(cmd *cobra.Command, _ []string) error { + return cli.upgrade(cmd.Context(), force) }, } diff --git a/cmd/crowdsec-cli/itemcli.go b/cmd/crowdsec-cli/itemcli.go index 367e22f2962..dedbab3b914 100644 --- a/cmd/crowdsec-cli/itemcli.go +++ b/cmd/crowdsec-cli/itemcli.go @@ -2,6 +2,7 @@ package main import ( "cmp" + "context" "errors" "fmt" "os" @@ -61,10 +62,10 @@ func (cli cliItem) NewCommand() *cobra.Command { return cmd } -func (cli cliItem) install(args []string, downloadOnly bool, force bool, ignoreError bool) error { +func (cli cliItem) install(ctx context.Context, args []string, downloadOnly bool, force bool, ignoreError bool) error { cfg := cli.cfg() - hub, err := require.Hub(cfg, require.RemoteHub(cfg), log.StandardLogger()) + hub, err := require.Hub(cfg, require.RemoteHub(ctx, cfg), log.StandardLogger()) if err != nil { return err } @@ -113,8 +114,8 @@ func (cli cliItem) newInstallCmd() *cobra.Command { ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { return compAllItems(cli.name, args, toComplete, cli.cfg) }, - RunE: func(_ *cobra.Command, args []string) error { - return cli.install(args, downloadOnly, force, ignoreError) + RunE: func(cmd *cobra.Command, args []string) error { + return cli.install(cmd.Context(), args, downloadOnly, force, ignoreError) }, } @@ -252,10 +253,10 @@ func (cli cliItem) newRemoveCmd() *cobra.Command { return cmd } -func (cli cliItem) upgrade(args []string, force bool, all bool) error { +func (cli cliItem) upgrade(ctx context.Context, args []string, force bool, all bool) error { cfg := cli.cfg() - hub, err := require.Hub(cfg, require.RemoteHub(cfg), log.StandardLogger()) + hub, err := require.Hub(cfg, require.RemoteHub(ctx, cfg), log.StandardLogger()) if err != nil { return err } @@ -334,8 +335,8 
@@ func (cli cliItem) newUpgradeCmd() *cobra.Command { ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { return compInstalledItems(cli.name, args, toComplete, cli.cfg) }, - RunE: func(_ *cobra.Command, args []string) error { - return cli.upgrade(args, force, all) + RunE: func(cmd *cobra.Command, args []string) error { + return cli.upgrade(cmd.Context(), args, force, all) }, } @@ -346,7 +347,7 @@ func (cli cliItem) newUpgradeCmd() *cobra.Command { return cmd } -func (cli cliItem) inspect(args []string, url string, diff bool, rev bool, noMetrics bool) error { +func (cli cliItem) inspect(ctx context.Context, args []string, url string, diff bool, rev bool, noMetrics bool) error { cfg := cli.cfg() if rev && !diff { @@ -360,7 +361,7 @@ func (cli cliItem) inspect(args []string, url string, diff bool, rev bool, noMet remote := (*cwhub.RemoteHubCfg)(nil) if diff { - remote = require.RemoteHub(cfg) + remote = require.RemoteHub(ctx, cfg) } hub, err := require.Hub(cfg, remote, log.StandardLogger()) @@ -412,8 +413,8 @@ func (cli cliItem) newInspectCmd() *cobra.Command { ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { return compInstalledItems(cli.name, args, toComplete, cli.cfg) }, - RunE: func(_ *cobra.Command, args []string) error { - return cli.inspect(args, url, diff, rev, noMetrics) + RunE: func(cmd *cobra.Command, args []string) error { + return cli.inspect(cmd.Context(), args, url, diff, rev, noMetrics) }, } diff --git a/cmd/crowdsec-cli/require/branch.go b/cmd/crowdsec-cli/require/branch.go index 6fcaaacea2d..503cb6d2326 100644 --- a/cmd/crowdsec-cli/require/branch.go +++ b/cmd/crowdsec-cli/require/branch.go @@ -3,27 +3,76 @@ package require // Set the appropriate hub branch according to config settings and crowdsec version import ( + "context" + "encoding/json" + "fmt" + "net/http" + "time" + log "github.com/sirupsen/logrus" 
"golang.org/x/mod/semver" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/cwversion" ) -func chooseBranch(cfg *csconfig.Config) string { +// lookupLatest returns the latest crowdsec version based on github +func lookupLatest(ctx context.Context) (string, error) { + ctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + + url := "https://version.crowdsec.net/latest" + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return "", fmt.Errorf("unable to create request for %s: %w", url, err) + } + + client := &http.Client{} + + resp, err := client.Do(req) + if err != nil { + return "", fmt.Errorf("unable to send request to %s: %w", url, err) + } + defer resp.Body.Close() + + latest := make(map[string]any) + + if err := json.NewDecoder(resp.Body).Decode(&latest); err != nil { + return "", fmt.Errorf("unable to decode response from %s: %w", url, err) + } + + if _, ok := latest["name"]; !ok { + return "", fmt.Errorf("unable to find 'name' key in response from %s", url) + } + + name, ok := latest["name"].(string) + if !ok { + return "", fmt.Errorf("unable to convert 'name' key to string in response from %s", url) + } + + return name, nil +} + +func chooseBranch(ctx context.Context, cfg *csconfig.Config) string { // this was set from config.yaml or flag if cfg.Cscli.HubBranch != "" { log.Debugf("Hub override from config: branch '%s'", cfg.Cscli.HubBranch) return cfg.Cscli.HubBranch } - latest, err := cwversion.Latest() + latest, err := lookupLatest(ctx) if err != nil { log.Warningf("Unable to retrieve latest crowdsec version: %s, using hub branch 'master'", err) return "master" } csVersion := cwversion.VersionStrip() + if csVersion == "" { + log.Warning("Crowdsec version is not set, using hub branch 'master'") + return "master" + } + if csVersion == latest { log.Debugf("Latest crowdsec version (%s), using hub branch 
'master'", csVersion) return "master" @@ -35,22 +84,17 @@ func chooseBranch(cfg *csconfig.Config) string { return "master" } - if csVersion == "" { - log.Warning("Crowdsec version is not set, using hub branch 'master'") - return "master" - } - log.Warnf("A new CrowdSec release is available (%s). "+ "Your version is '%s'. Please update it to use new parsers/scenarios/collections.", latest, csVersion) + return csVersion } - // HubBranch sets the branch (in cscli config) and returns its value // It can be "master", or the branch corresponding to the current crowdsec version, or the value overridden in config/flag -func HubBranch(cfg *csconfig.Config) string { - branch := chooseBranch(cfg) +func HubBranch(ctx context.Context, cfg *csconfig.Config) string { + branch := chooseBranch(ctx, cfg) cfg.Cscli.HubBranch = branch diff --git a/cmd/crowdsec-cli/require/require.go b/cmd/crowdsec-cli/require/require.go index 708b2d1c7a2..3ff66254466 100644 --- a/cmd/crowdsec-cli/require/require.go +++ b/cmd/crowdsec-cli/require/require.go @@ -1,6 +1,7 @@ package require import ( + "context" "errors" "fmt" "io" @@ -64,14 +65,14 @@ func Notifications(c *csconfig.Config) error { } // RemoteHub returns the configuration required to download hub index and items: url, branch, etc. 
-func RemoteHub(c *csconfig.Config) *cwhub.RemoteHubCfg { +func RemoteHub(ctx context.Context, c *csconfig.Config) *cwhub.RemoteHubCfg { // set branch in config, and log if necessary - branch := HubBranch(c) + branch := HubBranch(ctx, c) urlTemplate := HubURLTemplate(c) remote := &cwhub.RemoteHubCfg{ Branch: branch, URLTemplate: urlTemplate, - IndexPath: ".index.json", + IndexPath: ".index.json", } return remote @@ -91,8 +92,12 @@ func Hub(c *csconfig.Config, remote *cwhub.RemoteHubCfg, logger *logrus.Logger) logger.SetOutput(io.Discard) } - hub, err := cwhub.NewHub(local, remote, false, logger) + hub, err := cwhub.NewHub(local, remote, logger) if err != nil { + return nil, err + } + + if err := hub.Load(); err != nil { return nil, fmt.Errorf("failed to read Hub index: %w. Run 'sudo cscli hub update' to download the index again", err) } diff --git a/cmd/crowdsec-cli/setup.go b/cmd/crowdsec-cli/setup.go index 3e12b2465dd..ad02b357b70 100644 --- a/cmd/crowdsec-cli/setup.go +++ b/cmd/crowdsec-cli/setup.go @@ -315,7 +315,7 @@ func runSetupInstallHub(cmd *cobra.Command, args []string) error { return fmt.Errorf("while reading file %s: %w", fromFile, err) } - hub, err := require.Hub(csConfig, require.RemoteHub(csConfig), log.StandardLogger()) + hub, err := require.Hub(csConfig, require.RemoteHub(cmd.Context(), csConfig), log.StandardLogger()) if err != nil { return err } diff --git a/cmd/crowdsec/serve.go b/cmd/crowdsec/serve.go index da79e50c427..6c15b2d347e 100644 --- a/cmd/crowdsec/serve.go +++ b/cmd/crowdsec/serve.go @@ -81,9 +81,13 @@ func reloadHandler(sig os.Signal) (*csconfig.Config, error) { } if !cConfig.DisableAgent { - hub, err := cwhub.NewHub(cConfig.Hub, nil, false, log.StandardLogger()) + hub, err := cwhub.NewHub(cConfig.Hub, nil, log.StandardLogger()) if err != nil { - return nil, fmt.Errorf("while loading hub index: %w", err) + return nil, err + } + + if err := hub.Load(); err != nil { + return nil, err } csParsers, datasources, err := 
initCrowdsec(cConfig, hub) @@ -367,9 +371,13 @@ func Serve(cConfig *csconfig.Config, agentReady chan bool) error { } if !cConfig.DisableAgent { - hub, err := cwhub.NewHub(cConfig.Hub, nil, false, log.StandardLogger()) + hub, err := cwhub.NewHub(cConfig.Hub, nil, log.StandardLogger()) if err != nil { - return fmt.Errorf("while loading hub index: %w", err) + return err + } + + if err := hub.Load(); err != nil { + return err } csParsers, datasources, err := initCrowdsec(cConfig, hub) diff --git a/pkg/cwhub/cwhub_test.go b/pkg/cwhub/cwhub_test.go index 09455fd65a9..17cf258745f 100644 --- a/pkg/cwhub/cwhub_test.go +++ b/pkg/cwhub/cwhub_test.go @@ -1,6 +1,7 @@ package cwhub import ( + "context" "fmt" "io" "net/http" @@ -62,7 +63,15 @@ func testHub(t *testing.T, update bool) *Hub { IndexPath: ".index.json", } - hub, err := NewHub(local, remote, update, log.StandardLogger()) + hub, err := NewHub(local, remote, log.StandardLogger()) + require.NoError(t, err) + + if update { + err := hub.Update(context.TODO()) + require.NoError(t, err) + } + + err = hub.Load() require.NoError(t, err) return hub diff --git a/pkg/cwhub/doc.go b/pkg/cwhub/doc.go index 85767265048..8cbf77ba00f 100644 --- a/pkg/cwhub/doc.go +++ b/pkg/cwhub/doc.go @@ -58,12 +58,21 @@ // InstallDir: "/etc/crowdsec", // InstallDataDir: "/var/lib/crowdsec/data", // } -// hub, err := cwhub.NewHub(localHub, nil, false) +// +// hub, err := cwhub.NewHub(localHub, nil, logger) // if err != nil { // return fmt.Errorf("unable to initialize hub: %w", err) // } // -// Now you can use the hub to access the existing items: +// If the logger is nil, the item-by-item messages will be discarded, including warnings. +// After configuring the hub, you must sync its state with items on disk. 
+// +// err := hub.Load() +// if err != nil { +// return fmt.Errorf("unable to load hub: %w", err) +// } +// +// Now you can use the hub object to access the existing items: // // // list all the parsers // for _, parser := range hub.GetItemMap(cwhub.PARSERS) { @@ -97,8 +106,8 @@ // Branch: "master", // IndexPath: ".index.json", // } -// updateIndex := false -// hub, err := cwhub.NewHub(localHub, remoteHub, updateIndex) +// +// hub, err := cwhub.NewHub(localHub, remoteHub, logger) // if err != nil { // return fmt.Errorf("unable to initialize hub: %w", err) // } @@ -106,8 +115,14 @@ // The URLTemplate is a string that will be used to build the URL of the remote hub. It must contain two // placeholders: the branch and the file path (it will be an index or an item). // -// Setting the third parameter to true will download the latest version of the index, if available on the -// specified branch. -// There is no exported method to update the index once the hub struct is created. +// Before calling hub.Load(), you can update the index file by calling the Update() method: +// +// err := hub.Update(context.Background()) +// if err != nil { +// return fmt.Errorf("unable to update hub index: %w", err) +// } +// +// Note that the command will fail if the hub has already been synced. If you want to do it (ex. after a configuration +// change the application is notified with SIGHUP) you have to instantiate a new hub object and dispose of the old one. // package cwhub diff --git a/pkg/cwhub/hub.go b/pkg/cwhub/hub.go index e7d927f54b1..1293d6fa235 100644 --- a/pkg/cwhub/hub.go +++ b/pkg/cwhub/hub.go @@ -1,6 +1,7 @@ package cwhub import ( + "context" "encoding/json" "errors" "fmt" @@ -30,10 +31,11 @@ func (h *Hub) GetDataDir() string { return h.local.InstallDataDir } -// NewHub returns a new Hub instance with local and (optionally) remote configuration, and syncs the local state. 
-// If updateIndex is true, the local index file is updated from the remote before reading the state of the items. +// NewHub returns a new Hub instance with local and (optionally) remote configuration. +// The hub is not synced automatically. Load() must be called to read the index, sync the local state, +// and check for unmanaged items. // All download operations (including updateIndex) return ErrNilRemoteHub if the remote configuration is not set. -func NewHub(local *csconfig.LocalHubCfg, remote *RemoteHubCfg, updateIndex bool, logger *logrus.Logger) (*Hub, error) { +func NewHub(local *csconfig.LocalHubCfg, remote *RemoteHubCfg, logger *logrus.Logger) (*Hub, error) { if local == nil { return nil, errors.New("no hub configuration found") } @@ -50,23 +52,22 @@ func NewHub(local *csconfig.LocalHubCfg, remote *RemoteHubCfg, updateIndex bool, pathIndex: make(map[string]*Item, 0), } - if updateIndex { - if err := hub.updateIndex(); err != nil { - return nil, err - } - } + return hub, nil +} - logger.Debugf("loading hub idx %s", local.HubIndexFile) +// Load reads the state of the items on disk. +func (h *Hub) Load() error { + h.logger.Debugf("loading hub idx %s", h.local.HubIndexFile) - if err := hub.parseIndex(); err != nil { - return nil, fmt.Errorf("failed to load index: %w", err) + if err := h.parseIndex(); err != nil { + return fmt.Errorf("failed to load hub index: %w", err) } - if err := hub.localSync(); err != nil { - return nil, fmt.Errorf("failed to sync items: %w", err) + if err := h.localSync(); err != nil { + return fmt.Errorf("failed to sync hub items: %w", err) } - return hub, nil + return nil } // parseIndex takes the content of an index file and fills the map of associated parsers/scenarios/collections. @@ -149,9 +150,15 @@ func (h *Hub) ItemStats() []string { return ret } -// updateIndex downloads the latest version of the index and writes it to disk if it changed. 
-func (h *Hub) updateIndex() error { - downloaded, err := h.remote.fetchIndex(h.local.HubIndexFile) +// Update downloads the latest version of the index and writes it to disk if it changed. It cannot be called after Load() +// unless the hub is completely empty. +func (h *Hub) Update(ctx context.Context) error { + if h.pathIndex != nil && len(h.pathIndex) > 0 { + // if this happens, it's a bug. + return errors.New("cannot update hub after items have been loaded") + } + + downloaded, err := h.remote.fetchIndex(ctx, h.local.HubIndexFile) if err != nil { return err } diff --git a/pkg/cwhub/hub_test.go b/pkg/cwhub/hub_test.go index d5592a16c39..13c495e2fcc 100644 --- a/pkg/cwhub/hub_test.go +++ b/pkg/cwhub/hub_test.go @@ -1,6 +1,7 @@ package cwhub import ( + "context" "fmt" "os" "testing" @@ -18,7 +19,13 @@ func TestInitHubUpdate(t *testing.T) { IndexPath: ".index.json", } - _, err := NewHub(hub.local, remote, true, nil) + _, err := NewHub(hub.local, remote, nil) + require.NoError(t, err) + + err = hub.Update(context.TODO()) + require.NoError(t, err) + + err = hub.Load() require.NoError(t, err) } @@ -47,7 +54,7 @@ func TestUpdateIndex(t *testing.T) { hub.local.HubIndexFile = tmpIndex.Name() - err = hub.updateIndex() + err = hub.Update(context.TODO()) cstest.RequireErrorContains(t, err, "failed to build hub index request: invalid URL template 'x'") // bad domain @@ -59,7 +66,7 @@ func TestUpdateIndex(t *testing.T) { IndexPath: ".index.json", } - err = hub.updateIndex() + err = hub.Update(context.TODO()) require.NoError(t, err) // XXX: this is not failing // cstest.RequireErrorContains(t, err, "failed http request for hub index: Get") @@ -75,6 +82,6 @@ func TestUpdateIndex(t *testing.T) { hub.local.HubIndexFile = "/does/not/exist/index.json" - err = hub.updateIndex() + err = hub.Update(context.TODO()) cstest.RequireErrorContains(t, err, "failed to create temporary download file for /does/not/exist/index.json:") } diff --git a/pkg/cwhub/itemupgrade_test.go 
b/pkg/cwhub/itemupgrade_test.go index 1bd62ad63e8..d86d2094955 100644 --- a/pkg/cwhub/itemupgrade_test.go +++ b/pkg/cwhub/itemupgrade_test.go @@ -1,6 +1,7 @@ package cwhub import ( + "context" "testing" "github.com/stretchr/testify/require" @@ -39,8 +40,14 @@ func TestUpgradeItemNewScenarioInCollection(t *testing.T) { IndexPath: ".index.json", } - hub, err := NewHub(hub.local, remote, true, nil) - require.NoError(t, err, "failed to download index: %s", err) + hub, err := NewHub(hub.local, remote, nil) + require.NoError(t, err) + + err = hub.Update(context.TODO()) + require.NoError(t, err) + + err = hub.Load() + require.NoError(t, err) hub = getHubOrFail(t, hub.local, remote) @@ -100,8 +107,14 @@ func TestUpgradeItemInDisabledScenarioShouldNotBeInstalled(t *testing.T) { require.True(t, hub.GetItem(COLLECTIONS, "crowdsecurity/test_collection").State.Installed) require.True(t, hub.GetItem(COLLECTIONS, "crowdsecurity/test_collection").State.UpToDate) - hub, err = NewHub(hub.local, remote, true, nil) - require.NoError(t, err, "failed to download index: %s", err) + hub, err = NewHub(hub.local, remote, nil) + require.NoError(t, err) + + err = hub.Update(context.TODO()) + require.NoError(t, err) + + err = hub.Load() + require.NoError(t, err) item = hub.GetItem(COLLECTIONS, "crowdsecurity/test_collection") didUpdate, err := item.Upgrade(false) @@ -114,8 +127,11 @@ func TestUpgradeItemInDisabledScenarioShouldNotBeInstalled(t *testing.T) { // getHubOrFail refreshes the hub state (load index, sync) and returns the singleton, or fails the test. func getHubOrFail(t *testing.T, local *csconfig.LocalHubCfg, remote *RemoteHubCfg) *Hub { - hub, err := NewHub(local, remote, false, nil) - require.NoError(t, err, "failed to load hub index") + hub, err := NewHub(local, remote, nil) + require.NoError(t, err) + + err = hub.Load() + require.NoError(t, err) return hub } @@ -166,8 +182,14 @@ func TestUpgradeItemNewScenarioIsInstalledWhenReferencedScenarioIsDisabled(t *te // we just removed. 
Nor should it install the newly added scenario pushUpdateToCollectionInHub() - hub, err = NewHub(hub.local, remote, true, nil) - require.NoError(t, err, "failed to download index: %s", err) + hub, err = NewHub(hub.local, remote, nil) + require.NoError(t, err) + + err = hub.Update(context.TODO()) + require.NoError(t, err) + + err = hub.Load() + require.NoError(t, err) require.False(t, hub.GetItem(SCENARIOS, "crowdsecurity/foobar_scenario").State.Installed) hub = getHubOrFail(t, hub.local, remote) diff --git a/pkg/cwhub/remote.go b/pkg/cwhub/remote.go index 5e42555fa61..abb2ddae2ad 100644 --- a/pkg/cwhub/remote.go +++ b/pkg/cwhub/remote.go @@ -31,7 +31,7 @@ func (r *RemoteHubCfg) urlTo(remotePath string) (string, error) { } // fetchIndex downloads the index from the hub and returns the content. -func (r *RemoteHubCfg) fetchIndex(destPath string) (bool, error) { +func (r *RemoteHubCfg) fetchIndex(ctx context.Context, destPath string) (bool, error) { if r == nil { return false, ErrNilRemoteHub } @@ -41,8 +41,6 @@ func (r *RemoteHubCfg) fetchIndex(destPath string) (bool, error) { return false, fmt.Errorf("failed to build hub index request: %w", err) } - ctx := context.TODO() - downloaded, err := downloader. New(). WithHTTPClient(hubClient). 
diff --git a/pkg/cwversion/version.go b/pkg/cwversion/version.go index d13767e665b..01509833c1c 100644 --- a/pkg/cwversion/version.go +++ b/pkg/cwversion/version.go @@ -1,9 +1,7 @@ package cwversion import ( - "encoding/json" "fmt" - "net/http" "strings" goversion "github.com/hashicorp/go-version" @@ -27,7 +25,7 @@ func versionWithTag() string { // if the version number already contains the tag, don't duplicate it ret := version.Version - if !strings.HasSuffix(ret, version.Tag) && !strings.HasSuffix(ret, "g" + version.Tag + "-dirty") { + if !strings.HasSuffix(ret, version.Tag) && !strings.HasSuffix(ret, "g"+version.Tag+"-dirty") { ret += "-" + version.Tag } @@ -65,7 +63,7 @@ func VersionStrip() string { func Satisfies(strvers string, constraint string) (bool, error) { vers, err := goversion.NewVersion(strvers) if err != nil { - return false, fmt.Errorf("failed to parse '%s' : %v", strvers, err) + return false, fmt.Errorf("failed to parse '%s': %w", strvers, err) } constraints, err := goversion.NewConstraint(constraint) @@ -79,25 +77,3 @@ func Satisfies(strvers string, constraint string) (bool, error) { return true, nil } - -// Latest return latest crowdsec version based on github -func Latest() (string, error) { - latest := make(map[string]any) - - resp, err := http.Get("https://version.crowdsec.net/latest") - if err != nil { - return "", err - } - defer resp.Body.Close() - - err = json.NewDecoder(resp.Body).Decode(&latest) - if err != nil { - return "", err - } - - if _, ok := latest["name"]; !ok { - return "", fmt.Errorf("unable to find latest release name from github api: %+v", latest) - } - - return latest["name"].(string), nil -} diff --git a/pkg/hubtest/hubtest.go b/pkg/hubtest/hubtest.go index 6610652f78a..a4ca275c310 100644 --- a/pkg/hubtest/hubtest.go +++ b/pkg/hubtest/hubtest.go @@ -93,9 +93,13 @@ func NewHubTest(hubPath string, crowdsecPath string, cscliPath string, isAppsecT InstallDataDir: HubTestPath, } - hub, err := cwhub.NewHub(local, nil, false, 
nil) + hub, err := cwhub.NewHub(local, nil, nil) if err != nil { - return HubTest{}, fmt.Errorf("unable to load hub: %s", err) + return HubTest{}, err + } + + if err := hub.Load(); err != nil { + return HubTest{}, err } return HubTest{ @@ -126,9 +130,13 @@ func NewHubTest(hubPath string, crowdsecPath string, cscliPath string, isAppsecT InstallDataDir: HubTestPath, } - hub, err := cwhub.NewHub(local, nil, false, nil) + hub, err := cwhub.NewHub(local, nil, nil) if err != nil { - return HubTest{}, fmt.Errorf("unable to load hub: %s", err) + return HubTest{}, err + } + + if err := hub.Load(); err != nil { + return HubTest{}, err } return HubTest{ diff --git a/pkg/hubtest/hubtest_item.go b/pkg/hubtest/hubtest_item.go index 918a10f62e1..096bdcd0a02 100644 --- a/pkg/hubtest/hubtest_item.go +++ b/pkg/hubtest/hubtest_item.go @@ -110,7 +110,7 @@ func NewTest(name string, hubTest *HubTest) (*HubTestItem, error) { err = yaml.Unmarshal(yamlFile, configFileData) if err != nil { - return nil, fmt.Errorf("unmarshal: %v", err) + return nil, fmt.Errorf("unmarshal: %w", err) } parserAssertFilePath := filepath.Join(testPath, ParserAssertFileName) @@ -210,11 +210,15 @@ func (t *HubTestItem) InstallHub() error { } // load installed hub - hub, err := cwhub.NewHub(t.RuntimeHubConfig, nil, false, nil) + hub, err := cwhub.NewHub(t.RuntimeHubConfig, nil, nil) if err != nil { return err } + if err := hub.Load(); err != nil { + return err + } + // install data for parsers if needed ret := hub.GetItemMap(cwhub.PARSERS) for parserName, item := range ret { diff --git a/pkg/leakybucket/buckets_test.go b/pkg/leakybucket/buckets_test.go index 4bb3c96759e..989e03944c3 100644 --- a/pkg/leakybucket/buckets_test.go +++ b/pkg/leakybucket/buckets_test.go @@ -16,6 +16,7 @@ import ( "github.com/davecgh/go-spew/spew" log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/require" "gopkg.in/tomb.v2" yaml "gopkg.in/yaml.v2" @@ -45,10 +46,11 @@ func TestBucket(t *testing.T) { InstallDataDir: testdata, 
} - hub, err := cwhub.NewHub(hubCfg, nil, false, nil) - if err != nil { - t.Fatalf("failed to init hub: %s", err) - } + hub, err := cwhub.NewHub(hubCfg, nil, nil) + require.NoError(t, err) + + err = hub.Load() + require.NoError(t, err) err = exprhelpers.Init(nil) if err != nil { @@ -197,9 +199,11 @@ func testFile(t *testing.T, file string, bs string, holders []BucketFactory, res //just to avoid any race during ingestion of funny scenarios time.Sleep(50 * time.Millisecond) var ts time.Time + if err := ts.UnmarshalText([]byte(in.MarshaledTime)); err != nil { t.Fatalf("Failed to unmarshal time from input event : %s", err) } + if latest_ts.IsZero() { latest_ts = ts } else if ts.After(latest_ts) { @@ -208,10 +212,12 @@ func testFile(t *testing.T, file string, bs string, holders []BucketFactory, res in.ExpectMode = types.TIMEMACHINE log.Infof("Buckets input : %s", spew.Sdump(in)) + ok, err := PourItemToHolders(in, holders, buckets) if err != nil { t.Fatalf("Failed to pour : %s", err) } + if !ok { log.Warning("Event wasn't poured") } diff --git a/test/bats/20_hub_items.bats b/test/bats/20_hub_items.bats index 72e09dfa268..c6dbafc0911 100644 --- a/test/bats/20_hub_items.bats +++ b/test/bats/20_hub_items.bats @@ -83,7 +83,7 @@ teardown() { rune -1 cscli collections inspect crowdsecurity/sshd --no-metrics -o json # XXX: we are on the verbose side here... rune -0 jq -r ".msg" <(stderr) - assert_output --regexp "failed to read Hub index: failed to sync items: failed to scan .*: while syncing collections sshd.yaml: 1.2.3.4: Invalid Semantic Version. Run 'sudo cscli hub update' to download the index again" + assert_output --regexp "failed to read Hub index: failed to sync hub items: failed to scan .*: while syncing collections sshd.yaml: 1.2.3.4: Invalid Semantic Version. 
Run 'sudo cscli hub update' to download the index again" } @test "removing or purging an item already removed by hand" { From dd6cf2b844f801fe045dc102471064ae1ef66bdc Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 7 Jun 2024 17:32:52 +0200 Subject: [PATCH 174/318] pkg/cwhub: use explicit context for item install, upgrade (#3067) --- cmd/crowdsec-cli/config_restore.go | 2 +- cmd/crowdsec-cli/hub.go | 2 +- cmd/crowdsec-cli/itemcli.go | 16 ++++++++-------- cmd/crowdsec-cli/setup.go | 2 +- pkg/cwhub/cwhub_test.go | 3 ++- pkg/cwhub/dataset.go | 4 +--- pkg/cwhub/hub_test.go | 12 ++++++++---- pkg/cwhub/iteminstall.go | 5 +++-- pkg/cwhub/iteminstall_test.go | 9 +++++++-- pkg/cwhub/itemupgrade.go | 28 +++++++++++++--------------- pkg/cwhub/itemupgrade_test.go | 24 +++++++++++++++--------- pkg/hubtest/hubtest_item.go | 9 ++++++--- pkg/setup/install.go | 11 ++++++----- 13 files changed, 72 insertions(+), 55 deletions(-) diff --git a/cmd/crowdsec-cli/config_restore.go b/cmd/crowdsec-cli/config_restore.go index 6147a7518c2..fc3670165f8 100644 --- a/cmd/crowdsec-cli/config_restore.go +++ b/cmd/crowdsec-cli/config_restore.go @@ -50,7 +50,7 @@ func (cli *cliConfig) restoreHub(ctx context.Context, dirPath string) error { continue } - if err = item.Install(false, false); err != nil { + if err = item.Install(ctx, false, false); err != nil { log.Errorf("Error while installing %s : %s", toinstall, err) } } diff --git a/cmd/crowdsec-cli/hub.go b/cmd/crowdsec-cli/hub.go index cf9f7f282a4..737b93d8da8 100644 --- a/cmd/crowdsec-cli/hub.go +++ b/cmd/crowdsec-cli/hub.go @@ -158,7 +158,7 @@ func (cli *cliHub) upgrade(ctx context.Context, force bool) error { log.Infof("Upgrading %s", itemType) for _, item := range items { - didUpdate, err := item.Upgrade(force) + didUpdate, err := item.Upgrade(ctx, force) if err != nil { return err } diff --git a/cmd/crowdsec-cli/itemcli.go b/cmd/crowdsec-cli/itemcli.go index dedbab3b914..55396a10995 100644 --- 
a/cmd/crowdsec-cli/itemcli.go +++ b/cmd/crowdsec-cli/itemcli.go @@ -83,7 +83,7 @@ func (cli cliItem) install(ctx context.Context, args []string, downloadOnly bool continue } - if err := item.Install(force, downloadOnly); err != nil { + if err := item.Install(ctx, force, downloadOnly); err != nil { if !ignoreError { return fmt.Errorf("error while installing '%s': %w", item.Name, err) } @@ -270,7 +270,7 @@ func (cli cliItem) upgrade(ctx context.Context, args []string, force bool, all b updated := 0 for _, item := range items { - didUpdate, err := item.Upgrade(force) + didUpdate, err := item.Upgrade(ctx, force) if err != nil { return err } @@ -301,7 +301,7 @@ func (cli cliItem) upgrade(ctx context.Context, args []string, force bool, all b return fmt.Errorf("can't find '%s' in %s", itemName, cli.name) } - didUpdate, err := item.Upgrade(force) + didUpdate, err := item.Upgrade(ctx, force) if err != nil { return err } @@ -376,7 +376,7 @@ func (cli cliItem) inspect(ctx context.Context, args []string, url string, diff } if diff { - fmt.Println(cli.whyTainted(hub, item, rev)) + fmt.Println(cli.whyTainted(ctx, hub, item, rev)) continue } @@ -466,7 +466,7 @@ func (cli cliItem) newListCmd() *cobra.Command { } // return the diff between the installed version and the latest version -func (cli cliItem) itemDiff(item *cwhub.Item, reverse bool) (string, error) { +func (cli cliItem) itemDiff(ctx context.Context, item *cwhub.Item, reverse bool) (string, error) { if !item.State.Installed { return "", fmt.Errorf("'%s' is not installed", item.FQName()) } @@ -477,7 +477,7 @@ func (cli cliItem) itemDiff(item *cwhub.Item, reverse bool) (string, error) { } defer os.Remove(dest.Name()) - _, remoteURL, err := item.FetchContentTo(dest.Name()) + _, remoteURL, err := item.FetchContentTo(ctx, dest.Name()) if err != nil { return "", err } @@ -508,7 +508,7 @@ func (cli cliItem) itemDiff(item *cwhub.Item, reverse bool) (string, error) { return fmt.Sprintf("%s", diff), nil } -func (cli cliItem) 
whyTainted(hub *cwhub.Hub, item *cwhub.Item, reverse bool) string { +func (cli cliItem) whyTainted(ctx context.Context, hub *cwhub.Hub, item *cwhub.Item, reverse bool) string { if !item.State.Installed { return fmt.Sprintf("# %s is not installed", item.FQName()) } @@ -533,7 +533,7 @@ func (cli cliItem) whyTainted(hub *cwhub.Hub, item *cwhub.Item, reverse bool) st ret = append(ret, err.Error()) } - diff, err := cli.itemDiff(sub, reverse) + diff, err := cli.itemDiff(ctx, sub, reverse) if err != nil { ret = append(ret, err.Error()) } diff --git a/cmd/crowdsec-cli/setup.go b/cmd/crowdsec-cli/setup.go index ad02b357b70..9f685d0fac1 100644 --- a/cmd/crowdsec-cli/setup.go +++ b/cmd/crowdsec-cli/setup.go @@ -320,7 +320,7 @@ func runSetupInstallHub(cmd *cobra.Command, args []string) error { return err } - return setup.InstallHubItems(hub, input, dryRun) + return setup.InstallHubItems(cmd.Context(), hub, input, dryRun) } func runSetupValidate(cmd *cobra.Command, args []string) error { diff --git a/pkg/cwhub/cwhub_test.go b/pkg/cwhub/cwhub_test.go index 17cf258745f..d11ed2b9a95 100644 --- a/pkg/cwhub/cwhub_test.go +++ b/pkg/cwhub/cwhub_test.go @@ -67,7 +67,8 @@ func testHub(t *testing.T, update bool) *Hub { require.NoError(t, err) if update { - err := hub.Update(context.TODO()) + ctx := context.Background() + err := hub.Update(ctx) require.NoError(t, err) } diff --git a/pkg/cwhub/dataset.go b/pkg/cwhub/dataset.go index eb56d8e32a8..6d4f35c285c 100644 --- a/pkg/cwhub/dataset.go +++ b/pkg/cwhub/dataset.go @@ -21,7 +21,7 @@ type DataSet struct { } // downloadDataSet downloads all the data files for an item. 
-func downloadDataSet(dataFolder string, force bool, reader io.Reader, logger *logrus.Logger) error { +func downloadDataSet(ctx context.Context, dataFolder string, force bool, reader io.Reader, logger *logrus.Logger) error { dec := yaml.NewDecoder(reader) for { @@ -53,8 +53,6 @@ func downloadDataSet(dataFolder string, force bool, reader io.Reader, logger *lo WithShelfLife(7 * 24 * time.Hour) } - ctx := context.TODO() - downloaded, err := d.Download(ctx, dataS.SourceURL) if err != nil { return fmt.Errorf("while getting data: %w", err) diff --git a/pkg/cwhub/hub_test.go b/pkg/cwhub/hub_test.go index 13c495e2fcc..3d4ae5793b3 100644 --- a/pkg/cwhub/hub_test.go +++ b/pkg/cwhub/hub_test.go @@ -22,7 +22,9 @@ func TestInitHubUpdate(t *testing.T) { _, err := NewHub(hub.local, remote, nil) require.NoError(t, err) - err = hub.Update(context.TODO()) + ctx := context.Background() + + err = hub.Update(ctx) require.NoError(t, err) err = hub.Load() @@ -54,7 +56,9 @@ func TestUpdateIndex(t *testing.T) { hub.local.HubIndexFile = tmpIndex.Name() - err = hub.Update(context.TODO()) + ctx := context.Background() + + err = hub.Update(ctx) cstest.RequireErrorContains(t, err, "failed to build hub index request: invalid URL template 'x'") // bad domain @@ -66,7 +70,7 @@ func TestUpdateIndex(t *testing.T) { IndexPath: ".index.json", } - err = hub.Update(context.TODO()) + err = hub.Update(ctx) require.NoError(t, err) // XXX: this is not failing // cstest.RequireErrorContains(t, err, "failed http request for hub index: Get") @@ -82,6 +86,6 @@ func TestUpdateIndex(t *testing.T) { hub.local.HubIndexFile = "/does/not/exist/index.json" - err = hub.Update(context.TODO()) + err = hub.Update(ctx) cstest.RequireErrorContains(t, err, "failed to create temporary download file for /does/not/exist/index.json:") } diff --git a/pkg/cwhub/iteminstall.go b/pkg/cwhub/iteminstall.go index 274e7128a04..6a16ad0a65f 100644 --- a/pkg/cwhub/iteminstall.go +++ b/pkg/cwhub/iteminstall.go @@ -1,6 +1,7 @@ package cwhub 
import ( + "context" "fmt" ) @@ -39,7 +40,7 @@ func (i *Item) enable() error { } // Install installs the item from the hub, downloading it if needed. -func (i *Item) Install(force bool, downloadOnly bool) error { +func (i *Item) Install(ctx context.Context, force bool, downloadOnly bool) error { if downloadOnly && i.State.Downloaded && i.State.UpToDate { i.hub.logger.Infof("%s is already downloaded and up-to-date", i.Name) @@ -48,7 +49,7 @@ func (i *Item) Install(force bool, downloadOnly bool) error { } } - downloaded, err := i.downloadLatest(force, true) + downloaded, err := i.downloadLatest(ctx, force, true) if err != nil { return err } diff --git a/pkg/cwhub/iteminstall_test.go b/pkg/cwhub/iteminstall_test.go index 337f66f95fa..5bfc7e8148e 100644 --- a/pkg/cwhub/iteminstall_test.go +++ b/pkg/cwhub/iteminstall_test.go @@ -1,6 +1,7 @@ package cwhub import ( + "context" "os" "testing" @@ -9,8 +10,10 @@ import ( ) func testInstall(hub *Hub, t *testing.T, item *Item) { + ctx := context.Background() + // Install the parser - _, err := item.downloadLatest(false, false) + _, err := item.downloadLatest(ctx, false, false) require.NoError(t, err, "failed to download %s", item.Name) err = hub.localSync() @@ -48,8 +51,10 @@ func testTaint(hub *Hub, t *testing.T, item *Item) { func testUpdate(hub *Hub, t *testing.T, item *Item) { assert.False(t, item.State.UpToDate, "%s should not be up-to-date", item.Name) + ctx := context.Background() + // Update it + check status - _, err := item.downloadLatest(true, true) + _, err := item.downloadLatest(ctx, true, true) require.NoError(t, err, "failed to update %s", item.Name) // Local sync and check status diff --git a/pkg/cwhub/itemupgrade.go b/pkg/cwhub/itemupgrade.go index 8b3ec7481ef..4dad226fd78 100644 --- a/pkg/cwhub/itemupgrade.go +++ b/pkg/cwhub/itemupgrade.go @@ -16,7 +16,7 @@ import ( ) // Upgrade downloads and applies the last version of the item from the hub. 
-func (i *Item) Upgrade(force bool) (bool, error) { +func (i *Item) Upgrade(ctx context.Context, force bool) (bool, error) { if i.State.IsLocal() { i.hub.logger.Infof("not upgrading %s: local item", i.Name) return false, nil @@ -33,7 +33,7 @@ func (i *Item) Upgrade(force bool) (bool, error) { if i.State.UpToDate { i.hub.logger.Infof("%s: up-to-date", i.Name) - if err := i.DownloadDataIfNeeded(force); err != nil { + if err := i.DownloadDataIfNeeded(ctx, force); err != nil { return false, fmt.Errorf("%s: download failed: %w", i.Name, err) } @@ -43,7 +43,7 @@ func (i *Item) Upgrade(force bool) (bool, error) { } } - if _, err := i.downloadLatest(force, true); err != nil { + if _, err := i.downloadLatest(ctx, force, true); err != nil { return false, fmt.Errorf("%s: download failed: %w", i.Name, err) } @@ -65,7 +65,7 @@ func (i *Item) Upgrade(force bool) (bool, error) { } // downloadLatest downloads the latest version of the item to the hub directory. -func (i *Item) downloadLatest(overwrite bool, updateOnly bool) (bool, error) { +func (i *Item) downloadLatest(ctx context.Context, overwrite bool, updateOnly bool) (bool, error) { i.hub.logger.Debugf("Downloading %s %s", i.Type, i.Name) for _, sub := range i.SubItems() { @@ -80,14 +80,14 @@ func (i *Item) downloadLatest(overwrite bool, updateOnly bool) (bool, error) { if sub.HasSubItems() { i.hub.logger.Tracef("collection, recurse") - if _, err := sub.downloadLatest(overwrite, updateOnly); err != nil { + if _, err := sub.downloadLatest(ctx, overwrite, updateOnly); err != nil { return false, err } } downloaded := sub.State.Downloaded - if _, err := sub.download(overwrite); err != nil { + if _, err := sub.download(ctx, overwrite); err != nil { return false, err } @@ -105,11 +105,11 @@ func (i *Item) downloadLatest(overwrite bool, updateOnly bool) (bool, error) { return false, nil } - return i.download(overwrite) + return i.download(ctx, overwrite) } // FetchContentTo downloads the last version of the item's YAML file to the 
specified path. -func (i *Item) FetchContentTo(destPath string) (bool, string, error) { +func (i *Item) FetchContentTo(ctx context.Context, destPath string) (bool, string, error) { url, err := i.hub.remote.urlTo(i.RemotePath) if err != nil { return false, "", fmt.Errorf("failed to build request: %w", err) @@ -131,8 +131,6 @@ func (i *Item) FetchContentTo(destPath string) (bool, string, error) { // TODO: recommend hub update if hash does not match - ctx := context.TODO() - downloaded, err := d.Download(ctx, url) if err != nil { return false, "", fmt.Errorf("while downloading %s to %s: %w", i.Name, url, err) @@ -142,7 +140,7 @@ func (i *Item) FetchContentTo(destPath string) (bool, string, error) { } // download downloads the item from the hub and writes it to the hub directory. -func (i *Item) download(overwrite bool) (bool, error) { +func (i *Item) download(ctx context.Context, overwrite bool) (bool, error) { // ensure that target file is within target dir finalPath, err := i.downloadPath() if err != nil { @@ -167,7 +165,7 @@ func (i *Item) download(overwrite bool) (bool, error) { } } - downloaded, _, err := i.FetchContentTo(finalPath) + downloaded, _, err := i.FetchContentTo(ctx, finalPath) if err != nil { return false, fmt.Errorf("while downloading %s: %w", i.Name, err) } @@ -188,7 +186,7 @@ func (i *Item) download(overwrite bool) (bool, error) { defer reader.Close() - if err = downloadDataSet(i.hub.local.InstallDataDir, overwrite, reader, i.hub.logger); err != nil { + if err = downloadDataSet(ctx, i.hub.local.InstallDataDir, overwrite, reader, i.hub.logger); err != nil { return false, fmt.Errorf("while downloading data for %s: %w", i.FileName, err) } @@ -196,7 +194,7 @@ func (i *Item) download(overwrite bool) (bool, error) { } // DownloadDataIfNeeded downloads the data set for the item. 
-func (i *Item) DownloadDataIfNeeded(force bool) error { +func (i *Item) DownloadDataIfNeeded(ctx context.Context, force bool) error { itemFilePath, err := i.installPath() if err != nil { return err @@ -209,7 +207,7 @@ func (i *Item) DownloadDataIfNeeded(force bool) error { defer itemFile.Close() - if err = downloadDataSet(i.hub.local.InstallDataDir, force, itemFile, i.hub.logger); err != nil { + if err = downloadDataSet(ctx, i.hub.local.InstallDataDir, force, itemFile, i.hub.logger); err != nil { return fmt.Errorf("while downloading data for %s: %w", itemFilePath, err) } diff --git a/pkg/cwhub/itemupgrade_test.go b/pkg/cwhub/itemupgrade_test.go index d86d2094955..5d302db3345 100644 --- a/pkg/cwhub/itemupgrade_test.go +++ b/pkg/cwhub/itemupgrade_test.go @@ -19,7 +19,9 @@ func TestUpgradeItemNewScenarioInCollection(t *testing.T) { require.False(t, item.State.Downloaded) require.False(t, item.State.Installed) - require.NoError(t, item.Install(false, false)) + ctx := context.Background() + + require.NoError(t, item.Install(ctx, false, false)) require.True(t, item.State.Downloaded) require.True(t, item.State.Installed) @@ -43,7 +45,7 @@ func TestUpgradeItemNewScenarioInCollection(t *testing.T) { hub, err := NewHub(hub.local, remote, nil) require.NoError(t, err) - err = hub.Update(context.TODO()) + err = hub.Update(ctx) require.NoError(t, err) err = hub.Load() @@ -58,7 +60,7 @@ func TestUpgradeItemNewScenarioInCollection(t *testing.T) { require.False(t, item.State.UpToDate) require.False(t, item.State.Tainted) - didUpdate, err := item.Upgrade(false) + didUpdate, err := item.Upgrade(ctx, false) require.NoError(t, err) require.True(t, didUpdate) assertCollectionDepsInstalled(t, hub, "crowdsecurity/test_collection") @@ -78,7 +80,9 @@ func TestUpgradeItemInDisabledScenarioShouldNotBeInstalled(t *testing.T) { require.False(t, item.State.Installed) require.False(t, hub.GetItem(SCENARIOS, "crowdsecurity/foobar_scenario").State.Installed) - require.NoError(t, 
item.Install(false, false)) + ctx := context.Background() + + require.NoError(t, item.Install(ctx, false, false)) require.True(t, item.State.Downloaded) require.True(t, item.State.Installed) @@ -110,14 +114,14 @@ func TestUpgradeItemInDisabledScenarioShouldNotBeInstalled(t *testing.T) { hub, err = NewHub(hub.local, remote, nil) require.NoError(t, err) - err = hub.Update(context.TODO()) + err = hub.Update(ctx) require.NoError(t, err) err = hub.Load() require.NoError(t, err) item = hub.GetItem(COLLECTIONS, "crowdsecurity/test_collection") - didUpdate, err := item.Upgrade(false) + didUpdate, err := item.Upgrade(ctx, false) require.NoError(t, err) require.False(t, didUpdate) @@ -148,7 +152,9 @@ func TestUpgradeItemNewScenarioIsInstalledWhenReferencedScenarioIsDisabled(t *te require.False(t, item.State.Installed) require.False(t, hub.GetItem(SCENARIOS, "crowdsecurity/foobar_scenario").State.Installed) - require.NoError(t, item.Install(false, false)) + ctx := context.Background() + + require.NoError(t, item.Install(ctx, false, false)) require.True(t, item.State.Downloaded) require.True(t, item.State.Installed) @@ -185,7 +191,7 @@ func TestUpgradeItemNewScenarioIsInstalledWhenReferencedScenarioIsDisabled(t *te hub, err = NewHub(hub.local, remote, nil) require.NoError(t, err) - err = hub.Update(context.TODO()) + err = hub.Update(ctx) require.NoError(t, err) err = hub.Load() @@ -195,7 +201,7 @@ func TestUpgradeItemNewScenarioIsInstalledWhenReferencedScenarioIsDisabled(t *te hub = getHubOrFail(t, hub.local, remote) item = hub.GetItem(COLLECTIONS, "crowdsecurity/test_collection") - didUpdate, err := item.Upgrade(false) + didUpdate, err := item.Upgrade(ctx, false) require.NoError(t, err) require.True(t, didUpdate) diff --git a/pkg/hubtest/hubtest_item.go b/pkg/hubtest/hubtest_item.go index 096bdcd0a02..4b105777952 100644 --- a/pkg/hubtest/hubtest_item.go +++ b/pkg/hubtest/hubtest_item.go @@ -1,6 +1,7 @@ package hubtest import ( + "context" "errors" "fmt" "net/url" @@ -219,11 
+220,13 @@ func (t *HubTestItem) InstallHub() error { return err } + ctx := context.Background() + // install data for parsers if needed ret := hub.GetItemMap(cwhub.PARSERS) for parserName, item := range ret { if item.State.Installed { - if err := item.DownloadDataIfNeeded(true); err != nil { + if err := item.DownloadDataIfNeeded(ctx, true); err != nil { return fmt.Errorf("unable to download data for parser '%s': %+v", parserName, err) } @@ -235,7 +238,7 @@ func (t *HubTestItem) InstallHub() error { ret = hub.GetItemMap(cwhub.SCENARIOS) for scenarioName, item := range ret { if item.State.Installed { - if err := item.DownloadDataIfNeeded(true); err != nil { + if err := item.DownloadDataIfNeeded(ctx, true); err != nil { return fmt.Errorf("unable to download data for parser '%s': %+v", scenarioName, err) } @@ -247,7 +250,7 @@ func (t *HubTestItem) InstallHub() error { ret = hub.GetItemMap(cwhub.POSTOVERFLOWS) for postoverflowName, item := range ret { if item.State.Installed { - if err := item.DownloadDataIfNeeded(true); err != nil { + if err := item.DownloadDataIfNeeded(ctx, true); err != nil { return fmt.Errorf("unable to download data for parser '%s': %+v", postoverflowName, err) } diff --git a/pkg/setup/install.go b/pkg/setup/install.go index dc85706a15c..09ac15971f8 100644 --- a/pkg/setup/install.go +++ b/pkg/setup/install.go @@ -2,6 +2,7 @@ package setup import ( "bytes" + "context" "errors" "fmt" "os" @@ -46,7 +47,7 @@ func decodeSetup(input []byte, fancyErrors bool) (Setup, error) { } // InstallHubItems installs the objects recommended in a setup file. 
-func InstallHubItems(hub *cwhub.Hub, input []byte, dryRun bool) error { +func InstallHubItems(ctx context.Context, hub *cwhub.Hub, input []byte, dryRun bool) error { setupEnvelope, err := decodeSetup(input, false) if err != nil { return err @@ -74,7 +75,7 @@ func InstallHubItems(hub *cwhub.Hub, input []byte, dryRun bool) error { continue } - if err := item.Install(forceAction, downloadOnly); err != nil { + if err := item.Install(ctx, forceAction, downloadOnly); err != nil { return fmt.Errorf("while installing collection %s: %w", item.Name, err) } } @@ -93,7 +94,7 @@ func InstallHubItems(hub *cwhub.Hub, input []byte, dryRun bool) error { return fmt.Errorf("parser %s not found", parser) } - if err := item.Install(forceAction, downloadOnly); err != nil { + if err := item.Install(ctx, forceAction, downloadOnly); err != nil { return fmt.Errorf("while installing parser %s: %w", item.Name, err) } } @@ -112,7 +113,7 @@ func InstallHubItems(hub *cwhub.Hub, input []byte, dryRun bool) error { return fmt.Errorf("scenario %s not found", scenario) } - if err := item.Install(forceAction, downloadOnly); err != nil { + if err := item.Install(ctx, forceAction, downloadOnly); err != nil { return fmt.Errorf("while installing scenario %s: %w", item.Name, err) } } @@ -131,7 +132,7 @@ func InstallHubItems(hub *cwhub.Hub, input []byte, dryRun bool) error { return fmt.Errorf("postoverflow %s not found", postoverflow) } - if err := item.Install(forceAction, downloadOnly); err != nil { + if err := item.Install(ctx, forceAction, downloadOnly); err != nil { return fmt.Errorf("while installing postoverflow %s: %w", item.Name, err) } } From 13fb25257134090d14ec717f70c7d871d9ad730c Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 7 Jun 2024 19:03:23 +0200 Subject: [PATCH 175/318] lint: replace cyclop, gocyclo with revive; basic pkg/hubtest helper (#3065) --- .golangci.yml | 24 +++++++++-------- pkg/hubtest/appsecrule.go | 8 ++---- 
pkg/hubtest/hubtest_item.go | 52 ++++++++++++------------------------- pkg/hubtest/parser.go | 8 ++---- pkg/hubtest/postoverflow.go | 8 ++---- pkg/hubtest/scenario.go | 8 ++---- 6 files changed, 37 insertions(+), 71 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 1ec386183e1..8feb9921175 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,10 +1,6 @@ # https://github.com/golangci/golangci-lint/blob/master/.golangci.reference.yml linters-settings: - cyclop: - # lower this after refactoring - max-complexity: 45 - gci: sections: - standard @@ -20,10 +16,6 @@ linters-settings: # lower this after refactoring min-complexity: 128 - gocyclo: - # lower this after refactoring - min-complexity: 45 - funlen: # Checks the number of lines in a function. # If lower than 0, disable the check. @@ -133,7 +125,8 @@ linters-settings: - name: confusing-results disabled: true - name: cyclomatic - disabled: true + # lower this after refactoring + arguments: [45] - name: deep-exit disabled: true - name: defer @@ -228,6 +221,13 @@ linters: - structcheck - varcheck + # + # Redundant + # + + - gocyclo # revive + - cyclop # revive + # # Disabled until fixed for go 1.22 # @@ -243,7 +243,6 @@ linters: # - asciicheck # checks that all code identifiers does not have non-ASCII symbols in the name # - bidichk # Checks for dangerous unicode character sequences # - bodyclose # checks whether HTTP response body is closed successfully - # - cyclop # checks function and package cyclomatic complexity # - decorder # check declaration order and count of types, constants, variables and functions # - depguard # Go linter that checks if package imports are in a list of acceptable packages # - dupword # checks for duplicate words in the source code @@ -259,7 +258,6 @@ linters: # - gochecksumtype # Run exhaustiveness checks on Go "sum types" # - gocognit # Computes and checks the cognitive complexity of functions # - gocritic # Provides diagnostics that check for bugs, performance and style 
issues. - # - gocyclo # Computes and checks the cyclomatic complexity of functions # - goheader # Checks is file header matches to pattern # - gomoddirectives # Manage the use of 'replace', 'retract', and 'excludes' directives in go.mod. # - gomodguard # Allow and block list linter for direct Go module dependencies. This is different from depguard where there are different block types for example version constraints and module recommendations. @@ -503,3 +501,7 @@ issues: - revive path: cmd/crowdsec-cli/copyfile.go + - linters: + - revive + path: pkg/hubtest/hubtest_item.go + text: "cyclomatic: .*RunWithLogFile" diff --git a/pkg/hubtest/appsecrule.go b/pkg/hubtest/appsecrule.go index fb4ad78cc18..1c4416c2e9b 100644 --- a/pkg/hubtest/appsecrule.go +++ b/pkg/hubtest/appsecrule.go @@ -25,12 +25,8 @@ func (t *HubTestItem) installAppsecRuleItem(item *cwhub.Item) error { // runtime/appsec-rules/ itemTypeDirDest := fmt.Sprintf("%s/appsec-rules/", t.RuntimePath) - if err := os.MkdirAll(hubDirAppsecRuleDest, os.ModePerm); err != nil { - return fmt.Errorf("unable to create folder '%s': %w", hubDirAppsecRuleDest, err) - } - - if err := os.MkdirAll(itemTypeDirDest, os.ModePerm); err != nil { - return fmt.Errorf("unable to create folder '%s': %w", itemTypeDirDest, err) + if err := createDirs([]string{hubDirAppsecRuleDest, itemTypeDirDest}); err != nil { + return err } // runtime/hub/appsec-rules/crowdsecurity/rule.yaml diff --git a/pkg/hubtest/hubtest_item.go b/pkg/hubtest/hubtest_item.go index 4b105777952..5346fb0be50 100644 --- a/pkg/hubtest/hubtest_item.go +++ b/pkg/hubtest/hubtest_item.go @@ -380,6 +380,16 @@ func (t *HubTestItem) RunWithNucleiTemplate() error { return nil } +func createDirs(dirs []string) error { + for _, dir := range dirs { + if err := os.MkdirAll(dir, os.ModePerm); err != nil { + return fmt.Errorf("unable to create directory '%s': %w", dir, err) + } + } + + return nil +} + func (t *HubTestItem) RunWithLogFile() error { testPath := 
filepath.Join(t.HubTestPath, t.Name) if _, err := os.Stat(testPath); os.IsNotExist(err) { @@ -391,30 +401,15 @@ func (t *HubTestItem) RunWithLogFile() error { return fmt.Errorf("can't get current directory: %+v", err) } - // create runtime folder - if err = os.MkdirAll(t.RuntimePath, os.ModePerm); err != nil { - return fmt.Errorf("unable to create folder '%s': %+v", t.RuntimePath, err) - } - - // create runtime data folder - if err = os.MkdirAll(t.RuntimeDataPath, os.ModePerm); err != nil { - return fmt.Errorf("unable to create folder '%s': %+v", t.RuntimeDataPath, err) - } - - // create runtime hub folder - if err = os.MkdirAll(t.RuntimeHubPath, os.ModePerm); err != nil { - return fmt.Errorf("unable to create folder '%s': %+v", t.RuntimeHubPath, err) + // create runtime, data, hub folders + if err = createDirs([]string{t.RuntimePath, t.RuntimeDataPath, t.RuntimeHubPath, t.ResultsPath}); err != nil { + return err } if err = Copy(t.HubIndexFile, filepath.Join(t.RuntimeHubPath, ".index.json")); err != nil { return fmt.Errorf("unable to copy .index.json file in '%s': %w", filepath.Join(t.RuntimeHubPath, ".index.json"), err) } - // create results folder - if err = os.MkdirAll(t.ResultsPath, os.ModePerm); err != nil { - return fmt.Errorf("unable to create folder '%s': %+v", t.ResultsPath, err) - } - // copy template config file to runtime folder if err = Copy(t.TemplateConfigPath, t.RuntimeConfigFilePath); err != nil { return fmt.Errorf("unable to copy '%s' to '%s': %v", t.TemplateConfigPath, t.RuntimeConfigFilePath, err) @@ -585,30 +580,15 @@ func (t *HubTestItem) Run() error { t.Success = false t.ErrorsList = make([]string, 0) - // create runtime folder - if err = os.MkdirAll(t.RuntimePath, os.ModePerm); err != nil { - return fmt.Errorf("unable to create folder '%s': %+v", t.RuntimePath, err) - } - - // create runtime data folder - if err = os.MkdirAll(t.RuntimeDataPath, os.ModePerm); err != nil { - return fmt.Errorf("unable to create folder '%s': %+v", 
t.RuntimeDataPath, err) - } - - // create runtime hub folder - if err = os.MkdirAll(t.RuntimeHubPath, os.ModePerm); err != nil { - return fmt.Errorf("unable to create folder '%s': %+v", t.RuntimeHubPath, err) + // create runtime, data, hub, result folders + if err = createDirs([]string{t.RuntimePath, t.RuntimeDataPath, t.RuntimeHubPath, t.ResultsPath}); err != nil { + return err } if err = Copy(t.HubIndexFile, filepath.Join(t.RuntimeHubPath, ".index.json")); err != nil { return fmt.Errorf("unable to copy .index.json file in '%s': %w", filepath.Join(t.RuntimeHubPath, ".index.json"), err) } - // create results folder - if err = os.MkdirAll(t.ResultsPath, os.ModePerm); err != nil { - return fmt.Errorf("unable to create folder '%s': %+v", t.ResultsPath, err) - } - // copy template config file to runtime folder if err = Copy(t.TemplateConfigPath, t.RuntimeConfigFilePath); err != nil { return fmt.Errorf("unable to copy '%s' to '%s': %v", t.TemplateConfigPath, t.RuntimeConfigFilePath, err) diff --git a/pkg/hubtest/parser.go b/pkg/hubtest/parser.go index d40301e3015..31ff459ca77 100644 --- a/pkg/hubtest/parser.go +++ b/pkg/hubtest/parser.go @@ -23,12 +23,8 @@ func (t *HubTestItem) installParserItem(item *cwhub.Item) error { // runtime/parsers/s00-raw/ itemTypeDirDest := fmt.Sprintf("%s/parsers/%s/", t.RuntimePath, item.Stage) - if err := os.MkdirAll(hubDirParserDest, os.ModePerm); err != nil { - return fmt.Errorf("unable to create folder '%s': %w", hubDirParserDest, err) - } - - if err := os.MkdirAll(itemTypeDirDest, os.ModePerm); err != nil { - return fmt.Errorf("unable to create folder '%s': %w", itemTypeDirDest, err) + if err := createDirs([]string{hubDirParserDest, itemTypeDirDest}); err != nil { + return err } // runtime/hub/parsers/s00-raw/crowdsecurity/syslog-logs.yaml diff --git a/pkg/hubtest/postoverflow.go b/pkg/hubtest/postoverflow.go index 76a67b58b76..65fd0bfbc5d 100644 --- a/pkg/hubtest/postoverflow.go +++ b/pkg/hubtest/postoverflow.go @@ -23,12 +23,8 @@ func 
(t *HubTestItem) installPostoverflowItem(item *cwhub.Item) error { // runtime/postoverflows/s00-enrich itemTypeDirDest := fmt.Sprintf("%s/postoverflows/%s/", t.RuntimePath, item.Stage) - if err := os.MkdirAll(hubDirPostoverflowDest, os.ModePerm); err != nil { - return fmt.Errorf("unable to create folder '%s': %w", hubDirPostoverflowDest, err) - } - - if err := os.MkdirAll(itemTypeDirDest, os.ModePerm); err != nil { - return fmt.Errorf("unable to create folder '%s': %w", itemTypeDirDest, err) + if err := createDirs([]string{hubDirPostoverflowDest, itemTypeDirDest}); err != nil { + return err } // runtime/hub/postoverflows/s00-enrich/crowdsecurity/rdns.yaml diff --git a/pkg/hubtest/scenario.go b/pkg/hubtest/scenario.go index 35ea465b7c0..7f61e48accf 100644 --- a/pkg/hubtest/scenario.go +++ b/pkg/hubtest/scenario.go @@ -22,12 +22,8 @@ func (t *HubTestItem) installScenarioItem(item *cwhub.Item) error { // runtime/parsers/scenarios/ itemTypeDirDest := fmt.Sprintf("%s/scenarios/", t.RuntimePath) - if err := os.MkdirAll(hubDirScenarioDest, os.ModePerm); err != nil { - return fmt.Errorf("unable to create folder '%s': %w", hubDirScenarioDest, err) - } - - if err := os.MkdirAll(itemTypeDirDest, os.ModePerm); err != nil { - return fmt.Errorf("unable to create folder '%s': %w", itemTypeDirDest, err) + if err := createDirs([]string{hubDirScenarioDest, itemTypeDirDest}); err != nil { + return err } // runtime/hub/scenarios/crowdsecurity/ssh-bf.yaml From 819fa0ee2271cd665620c45ae4e886ec275f7c8e Mon Sep 17 00:00:00 2001 From: Laurence Jones Date: Mon, 10 Jun 2024 10:09:10 +0100 Subject: [PATCH 176/318] enhancement: add other log levels to docker start script and document them (#3072) --- docker/README.md | 3 +++ docker/docker_start.sh | 12 ++++++++++++ 2 files changed, 15 insertions(+) diff --git a/docker/README.md b/docker/README.md index 5e39838a175..ad31d10aed6 100644 --- a/docker/README.md +++ b/docker/README.md @@ -333,6 +333,9 @@ config.yaml) each time the container is run. 
| `DISABLE_APPSEC_RULES` | | Appsec rules files to remove, separated by space | | | | | | __Log verbosity__ | | | +| `LEVEL_FATAL` | false | Force FATAL level for the container log | +| `LEVEL_ERROR` | false | Force ERROR level for the container log | +| `LEVEL_WARN` | false | Force WARN level for the container log | | `LEVEL_INFO` | false | Force INFO level for the container log | | `LEVEL_DEBUG` | false | Force DEBUG level for the container log | | `LEVEL_TRACE` | false | Force TRACE level (VERY verbose) for the container log | diff --git a/docker/docker_start.sh b/docker/docker_start.sh index 954dbd9fc79..a3e9226bced 100755 --- a/docker/docker_start.sh +++ b/docker/docker_start.sh @@ -504,5 +504,17 @@ if istrue "$LEVEL_INFO"; then ARGS="$ARGS -info" fi +if istrue "$LEVEL_WARN"; then + ARGS="$ARGS -warning" +fi + +if istrue "$LEVEL_ERROR"; then + ARGS="$ARGS -error" +fi + +if istrue "$LEVEL_FATAL"; then + ARGS="$ARGS -fatal" +fi + # shellcheck disable=SC2086 exec crowdsec $ARGS From 7fd01ae3fced9b50f31734c1d59a72f0553be05a Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 10 Jun 2024 16:13:49 +0200 Subject: [PATCH 177/318] CI: update golangci-lint to 1.59; enforce canonical HTTP headers (#3074) --- .github/workflows/go-tests-windows.yml | 2 +- .github/workflows/go-tests.yml | 2 +- .golangci.yml | 21 ++++++++------------- cmd/crowdsec-cli/console.go | 3 ++- cmd/notification-sentinel/main.go | 4 ++-- pkg/acquisition/modules/loki/loki_test.go | 2 +- pkg/cticlient/client.go | 2 +- pkg/cticlient/client_test.go | 8 ++++---- pkg/exprhelpers/crowdsec_cti_test.go | 2 +- 9 files changed, 21 insertions(+), 25 deletions(-) diff --git a/.github/workflows/go-tests-windows.yml b/.github/workflows/go-tests-windows.yml index e707062b5ad..daf6312447b 100644 --- a/.github/workflows/go-tests-windows.yml +++ b/.github/workflows/go-tests-windows.yml @@ -57,7 +57,7 @@ jobs: - name: golangci-lint uses: golangci/golangci-lint-action@v4 with: - 
version: v1.57 + version: v1.59 args: --issues-exit-code=1 --timeout 10m only-new-issues: false # the cache is already managed above, enabling it here diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index 3496674e7b9..41e84189383 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -158,7 +158,7 @@ jobs: - name: golangci-lint uses: golangci/golangci-lint-action@v4 with: - version: v1.57 + version: v1.59 args: --issues-exit-code=1 --timeout 10m only-new-issues: false # the cache is already managed above, enabling it here diff --git a/.golangci.yml b/.golangci.yml index 8feb9921175..bcd1d2113f6 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -210,16 +210,7 @@ linters: # # DEPRECATED by golangi-lint # - - deadcode - - exhaustivestruct - - golint - - ifshort - - interfacer - - maligned - - nosnakecase - - scopelint - - structcheck - - varcheck + - execinquery # # Redundant @@ -249,7 +240,6 @@ linters: # - durationcheck # check for two durations multiplied together # - errcheck # errcheck is a program for checking for unchecked errors in Go code. These unchecked errors can be critical bugs in some cases # - errorlint # errorlint is a linter for that can be used to find code that will cause problems with the error wrapping scheme introduced in Go 1.13. - # - execinquery # execinquery is a linter about query string checker in Query function which reads your Go src files and warning it finds # - exportloopref # checks for pointers to enclosing loop variables # - funlen # Tool for detection of long functions # - ginkgolinter # enforces standards of using ginkgo and gomega @@ -331,6 +321,7 @@ linters: - errname # Checks that sentinel errors are prefixed with the `Err` and error types are suffixed with the `Error`. - gomnd # An analyzer to detect magic numbers. - ireturn # Accept Interfaces, Return Concrete Types + - mnd # An analyzer to detect magic numbers. 
- nilnil # Checks that there is no simultaneous return of `nil` error and an invalid value. - noctx # Finds sending http request without context.Context - unparam # Reports unused function parameters @@ -350,7 +341,7 @@ linters: - dupl # Tool for code clone detection - forcetypeassert # finds forced type assertions - godox # Tool for detection of FIXME, TODO and other comment keywords - - goerr113 # Go linter to check the errors handling expressions + - err113 # Go linter to check the errors handling expressions - paralleltest # Detects missing usage of t.Parallel() method in your Go test - testpackage # linter that makes you use a separate _test package @@ -383,7 +374,7 @@ issues: - pkg/yamlpatch/merge.go - pkg/yamlpatch/merge_test.go - exclude-generated-strict: true + exclude-generated: strict max-issues-per-linter: 0 max-same-issues: 0 @@ -505,3 +496,7 @@ issues: - revive path: pkg/hubtest/hubtest_item.go text: "cyclomatic: .*RunWithLogFile" + + - linters: + - canonicalheader + path: pkg/apiserver/middlewares/v1/tls_auth.go diff --git a/cmd/crowdsec-cli/console.go b/cmd/crowdsec-cli/console.go index 972d43122cc..3c7df395b30 100644 --- a/cmd/crowdsec-cli/console.go +++ b/cmd/crowdsec-cli/console.go @@ -6,6 +6,7 @@ import ( "encoding/json" "errors" "fmt" + "net/http" "net/url" "os" "strconv" @@ -138,7 +139,7 @@ func (cli *cliConsole) enroll(key string, name string, overwrite bool, tags []st return fmt.Errorf("could not enroll instance: %w", err) } - if resp.Response.StatusCode == 200 && !overwrite { + if resp.Response.StatusCode == http.StatusOK && !overwrite { log.Warning("Instance already enrolled. 
You can use '--overwrite' to force enroll") return nil } diff --git a/cmd/notification-sentinel/main.go b/cmd/notification-sentinel/main.go index c627f9271e2..9c34e63a289 100644 --- a/cmd/notification-sentinel/main.go +++ b/cmd/notification-sentinel/main.go @@ -37,7 +37,7 @@ var logger hclog.Logger = hclog.New(&hclog.LoggerOptions{ }) func (s *SentinelPlugin) getAuthorizationHeader(now string, length int, pluginName string) (string, error) { - xHeaders := "x-ms-date:" + now + xHeaders := "X-Ms-Date:" + now stringToHash := fmt.Sprintf("POST\n%d\napplication/json\n%s\n/api/logs", length, xHeaders) decodedKey, _ := base64.StdEncoding.DecodeString(s.PluginConfigByName[pluginName].SharedKey) @@ -87,7 +87,7 @@ func (s *SentinelPlugin) Notify(ctx context.Context, notification *protobufs.Not req.Header.Set("Content-Type", "application/json") req.Header.Set("Log-Type", s.PluginConfigByName[notification.Name].LogType) req.Header.Set("Authorization", authorization) - req.Header.Set("x-ms-date", now) + req.Header.Set("X-Ms-Date", now) client := &http.Client{} resp, err := client.Do(req.WithContext(ctx)) diff --git a/pkg/acquisition/modules/loki/loki_test.go b/pkg/acquisition/modules/loki/loki_test.go index 9ac3ccbd321..83742546959 100644 --- a/pkg/acquisition/modules/loki/loki_test.go +++ b/pkg/acquisition/modules/loki/loki_test.go @@ -294,7 +294,7 @@ func feedLoki(logger *log.Entry, n int, title string) error { } req.Header.Set("Content-Type", "application/json") - req.Header.Set("X-Scope-OrgID", "1234") + req.Header.Set("X-Scope-Orgid", "1234") resp, err := http.DefaultClient.Do(req) if err != nil { diff --git a/pkg/cticlient/client.go b/pkg/cticlient/client.go index 4df4d65a63c..b95d1237619 100644 --- a/pkg/cticlient/client.go +++ b/pkg/cticlient/client.go @@ -43,7 +43,7 @@ func (c *CrowdsecCTIClient) doRequest(method string, endpoint string, params map if err != nil { return nil, err } - req.Header.Set("x-api-key", c.apiKey) + req.Header.Set("X-Api-Key", c.apiKey) resp, 
err := c.httpClient.Do(req) if err != nil { return nil, err diff --git a/pkg/cticlient/client_test.go b/pkg/cticlient/client_test.go index 79406a6c2a9..cdbbd0c9732 100644 --- a/pkg/cticlient/client_test.go +++ b/pkg/cticlient/client_test.go @@ -38,7 +38,7 @@ func (f RoundTripFunc) RoundTrip(req *http.Request) (*http.Response, error) { func fireHandler(req *http.Request) *http.Response { var err error - apiKey := req.Header.Get("x-api-key") + apiKey := req.Header.Get("X-Api-Key") if apiKey != validApiKey { log.Warningf("invalid api key: %s", apiKey) @@ -105,7 +105,7 @@ func fireHandler(req *http.Request) *http.Response { } func smokeHandler(req *http.Request) *http.Response { - apiKey := req.Header.Get("x-api-key") + apiKey := req.Header.Get("X-Api-Key") if apiKey != validApiKey { return &http.Response{ StatusCode: http.StatusForbidden, @@ -137,7 +137,7 @@ func smokeHandler(req *http.Request) *http.Response { } func rateLimitedHandler(req *http.Request) *http.Response { - apiKey := req.Header.Get("x-api-key") + apiKey := req.Header.Get("X-Api-Key") if apiKey != validApiKey { return &http.Response{ StatusCode: http.StatusForbidden, @@ -154,7 +154,7 @@ func rateLimitedHandler(req *http.Request) *http.Response { } func searchHandler(req *http.Request) *http.Response { - apiKey := req.Header.Get("x-api-key") + apiKey := req.Header.Get("X-Api-Key") if apiKey != validApiKey { return &http.Response{ StatusCode: http.StatusForbidden, diff --git a/pkg/exprhelpers/crowdsec_cti_test.go b/pkg/exprhelpers/crowdsec_cti_test.go index 84a4b245720..4c0346e447c 100644 --- a/pkg/exprhelpers/crowdsec_cti_test.go +++ b/pkg/exprhelpers/crowdsec_cti_test.go @@ -69,7 +69,7 @@ func (f RoundTripFunc) RoundTrip(req *http.Request) (*http.Response, error) { } func smokeHandler(req *http.Request) *http.Response { - apiKey := req.Header.Get("x-api-key") + apiKey := req.Header.Get("X-Api-Key") if apiKey != validApiKey { return &http.Response{ StatusCode: http.StatusForbidden, From 
31ed9fb5eed446250b824831493075c509088636 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 10 Jun 2024 17:36:22 +0200 Subject: [PATCH 178/318] enable linter: revive (indent-error-flow) (#3068) * enable linter: revive (indent-error-flow) * lint --- .golangci.yml | 3 -- pkg/acquisition/modules/appsec/rx_operator.go | 4 +-- pkg/acquisition/modules/kinesis/kinesis.go | 22 ++++++------- .../loki/internal/lokiclient/loki_client.go | 25 +++++++-------- pkg/acquisition/modules/s3/s3.go | 21 ++++++------ pkg/cache/cache.go | 7 ++-- pkg/hubtest/hubtest_item.go | 3 +- pkg/leakybucket/reset_filter.go | 32 +++++++++---------- pkg/leakybucket/uniq.go | 6 ++-- pkg/types/event.go | 7 ++-- 10 files changed, 62 insertions(+), 68 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index bcd1d2113f6..14abfa3e1ae 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -70,7 +70,6 @@ linters-settings: - "!**/pkg/database/*.go" - "!**/pkg/exprhelpers/*.go" - "!**/pkg/acquisition/modules/appsec/appsec.go" - - "!**/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go" - "!**/pkg/apiserver/controllers/v1/errors.go" yaml: files: @@ -147,8 +146,6 @@ linters-settings: disabled: true - name: increment-decrement disabled: true - - name: indent-error-flow - disabled: true - name: import-alias-naming disabled: true - name: import-shadowing diff --git a/pkg/acquisition/modules/appsec/rx_operator.go b/pkg/acquisition/modules/appsec/rx_operator.go index 43aaf9e94be..73060037657 100644 --- a/pkg/acquisition/modules/appsec/rx_operator.go +++ b/pkg/acquisition/modules/appsec/rx_operator.go @@ -50,9 +50,9 @@ func (o *rx) Evaluate(tx plugintypes.TransactionState, value string) bool { tx.CaptureField(i, c) } return true - } else { - return o.re.MatchString(value) } + + return o.re.MatchString(value) } // RegisterRX registers the rx operator using a WASI implementation instead of Go. 
diff --git a/pkg/acquisition/modules/kinesis/kinesis.go b/pkg/acquisition/modules/kinesis/kinesis.go index a86816244f6..5d3cf8f80a0 100644 --- a/pkg/acquisition/modules/kinesis/kinesis.go +++ b/pkg/acquisition/modules/kinesis/kinesis.go @@ -4,6 +4,7 @@ import ( "bytes" "compress/gzip" "encoding/json" + "errors" "fmt" "io" "strings" @@ -95,7 +96,7 @@ func (k *KinesisSource) newClient() error { } if sess == nil { - return fmt.Errorf("failed to create aws session") + return errors.New("failed to create aws session") } config := aws.NewConfig() if k.Config.AwsRegion != "" { @@ -106,7 +107,7 @@ func (k *KinesisSource) newClient() error { } k.kClient = kinesis.New(sess, config) if k.kClient == nil { - return fmt.Errorf("failed to create kinesis client") + return errors.New("failed to create kinesis client") } return nil } @@ -124,7 +125,7 @@ func (k *KinesisSource) UnmarshalConfig(yamlConfig []byte) error { err := yaml.UnmarshalStrict(yamlConfig, &k.Config) if err != nil { - return fmt.Errorf("Cannot parse kinesis datasource configuration: %w", err) + return fmt.Errorf("cannot parse kinesis datasource configuration: %w", err) } if k.Config.Mode == "" { @@ -132,16 +133,16 @@ func (k *KinesisSource) UnmarshalConfig(yamlConfig []byte) error { } if k.Config.StreamName == "" && !k.Config.UseEnhancedFanOut { - return fmt.Errorf("stream_name is mandatory when use_enhanced_fanout is false") + return errors.New("stream_name is mandatory when use_enhanced_fanout is false") } if k.Config.StreamARN == "" && k.Config.UseEnhancedFanOut { - return fmt.Errorf("stream_arn is mandatory when use_enhanced_fanout is true") + return errors.New("stream_arn is mandatory when use_enhanced_fanout is true") } if k.Config.ConsumerName == "" && k.Config.UseEnhancedFanOut { - return fmt.Errorf("consumer_name is mandatory when use_enhanced_fanout is true") + return errors.New("consumer_name is mandatory when use_enhanced_fanout is true") } if k.Config.StreamARN != "" && k.Config.StreamName != "" { - 
return fmt.Errorf("stream_arn and stream_name are mutually exclusive") + return errors.New("stream_arn and stream_name are mutually exclusive") } if k.Config.MaxRetries <= 0 { k.Config.MaxRetries = 10 @@ -169,7 +170,7 @@ func (k *KinesisSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsL } func (k *KinesisSource) ConfigureByDSN(string, map[string]string, *log.Entry, string) error { - return fmt.Errorf("kinesis datasource does not support command-line acquisition") + return errors.New("kinesis datasource does not support command-line acquisition") } func (k *KinesisSource) GetMode() string { @@ -181,7 +182,7 @@ func (k *KinesisSource) GetName() string { } func (k *KinesisSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error { - return fmt.Errorf("kinesis datasource does not support one-shot acquisition") + return errors.New("kinesis datasource does not support one-shot acquisition") } func (k *KinesisSource) decodeFromSubscription(record []byte) ([]CloudwatchSubscriptionLogEvent, error) { @@ -524,9 +525,8 @@ func (k *KinesisSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) defer trace.CatchPanic("crowdsec/acquis/kinesis/streaming") if k.Config.UseEnhancedFanOut { return k.EnhancedRead(out, t) - } else { - return k.ReadFromStream(out, t) } + return k.ReadFromStream(out, t) }) return nil } diff --git a/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go b/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go index 6437a951504..359fef5bb96 100644 --- a/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go +++ b/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go @@ -4,6 +4,7 @@ import ( "context" "encoding/base64" "encoding/json" + "errors" "fmt" "io" "net/http" @@ -13,7 +14,6 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/gorilla/websocket" - "github.com/pkg/errors" log "github.com/sirupsen/logrus" "gopkg.in/tomb.v2" ) @@ -120,11 +120,10 @@ func (lc 
*LokiClient) queryRange(ctx context.Context, uri string, c chan *LokiQu resp, err := lc.Get(uri) if err != nil { if ok := lc.shouldRetry(); !ok { - return errors.Wrapf(err, "error querying range") - } else { - lc.increaseTicker(ticker) - continue + return fmt.Errorf("error querying range: %w", err) } + lc.increaseTicker(ticker) + continue } if resp.StatusCode != http.StatusOK { @@ -132,22 +131,20 @@ func (lc *LokiClient) queryRange(ctx context.Context, uri string, c chan *LokiQu body, _ := io.ReadAll(resp.Body) resp.Body.Close() if ok := lc.shouldRetry(); !ok { - return errors.Wrapf(err, "bad HTTP response code: %d: %s", resp.StatusCode, string(body)) - } else { - lc.increaseTicker(ticker) - continue + return fmt.Errorf("bad HTTP response code: %d: %s: %w", resp.StatusCode, string(body), err) } + lc.increaseTicker(ticker) + continue } var lq LokiQueryRangeResponse if err := json.NewDecoder(resp.Body).Decode(&lq); err != nil { resp.Body.Close() if ok := lc.shouldRetry(); !ok { - return errors.Wrapf(err, "error decoding Loki response") - } else { - lc.increaseTicker(ticker) - continue + return fmt.Errorf("error decoding Loki response: %w", err) } + lc.increaseTicker(ticker) + continue } resp.Body.Close() lc.Logger.Tracef("Got response: %+v", lq) @@ -261,7 +258,7 @@ func (lc *LokiClient) Tail(ctx context.Context) (chan *LokiResponse, error) { if err != nil { lc.Logger.Errorf("Error connecting to websocket, err: %s", err) - return responseChan, fmt.Errorf("error connecting to websocket") + return responseChan, errors.New("error connecting to websocket") } lc.t.Go(func() error { diff --git a/pkg/acquisition/modules/s3/s3.go b/pkg/acquisition/modules/s3/s3.go index d1bf881079a..71fcb23cbc4 100644 --- a/pkg/acquisition/modules/s3/s3.go +++ b/pkg/acquisition/modules/s3/s3.go @@ -276,7 +276,7 @@ func extractBucketAndPrefixFromEventBridge(message *string) (string, string, err if eventBody.Detail.Bucket.Name != "" { return eventBody.Detail.Bucket.Name, 
eventBody.Detail.Object.Key, nil } - return "", "", fmt.Errorf("invalid event body for event bridge format") + return "", "", errors.New("invalid event body for event bridge format") } func extractBucketAndPrefixFromS3Notif(message *string) (string, string, error) { @@ -286,7 +286,7 @@ func extractBucketAndPrefixFromS3Notif(message *string) (string, string, error) return "", "", err } if len(s3notifBody.Records) == 0 { - return "", "", fmt.Errorf("no records found in S3 notification") + return "", "", errors.New("no records found in S3 notification") } if !strings.HasPrefix(s3notifBody.Records[0].EventName, "ObjectCreated:") { return "", "", fmt.Errorf("event %s is not supported", s3notifBody.Records[0].EventName) @@ -295,19 +295,20 @@ func extractBucketAndPrefixFromS3Notif(message *string) (string, string, error) } func (s *S3Source) extractBucketAndPrefix(message *string) (string, string, error) { - if s.Config.SQSFormat == SQSFormatEventBridge { + switch s.Config.SQSFormat { + case SQSFormatEventBridge: bucket, key, err := extractBucketAndPrefixFromEventBridge(message) if err != nil { return "", "", err } return bucket, key, nil - } else if s.Config.SQSFormat == SQSFormatS3Notification { + case SQSFormatS3Notification: bucket, key, err := extractBucketAndPrefixFromS3Notif(message) if err != nil { return "", "", err } return bucket, key, nil - } else { + default: bucket, key, err := extractBucketAndPrefixFromEventBridge(message) if err == nil { s.Config.SQSFormat = SQSFormatEventBridge @@ -318,7 +319,7 @@ func (s *S3Source) extractBucketAndPrefix(message *string) (string, string, erro s.Config.SQSFormat = SQSFormatS3Notification return bucket, key, nil } - return "", "", fmt.Errorf("SQS message format not supported") + return "", "", errors.New("SQS message format not supported") } } @@ -496,15 +497,15 @@ func (s *S3Source) UnmarshalConfig(yamlConfig []byte) error { } if s.Config.BucketName != "" && s.Config.SQSName != "" { - return fmt.Errorf("bucket_name and 
sqs_name are mutually exclusive") + return errors.New("bucket_name and sqs_name are mutually exclusive") } if s.Config.PollingMethod == PollMethodSQS && s.Config.SQSName == "" { - return fmt.Errorf("sqs_name is required when using sqs polling method") + return errors.New("sqs_name is required when using sqs polling method") } if s.Config.BucketName == "" && s.Config.PollingMethod == PollMethodList { - return fmt.Errorf("bucket_name is required") + return errors.New("bucket_name is required") } if s.Config.SQSFormat != "" && s.Config.SQSFormat != SQSFormatEventBridge && s.Config.SQSFormat != SQSFormatS3Notification { @@ -567,7 +568,7 @@ func (s *S3Source) ConfigureByDSN(dsn string, labels map[string]string, logger * dsn = strings.TrimPrefix(dsn, "s3://") args := strings.Split(dsn, "?") if len(args[0]) == 0 { - return fmt.Errorf("empty s3:// DSN") + return errors.New("empty s3:// DSN") } if len(args) == 2 && len(args[1]) != 0 { diff --git a/pkg/cache/cache.go b/pkg/cache/cache.go index 5b0dcbdc251..c2153ee2342 100644 --- a/pkg/cache/cache.go +++ b/pkg/cache/cache.go @@ -111,7 +111,8 @@ func SetKey(cacheName string, key string, value string, expiration *time.Duratio func GetKey(cacheName string, key string) (string, error) { for i, name := range CacheNames { if name == cacheName { - if value, err := Caches[i].Get(key); err != nil { + value, err := Caches[i].Get(key) + if err != nil { // do not warn or log if key not found if errors.Is(err, gcache.KeyNotFoundError) { return "", nil @@ -119,9 +120,9 @@ func GetKey(cacheName string, key string) (string, error) { CacheConfig[i].Logger.Warningf("While getting key %s in cache %s: %s", key, cacheName, err) return "", err - } else { - return value.(string), nil } + + return value.(string), nil } } diff --git a/pkg/hubtest/hubtest_item.go b/pkg/hubtest/hubtest_item.go index 5346fb0be50..da4969ee8dd 100644 --- a/pkg/hubtest/hubtest_item.go +++ b/pkg/hubtest/hubtest_item.go @@ -645,7 +645,6 @@ func (t *HubTestItem) Run() error { 
return t.RunWithLogFile() } else if t.Config.NucleiTemplate != "" { return t.RunWithNucleiTemplate() - } else { - return fmt.Errorf("log file or nuclei template must be set in '%s'", t.Name) } + return fmt.Errorf("log file or nuclei template must be set in '%s'", t.Name) } diff --git a/pkg/leakybucket/reset_filter.go b/pkg/leakybucket/reset_filter.go index 5884bf4a10c..6e61510fcc7 100644 --- a/pkg/leakybucket/reset_filter.go +++ b/pkg/leakybucket/reset_filter.go @@ -82,22 +82,22 @@ func (u *CancelOnFilter) OnBucketInit(bucketFactory *BucketFactory) error { cancelExprCacheLock.Unlock() u.CancelOnFilter = compiled.CancelOnFilter return nil - } else { - cancelExprCacheLock.Unlock() - //release the lock during compile + } - compiledExpr.CancelOnFilter, err = expr.Compile(bucketFactory.CancelOnFilter, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...) - if err != nil { - bucketFactory.logger.Errorf("reset_filter compile error : %s", err) - return err - } - u.CancelOnFilter = compiledExpr.CancelOnFilter - if bucketFactory.Debug { - u.Debug = true - } - cancelExprCacheLock.Lock() - cancelExprCache[bucketFactory.CancelOnFilter] = compiledExpr - cancelExprCacheLock.Unlock() + cancelExprCacheLock.Unlock() + //release the lock during compile + + compiledExpr.CancelOnFilter, err = expr.Compile(bucketFactory.CancelOnFilter, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...) 
+ if err != nil { + bucketFactory.logger.Errorf("reset_filter compile error : %s", err) + return err } - return err + u.CancelOnFilter = compiledExpr.CancelOnFilter + if bucketFactory.Debug { + u.Debug = true + } + cancelExprCacheLock.Lock() + cancelExprCache[bucketFactory.CancelOnFilter] = compiledExpr + cancelExprCacheLock.Unlock() + return nil } diff --git a/pkg/leakybucket/uniq.go b/pkg/leakybucket/uniq.go index 06d1e154a6f..197246c91a0 100644 --- a/pkg/leakybucket/uniq.go +++ b/pkg/leakybucket/uniq.go @@ -39,11 +39,9 @@ func (u *Uniq) OnBucketPour(bucketFactory *BucketFactory) func(types.Event, *Lea leaky.logger.Debugf("Uniq(%s) : ok", element) u.KeyCache[element] = true return &msg - - } else { - leaky.logger.Debugf("Uniq(%s) : ko, discard event", element) - return nil } + leaky.logger.Debugf("Uniq(%s) : ko, discard event", element) + return nil } } diff --git a/pkg/types/event.go b/pkg/types/event.go index c7b19fe3ca4..90464aadf2c 100644 --- a/pkg/types/event.go +++ b/pkg/types/event.go @@ -63,11 +63,12 @@ func (e *Event) SetParsed(key string, value string) bool { } func (e *Event) GetType() string { - if e.Type == OVFLW { + switch e.Type { + case OVFLW: return "overflow" - } else if e.Type == LOG { + case LOG: return "log" - } else { + default: log.Warningf("unknown event type for %+v", e) return "unknown" } From ae58b158a5306760f18de337564e65f7f80686ba Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 11 Jun 2024 09:26:50 +0200 Subject: [PATCH 179/318] enable linter: revive (var-declaration) (#3069) --- .golangci.yml | 2 -- pkg/acquisition/modules/s3/s3.go | 2 +- pkg/appsec/appsec_rule/modsecurity.go | 8 ++++---- pkg/appsec/coraza_logger.go | 2 +- pkg/appsec/loader.go | 2 +- pkg/exprhelpers/crowdsec_cti.go | 2 +- pkg/parser/parsing_test.go | 2 +- pkg/types/getfstype.go | 2 +- 8 files changed, 10 insertions(+), 12 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 14abfa3e1ae..c72cb70bf42 100644 --- 
a/.golangci.yml +++ b/.golangci.yml @@ -170,8 +170,6 @@ linters-settings: disabled: true - name: var-naming disabled: true - - name: var-declaration - disabled: true - name: unchecked-type-assertion disabled: true - name: exported diff --git a/pkg/acquisition/modules/s3/s3.go b/pkg/acquisition/modules/s3/s3.go index 71fcb23cbc4..9ef4d2ba757 100644 --- a/pkg/acquisition/modules/s3/s3.go +++ b/pkg/acquisition/modules/s3/s3.go @@ -204,7 +204,7 @@ func (s *S3Source) getBucketContent() ([]*s3.Object, error) { logger := s.logger.WithField("method", "getBucketContent") logger.Debugf("Getting bucket content for %s", s.Config.BucketName) bucketObjects := make([]*s3.Object, 0) - var continuationToken *string = nil + var continuationToken *string for { out, err := s.s3Client.ListObjectsV2WithContext(s.ctx, &s3.ListObjectsV2Input{ Bucket: aws.String(s.Config.BucketName), diff --git a/pkg/appsec/appsec_rule/modsecurity.go b/pkg/appsec/appsec_rule/modsecurity.go index 8f58a9589ca..135ba525e8e 100644 --- a/pkg/appsec/appsec_rule/modsecurity.go +++ b/pkg/appsec/appsec_rule/modsecurity.go @@ -11,7 +11,7 @@ type ModsecurityRule struct { ids []uint32 } -var zonesMap map[string]string = map[string]string{ +var zonesMap = map[string]string{ "ARGS": "ARGS_GET", "ARGS_NAMES": "ARGS_GET_NAMES", "BODY_ARGS": "ARGS_POST", @@ -31,7 +31,7 @@ var zonesMap map[string]string = map[string]string{ "FILENAMES": "FILES", } -var transformMap map[string]string = map[string]string{ +var transformMap = map[string]string{ "lowercase": "t:lowercase", "uppercase": "t:uppercase", "b64decode": "t:base64Decode", @@ -45,7 +45,7 @@ var transformMap map[string]string = map[string]string{ "html_entity_decode": "t:htmlEntityDecode", } -var matchMap map[string]string = map[string]string{ +var matchMap = map[string]string{ "regex": "@rx", "equals": "@streq", "startsWith": "@beginsWith", @@ -60,7 +60,7 @@ var matchMap map[string]string = map[string]string{ "eq": "@eq", } -var bodyTypeMatch map[string]string = 
map[string]string{ +var bodyTypeMatch = map[string]string{ "json": "JSON", "xml": "XML", "multipart": "MULTIPART", diff --git a/pkg/appsec/coraza_logger.go b/pkg/appsec/coraza_logger.go index 2b7f85d4e46..d2c1612cbd7 100644 --- a/pkg/appsec/coraza_logger.go +++ b/pkg/appsec/coraza_logger.go @@ -9,7 +9,7 @@ import ( dbg "github.com/crowdsecurity/coraza/v3/debuglog" ) -var DebugRules map[int]bool = map[int]bool{} +var DebugRules = map[int]bool{} func SetRuleDebug(id int, debug bool) { DebugRules[id] = debug diff --git a/pkg/appsec/loader.go b/pkg/appsec/loader.go index 86c1dc0a80e..82b8d440771 100644 --- a/pkg/appsec/loader.go +++ b/pkg/appsec/loader.go @@ -8,7 +8,7 @@ import ( "gopkg.in/yaml.v2" ) -var appsecRules map[string]AppsecCollectionConfig = make(map[string]AppsecCollectionConfig) //FIXME: would probably be better to have a struct for this +var appsecRules = make(map[string]AppsecCollectionConfig) //FIXME: would probably be better to have a struct for this var hub *cwhub.Hub //FIXME: this is a temporary hack to make the hub available in the package diff --git a/pkg/exprhelpers/crowdsec_cti.go b/pkg/exprhelpers/crowdsec_cti.go index 268979ee243..a640ea79f48 100644 --- a/pkg/exprhelpers/crowdsec_cti.go +++ b/pkg/exprhelpers/crowdsec_cti.go @@ -20,7 +20,7 @@ var CTIApiEnabled = false // when hitting quotas or auth errors, we temporarily disable the API var CTIBackOffUntil time.Time -var CTIBackOffDuration time.Duration = 5 * time.Minute +var CTIBackOffDuration = 5 * time.Minute var ctiClient *cticlient.CrowdsecCTIClient diff --git a/pkg/parser/parsing_test.go b/pkg/parser/parsing_test.go index f142e1bc29a..534b433a1b9 100644 --- a/pkg/parser/parsing_test.go +++ b/pkg/parser/parsing_test.go @@ -24,7 +24,7 @@ type TestFile struct { Results []types.Event `yaml:"results,omitempty"` } -var debug bool = false +var debug = false func TestParser(t *testing.T) { debug = true diff --git a/pkg/types/getfstype.go b/pkg/types/getfstype.go index aac12c7fc94..c16eea5cf98 
100644 --- a/pkg/types/getfstype.go +++ b/pkg/types/getfstype.go @@ -11,7 +11,7 @@ import ( // Generated with `man statfs | grep _MAGIC | awk '{split(tolower($1),a,"_"); print $2 ": \"" a[1] "\","}'` // ext2/3/4 duplicates removed to just have ext4 // XIAFS removed as well -var fsTypeMapping map[int64]string = map[int64]string{ +var fsTypeMapping = map[int64]string{ 0xadf5: "adfs", 0xadff: "affs", 0x5346414f: "afs", From c39c9cf8b76d5e34548f2ae25ba51844f6ef9aa1 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 11 Jun 2024 09:28:10 +0200 Subject: [PATCH 180/318] cscli: fix deprecation message for "context delete" (#3078) missing newline, proper return code, same msg as dashboard, test --- cmd/crowdsec-cli/lapi.go | 3 +-- test/bats/09_context.bats | 5 +++++ 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/cmd/crowdsec-cli/lapi.go b/cmd/crowdsec-cli/lapi.go index c2c92140fd6..d2955230de9 100644 --- a/cmd/crowdsec-cli/lapi.go +++ b/cmd/crowdsec-cli/lapi.go @@ -464,9 +464,8 @@ func (cli *cliLapi) newContextDeleteCmd() *cobra.Command { if filePath == "" { filePath = "the context file" } - fmt.Printf("Command 'delete' is deprecated, please manually edit %s.", filePath) - return nil + return fmt.Errorf("command 'delete' has been removed, please manually edit %s", filePath) }, } diff --git a/test/bats/09_context.bats b/test/bats/09_context.bats index ba295451070..71aabc68d29 100644 --- a/test/bats/09_context.bats +++ b/test/bats/09_context.bats @@ -65,6 +65,11 @@ teardown() { assert_stderr --partial "while checking console_context_path: stat $CONTEXT_YAML: no such file or directory" } +@test "csli lapi context delete" { + rune -1 cscli lapi context delete + assert_stderr --partial "command 'delete' has been removed, please manually edit the context file" +} + @test "context file is bad" { echo "bad yaml" > "$CONTEXT_YAML" rune -1 "$CROWDSEC" -t From 1d08edc200c388a1741fad51f1b567e0ecfa7cad Mon Sep 17 00:00:00 2001 From: 
mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 11 Jun 2024 09:32:20 +0200 Subject: [PATCH 181/318] enable linter: revive (deep-exit) (#3077) --- .golangci.yml | 54 +++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 52 insertions(+), 2 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index c72cb70bf42..925005a5b05 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -126,8 +126,6 @@ linters-settings: - name: cyclomatic # lower this after refactoring arguments: [45] - - name: deep-exit - disabled: true - name: defer disabled: true - name: empty-block @@ -495,3 +493,55 @@ issues: - linters: - canonicalheader path: pkg/apiserver/middlewares/v1/tls_auth.go + + # tolerate deep exit in tests, for now + - linters: + - revive + path: "pkg/(.+)_test.go" + text: "deep-exit: .*" + + # tolerate deep exit in cobra's OnInitialize, for now + - linters: + - revive + path: "cmd/crowdsec-cli/main.go" + text: "deep-exit: .*" + + - linters: + - revive + path: "cmd/crowdsec-cli/item_metrics.go" + text: "deep-exit: .*" + + - linters: + - revive + path: "cmd/crowdsec-cli/machines.go" + text: "deep-exit: .*" + + - linters: + - revive + path: "cmd/crowdsec-cli/utils.go" + text: "deep-exit: .*" + + - linters: + - revive + path: "cmd/crowdsec-cli/hubtest.go" + text: "deep-exit: .*" + + - linters: + - revive + path: "pkg/leakybucket/overflows.go" + text: "deep-exit: .*" + + - linters: + - revive + path: "cmd/crowdsec/crowdsec.go" + text: "deep-exit: .*" + + - linters: + - revive + path: "cmd/crowdsec/api.go" + text: "deep-exit: .*" + + - linters: + - revive + path: "cmd/crowdsec/win_service.go" + text: "deep-exit: .*" From 24687e982a5c38dd91688057c09d4a0e1be6b0af Mon Sep 17 00:00:00 2001 From: Laurence Jones Date: Tue, 11 Jun 2024 09:34:37 +0100 Subject: [PATCH 182/318] enhancement: add deprecation notice to cscli dashboard prerun (#3079) --- cmd/crowdsec-cli/dashboard.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cmd/crowdsec-cli/dashboard.go 
b/cmd/crowdsec-cli/dashboard.go index 59b9e67cd94..beff06d478a 100644 --- a/cmd/crowdsec-cli/dashboard.go +++ b/cmd/crowdsec-cli/dashboard.go @@ -99,6 +99,8 @@ cscli dashboard remove } } + log.Warn("cscli dashboard will be deprecated in version 1.7.0, read more at https://docs.crowdsec.net/blog/cscli_dashboard_deprecation/") + return nil }, } From bd4540b1bf5bfc086fb69ec765605ad066300bc4 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 11 Jun 2024 12:13:18 +0200 Subject: [PATCH 183/318] refactor context (cscli, pkg/database) (#3071) * cscli: helper require.DBClient() * refactor pkg/database: explicit context to dbclient constructor * lint --- cmd/crowdsec-cli/alerts.go | 15 +++++++++++---- cmd/crowdsec-cli/bouncers.go | 6 +++--- cmd/crowdsec-cli/machines.go | 6 +++--- cmd/crowdsec-cli/papi.go | 13 ++++++------- cmd/crowdsec-cli/require/require.go | 10 ++++++++++ cmd/crowdsec-cli/support.go | 4 ++-- cmd/crowdsec/run_in_svc.go | 6 ++++-- cmd/crowdsec/run_in_svc_windows.go | 5 ++++- cmd/crowdsec/serve.go | 5 ++++- pkg/apiserver/apic_test.go | 22 ++++++++++++++-------- pkg/apiserver/apiserver.go | 6 ++++-- pkg/apiserver/apiserver_test.go | 21 ++++++++++++++------- pkg/database/alerts.go | 2 +- pkg/database/database.go | 8 ++++---- pkg/exprhelpers/exprlib_test.go | 17 +++++++++++++---- 15 files changed, 97 insertions(+), 49 deletions(-) diff --git a/cmd/crowdsec-cli/alerts.go b/cmd/crowdsec-cli/alerts.go index e1b070ab0fc..7c9c5f23032 100644 --- a/cmd/crowdsec-cli/alerts.go +++ b/cmd/crowdsec-cli/alerts.go @@ -24,7 +24,6 @@ import ( "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/apiclient" "github.com/crowdsecurity/crowdsec/pkg/cwversion" - "github.com/crowdsecurity/crowdsec/pkg/database" "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -378,6 +377,7 @@ func (cli *cliAlerts) delete(alertDeleteFilter apiclient.AlertsDeleteOpts, 
Activ alertDeleteFilter.ScopeEquals, alertDeleteFilter.ValueEquals); err != nil { return err } + if ActiveDecision != nil { alertDeleteFilter.ActiveDecisionEquals = ActiveDecision } @@ -385,21 +385,27 @@ func (cli *cliAlerts) delete(alertDeleteFilter apiclient.AlertsDeleteOpts, Activ if *alertDeleteFilter.ScopeEquals == "" { alertDeleteFilter.ScopeEquals = nil } + if *alertDeleteFilter.ValueEquals == "" { alertDeleteFilter.ValueEquals = nil } + if *alertDeleteFilter.ScenarioEquals == "" { alertDeleteFilter.ScenarioEquals = nil } + if *alertDeleteFilter.IPEquals == "" { alertDeleteFilter.IPEquals = nil } + if *alertDeleteFilter.RangeEquals == "" { alertDeleteFilter.RangeEquals = nil } + if contained != nil && *contained { alertDeleteFilter.Contains = new(bool) } + limit := 0 alertDeleteFilter.Limit = &limit } else { @@ -419,6 +425,7 @@ func (cli *cliAlerts) delete(alertDeleteFilter apiclient.AlertsDeleteOpts, Activ return fmt.Errorf("unable to delete alert: %w", err) } } + log.Infof("%s alert(s) deleted", alerts.NbDeleted) return nil @@ -558,14 +565,14 @@ func (cli *cliAlerts) NewFlushCmd() *cobra.Command { /!\ This command can be used only on the same machine than the local API`, Example: `cscli alerts flush --max-items 1000 --max-age 7d`, DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, _ []string) error { + RunE: func(cmd *cobra.Command, _ []string) error { cfg := cli.cfg() if err := require.LAPI(cfg); err != nil { return err } - db, err := database.NewClient(cfg.DbConfig) + db, err := require.DBClient(cmd.Context(), cfg.DbConfig) if err != nil { - return fmt.Errorf("unable to create new database client: %w", err) + return err } log.Info("Flushing alerts. !! 
This may take a long time !!") err = db.FlushAlerts(maxAge, maxItems) diff --git a/cmd/crowdsec-cli/bouncers.go b/cmd/crowdsec-cli/bouncers.go index 2e0adb9b95f..f8628538378 100644 --- a/cmd/crowdsec-cli/bouncers.go +++ b/cmd/crowdsec-cli/bouncers.go @@ -57,7 +57,7 @@ Note: This command requires database direct access, so is intended to be run on Args: cobra.MinimumNArgs(1), Aliases: []string{"bouncer"}, DisableAutoGenTag: true, - PersistentPreRunE: func(_ *cobra.Command, _ []string) error { + PersistentPreRunE: func(cmd *cobra.Command, _ []string) error { var err error cfg := cli.cfg() @@ -66,9 +66,9 @@ Note: This command requires database direct access, so is intended to be run on return err } - cli.db, err = database.NewClient(cfg.DbConfig) + cli.db, err = require.DBClient(cmd.Context(), cfg.DbConfig) if err != nil { - return fmt.Errorf("can't connect to the database: %w", err) + return err } return nil diff --git a/cmd/crowdsec-cli/machines.go b/cmd/crowdsec-cli/machines.go index 1457fb5a0cc..7beaa5c7fdd 100644 --- a/cmd/crowdsec-cli/machines.go +++ b/cmd/crowdsec-cli/machines.go @@ -128,14 +128,14 @@ Note: This command requires database direct access, so is intended to be run on Example: `cscli machines [action]`, DisableAutoGenTag: true, Aliases: []string{"machine"}, - PersistentPreRunE: func(_ *cobra.Command, _ []string) error { + PersistentPreRunE: func(cmd *cobra.Command, _ []string) error { var err error if err = require.LAPI(cli.cfg()); err != nil { return err } - cli.db, err = database.NewClient(cli.cfg().DbConfig) + cli.db, err = require.DBClient(cmd.Context(), cli.cfg().DbConfig) if err != nil { - return fmt.Errorf("unable to create new database client: %w", err) + return err } return nil diff --git a/cmd/crowdsec-cli/papi.go b/cmd/crowdsec-cli/papi.go index b8ed0fd7356..a2fa0a90871 100644 --- a/cmd/crowdsec-cli/papi.go +++ b/cmd/crowdsec-cli/papi.go @@ -12,7 +12,6 @@ import ( "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" 
"github.com/crowdsecurity/crowdsec/pkg/apiserver" - "github.com/crowdsecurity/crowdsec/pkg/database" ) type cliPapi struct { @@ -56,12 +55,12 @@ func (cli *cliPapi) NewStatusCmd() *cobra.Command { Short: "Get status of the Polling API", Args: cobra.MinimumNArgs(0), DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, _ []string) error { + RunE: func(cmd *cobra.Command, _ []string) error { var err error cfg := cli.cfg() - db, err := database.NewClient(cfg.DbConfig) + db, err := require.DBClient(cmd.Context(), cfg.DbConfig) if err != nil { - return fmt.Errorf("unable to initialize database client: %w", err) + return err } apic, err := apiserver.NewAPIC(cfg.API.Server.OnlineClient, db, cfg.API.Server.ConsoleConfig, cfg.API.Server.CapiWhitelists) @@ -105,14 +104,14 @@ func (cli *cliPapi) NewSyncCmd() *cobra.Command { Short: "Sync with the Polling API, pulling all non-expired orders for the instance", Args: cobra.MinimumNArgs(0), DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, _ []string) error { + RunE: func(cmd *cobra.Command, _ []string) error { var err error cfg := cli.cfg() t := tomb.Tomb{} - db, err := database.NewClient(cfg.DbConfig) + db, err := require.DBClient(cmd.Context(), cfg.DbConfig) if err != nil { - return fmt.Errorf("unable to initialize database client: %w", err) + return err } apic, err := apiserver.NewAPIC(cfg.API.Server.OnlineClient, db, cfg.API.Server.ConsoleConfig, cfg.API.Server.CapiWhitelists) diff --git a/cmd/crowdsec-cli/require/require.go b/cmd/crowdsec-cli/require/require.go index 3ff66254466..15d8bce682d 100644 --- a/cmd/crowdsec-cli/require/require.go +++ b/cmd/crowdsec-cli/require/require.go @@ -10,6 +10,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/crowdsecurity/crowdsec/pkg/database" ) func LAPI(c *csconfig.Config) error { @@ -48,6 +49,15 @@ func CAPIRegistered(c *csconfig.Config) error { return nil } +func DBClient(ctx context.Context, dbcfg 
*csconfig.DatabaseCfg) (*database.Client, error) { + db, err := database.NewClient(ctx, dbcfg) + if err != nil { + return nil, fmt.Errorf("failed to connect to database: %w", err) + } + + return db, nil +} + func DB(c *csconfig.Config) error { if err := c.LoadDBConfig(true); err != nil { return fmt.Errorf("this command requires direct database access (must be run on the local API machine): %w", err) diff --git a/cmd/crowdsec-cli/support.go b/cmd/crowdsec-cli/support.go index 41e23dc9654..3b0f53cd6e1 100644 --- a/cmd/crowdsec-cli/support.go +++ b/cmd/crowdsec-cli/support.go @@ -463,9 +463,9 @@ func (cli *cliSupport) dump(ctx context.Context, outFile string) error { w := bytes.NewBuffer(nil) zipWriter := zip.NewWriter(w) - db, err := database.NewClient(cfg.DbConfig) + db, err := require.DBClient(ctx, cfg.DbConfig) if err != nil { - log.Warnf("Could not connect to database: %s", err) + log.Warn(err) } if err = cfg.LoadAPIServer(true); err != nil { diff --git a/cmd/crowdsec/run_in_svc.go b/cmd/crowdsec/run_in_svc.go index 58f4cdf005d..288b565e890 100644 --- a/cmd/crowdsec/run_in_svc.go +++ b/cmd/crowdsec/run_in_svc.go @@ -3,6 +3,7 @@ package main import ( + "context" "fmt" "runtime/pprof" @@ -41,9 +42,10 @@ func StartRunSvc() error { var err error - if cConfig.DbConfig != nil { - dbClient, err = database.NewClient(cConfig.DbConfig) + ctx := context.TODO() + if cConfig.DbConfig != nil { + dbClient, err = database.NewClient(ctx, cConfig.DbConfig) if err != nil { return fmt.Errorf("unable to create database client: %w", err) } diff --git a/cmd/crowdsec/run_in_svc_windows.go b/cmd/crowdsec/run_in_svc_windows.go index c0aa18d7fc6..a2a2dd8c47a 100644 --- a/cmd/crowdsec/run_in_svc_windows.go +++ b/cmd/crowdsec/run_in_svc_windows.go @@ -1,6 +1,7 @@ package main import ( + "context" "fmt" "runtime/pprof" @@ -80,8 +81,10 @@ func WindowsRun() error { var dbClient *database.Client var err error + ctx := context.TODO() + if cConfig.DbConfig != nil { - dbClient, err = 
database.NewClient(cConfig.DbConfig) + dbClient, err = database.NewClient(ctx, cConfig.DbConfig) if err != nil { return fmt.Errorf("unable to create database client: %w", err) diff --git a/cmd/crowdsec/serve.go b/cmd/crowdsec/serve.go index 6c15b2d347e..5fb7b86f181 100644 --- a/cmd/crowdsec/serve.go +++ b/cmd/crowdsec/serve.go @@ -1,6 +1,7 @@ package main import ( + "context" "fmt" "os" "os/signal" @@ -322,8 +323,10 @@ func Serve(cConfig *csconfig.Config, agentReady chan bool) error { crowdsecTomb = tomb.Tomb{} pluginTomb = tomb.Tomb{} + ctx := context.TODO() + if cConfig.API.Server != nil && cConfig.API.Server.DbConfig != nil { - dbClient, err := database.NewClient(cConfig.API.Server.DbConfig) + dbClient, err := database.NewClient(ctx, cConfig.API.Server.DbConfig) if err != nil { return fmt.Errorf("failed to get database client: %w", err) } diff --git a/pkg/apiserver/apic_test.go b/pkg/apiserver/apic_test.go index c5a39455ff9..10f4cf9444b 100644 --- a/pkg/apiserver/apic_test.go +++ b/pkg/apiserver/apic_test.go @@ -38,9 +38,11 @@ import ( func getDBClient(t *testing.T) *database.Client { t.Helper() + ctx := context.Background() + dbPath, err := os.CreateTemp("", "*sqlite") require.NoError(t, err) - dbClient, err := database.NewClient(&csconfig.DatabaseCfg{ + dbClient, err := database.NewClient(ctx, &csconfig.DatabaseCfg{ Type: "sqlite", DbName: "crowdsec", DbPath: dbPath.Name(), @@ -56,7 +58,7 @@ func getAPIC(t *testing.T) *apic { return &apic{ AlertsAddChan: make(chan []*models.Alert), - //DecisionDeleteChan: make(chan []*models.Decision), + // DecisionDeleteChan: make(chan []*models.Decision), dbClient: dbClient, mu: sync.Mutex{}, startup: true, @@ -176,10 +178,11 @@ func TestAPICFetchScenariosListFromDB(t *testing.T) { } scenarios, err := api.FetchScenariosListFromDB() + require.NoError(t, err) + for machineID := range tc.machineIDsWithScenarios { api.dbClient.Ent.Machine.Delete().Where(machine.MachineIdEQ(machineID)).ExecX(context.Background()) } - 
require.NoError(t, err) assert.ElementsMatch(t, tc.expectedScenarios, scenarios) }) @@ -234,6 +237,7 @@ func TestNewAPIC(t *testing.T) { t.Run(tc.name, func(t *testing.T) { setConfig() httpmock.Activate() + defer httpmock.DeactivateAndReset() httpmock.RegisterResponder("POST", "http://foobar/v3/watchers/login", httpmock.NewBytesResponder( 200, jsonMarshalX( @@ -353,6 +357,7 @@ func TestAPICGetMetrics(t *testing.T) { t.Run(tc.name, func(t *testing.T) { apiClient := getAPIC(t) cleanUp(apiClient) + for i, machineID := range tc.machineIDs { apiClient.dbClient.Ent.Machine.Create(). SetMachineId(machineID). @@ -548,7 +553,7 @@ func TestFillAlertsWithDecisions(t *testing.T) { func TestAPICWhitelists(t *testing.T) { api := getAPIC(t) - //one whitelist on IP, one on CIDR + // one whitelist on IP, one on CIDR api.whitelists = &csconfig.CapiWhitelist{} api.whitelists.Ips = append(api.whitelists.Ips, net.ParseIP("9.2.3.4"), net.ParseIP("7.2.3.4")) @@ -593,7 +598,7 @@ func TestAPICWhitelists(t *testing.T) { Scope: ptr.Of("Ip"), Decisions: []*modelscapi.GetDecisionsStreamResponseNewItemDecisionsItems0{ { - Value: ptr.Of("13.2.3.4"), //wl by cidr + Value: ptr.Of("13.2.3.4"), // wl by cidr Duration: ptr.Of("24h"), }, }, @@ -614,7 +619,7 @@ func TestAPICWhitelists(t *testing.T) { Scope: ptr.Of("Ip"), Decisions: []*modelscapi.GetDecisionsStreamResponseNewItemDecisionsItems0{ { - Value: ptr.Of("13.2.3.5"), //wl by cidr + Value: ptr.Of("13.2.3.5"), // wl by cidr Duration: ptr.Of("24h"), }, }, @@ -634,7 +639,7 @@ func TestAPICWhitelists(t *testing.T) { Scope: ptr.Of("Ip"), Decisions: []*modelscapi.GetDecisionsStreamResponseNewItemDecisionsItems0{ { - Value: ptr.Of("9.2.3.4"), //wl by ip + Value: ptr.Of("9.2.3.4"), // wl by ip Duration: ptr.Of("24h"), }, }, @@ -685,7 +690,7 @@ func TestAPICWhitelists(t *testing.T) { err = api.PullTop(false) require.NoError(t, err) - assertTotalDecisionCount(t, api.dbClient, 5) //2 from FIRE + 2 from bl + 1 existing + assertTotalDecisionCount(t, 
api.dbClient, 5) // 2 from FIRE + 2 from bl + 1 existing assertTotalValidDecisionCount(t, api.dbClient, 4) assertTotalAlertCount(t, api.dbClient, 3) // 2 for list sub , 1 for community list. alerts := api.dbClient.Ent.Alert.Query().AllX(context.Background()) @@ -1103,6 +1108,7 @@ func TestAPICPush(t *testing.T) { httpmock.Activate() defer httpmock.DeactivateAndReset() + apic, err := apiclient.NewDefaultClient( url, "/api", diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go index 056f74a7b31..c6074801d7e 100644 --- a/pkg/apiserver/apiserver.go +++ b/pkg/apiserver/apiserver.go @@ -162,7 +162,9 @@ func newGinLogger(config *csconfig.LocalApiServerCfg) (*log.Logger, string, erro func NewServer(config *csconfig.LocalApiServerCfg) (*APIServer, error) { var flushScheduler *gocron.Scheduler - dbClient, err := database.NewClient(config.DbConfig) + ctx := context.TODO() + + dbClient, err := database.NewClient(ctx, config.DbConfig) if err != nil { return nil, fmt.Errorf("unable to init database client: %w", err) } @@ -227,7 +229,7 @@ func NewServer(config *csconfig.LocalApiServerCfg) (*APIServer, error) { controller := &controllers.Controller{ DBClient: dbClient, - Ectx: context.Background(), + Ectx: ctx, Router: router, Profiles: config.Profiles, Log: clog, diff --git a/pkg/apiserver/apiserver_test.go b/pkg/apiserver/apiserver_test.go index b7f6be5fe36..20c48337833 100644 --- a/pkg/apiserver/apiserver_test.go +++ b/pkg/apiserver/apiserver_test.go @@ -1,6 +1,7 @@ package apiserver import ( + "context" "encoding/json" "fmt" "net/http" @@ -161,7 +162,9 @@ func NewAPITestForwardedFor(t *testing.T) (*gin.Engine, csconfig.Config) { } func ValidateMachine(t *testing.T, machineID string, config *csconfig.DatabaseCfg) { - dbClient, err := database.NewClient(config) + ctx := context.Background() + + dbClient, err := database.NewClient(ctx, config) require.NoError(t, err) err = dbClient.ValidateMachine(machineID) @@ -169,7 +172,9 @@ func ValidateMachine(t *testing.T, 
machineID string, config *csconfig.DatabaseCf } func GetMachineIP(t *testing.T, machineID string, config *csconfig.DatabaseCfg) string { - dbClient, err := database.NewClient(config) + ctx := context.Background() + + dbClient, err := database.NewClient(ctx, config) require.NoError(t, err) machines, err := dbClient.ListMachines() @@ -260,7 +265,9 @@ func CreateTestMachine(t *testing.T, router *gin.Engine) string { } func CreateTestBouncer(t *testing.T, config *csconfig.DatabaseCfg) string { - dbClient, err := database.NewClient(config) + ctx := context.Background() + + dbClient, err := database.NewClient(ctx, config) require.NoError(t, err) apiKey, err := middlewares.GenerateAPIKey(keyLength) @@ -356,10 +363,10 @@ func TestLoggingDebugToFileConfig(t *testing.T) { req.Header.Set("User-Agent", UserAgent) api.router.ServeHTTP(w, req) assert.Equal(t, 404, w.Code) - //wait for the request to happen + // wait for the request to happen time.Sleep(500 * time.Millisecond) - //check file content + // check file content data, err := os.ReadFile(expectedFile) require.NoError(t, err) @@ -406,10 +413,10 @@ func TestLoggingErrorToFileConfig(t *testing.T) { req.Header.Set("User-Agent", UserAgent) api.router.ServeHTTP(w, req) assert.Equal(t, http.StatusNotFound, w.Code) - //wait for the request to happen + // wait for the request to happen time.Sleep(500 * time.Millisecond) - //check file content + // check file content x, err := os.ReadFile(expectedFile) if err == nil { require.Empty(t, x) diff --git a/pkg/database/alerts.go b/pkg/database/alerts.go index d9efe4c254b..3563adba68c 100644 --- a/pkg/database/alerts.go +++ b/pkg/database/alerts.go @@ -947,7 +947,7 @@ func (c *Client) AlertsCountPerScenario(filters map[string][]string) (map[string Count int } - ctx := context.Background() + ctx := context.TODO() query := c.Ent.Alert.Query() diff --git a/pkg/database/database.go b/pkg/database/database.go index fc89aa5cd4d..6f392c46d21 100644 --- a/pkg/database/database.go +++ 
b/pkg/database/database.go @@ -48,7 +48,7 @@ func getEntDriver(dbtype string, dbdialect string, dsn string, config *csconfig. return drv, nil } -func NewClient(config *csconfig.DatabaseCfg) (*Client, error) { +func NewClient(ctx context.Context, config *csconfig.DatabaseCfg) (*Client, error) { var client *ent.Client if config == nil { @@ -69,7 +69,7 @@ func NewClient(config *csconfig.DatabaseCfg) (*Client, error) { typ, dia, err := config.ConnectionDialect() if err != nil { - return nil, err //unsupported database caught here + return nil, err // unsupported database caught here } if config.Type == "sqlite" { @@ -103,13 +103,13 @@ func NewClient(config *csconfig.DatabaseCfg) (*Client, error) { client = client.Debug() } - if err = client.Schema.Create(context.Background()); err != nil { + if err = client.Schema.Create(ctx); err != nil { return nil, fmt.Errorf("failed creating schema resources: %v", err) } return &Client{ Ent: client, - CTX: context.Background(), + CTX: ctx, Log: clog, CanFlush: true, Type: config.Type, diff --git a/pkg/exprhelpers/exprlib_test.go b/pkg/exprhelpers/exprlib_test.go index 687465d9493..0f6e1a21e2f 100644 --- a/pkg/exprhelpers/exprlib_test.go +++ b/pkg/exprhelpers/exprlib_test.go @@ -29,7 +29,9 @@ func getDBClient(t *testing.T) *database.Client { dbPath, err := os.CreateTemp("", "*sqlite") require.NoError(t, err) - testDBClient, err := database.NewClient(&csconfig.DatabaseCfg{ + ctx := context.Background() + + testDBClient, err := database.NewClient(ctx, &csconfig.DatabaseCfg{ Type: "sqlite", DbName: "crowdsec", DbPath: dbPath.Name(), @@ -215,7 +217,7 @@ func TestRegexpCacheBehavior(t *testing.T) { err = FileInit(TestFolder, filename, "regex") require.NoError(t, err) - //cache with no TTL + // cache with no TTL err = RegexpCacheInit(filename, types.DataSource{Type: "regex", Size: ptr.Of(1)}) require.NoError(t, err) @@ -227,7 +229,7 @@ func TestRegexpCacheBehavior(t *testing.T) { assert.True(t, ret.(bool)) assert.Equal(t, 1, 
dataFileRegexCache[filename].Len(false)) - //cache with TTL + // cache with TTL ttl := 500 * time.Millisecond err = RegexpCacheInit(filename, types.DataSource{Type: "regex", Size: ptr.Of(2), TTL: &ttl}) require.NoError(t, err) @@ -994,6 +996,7 @@ func TestGetDecisionsCount(t *testing.T) { log.Printf("test '%s' : OK", test.name) } } + func TestGetDecisionsSinceCount(t *testing.T) { existingIP := "1.2.3.4" unknownIP := "1.2.3.5" @@ -1365,6 +1368,7 @@ func TestGetActiveDecisionsTimeLeft(t *testing.T) { require.NoError(t, err) output, err := expr.Run(program, test.env) require.NoError(t, err) + switch o := output.(type) { case time.Duration: require.LessOrEqual(t, int(o.Seconds()), int(test.max)) @@ -1376,7 +1380,6 @@ func TestGetActiveDecisionsTimeLeft(t *testing.T) { t.Fatalf("GetActiveDecisionsTimeLeft() should return a time.Duration or a float64") } } - } func TestParseUnixTime(t *testing.T) { @@ -1415,9 +1418,11 @@ func TestParseUnixTime(t *testing.T) { t.Run(tc.name, func(t *testing.T) { output, err := ParseUnixTime(tc.value) cstest.RequireErrorContains(t, err, tc.expectedErr) + if tc.expectedErr != "" { return } + require.WithinDuration(t, tc.expected, output.(time.Time), time.Second) }) } @@ -1520,6 +1525,7 @@ func TestIsIp(t *testing.T) { require.Error(t, err) return } + require.NoError(t, err) output, err := expr.Run(vm, map[string]interface{}{"value": tc.value}) require.NoError(t, err) @@ -1619,12 +1625,15 @@ func TestB64Decode(t *testing.T) { require.Error(t, err) return } + require.NoError(t, err) + output, err := expr.Run(vm, map[string]interface{}{"value": tc.value}) if tc.expectedRuntimeErr { require.Error(t, err) return } + require.NoError(t, err) require.Equal(t, tc.expected, output) }) From fd433a7e738dd7da0d0102bc957ed8cd86a0ab98 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 12 Jun 2024 11:07:10 +0200 Subject: [PATCH 184/318] cscli: refactor "cscli setup" to avoid global variable (#3075) * cscli refactor: 
extract method cliSetup.detect() * cscli refactor: extract method cliSetup.install() * cscli refactor: extract method cliSetup.dataSources() * cscli refactor: method ccliSetup.validate() * skip redundant pre-loop checks --- cmd/crowdsec-cli/setup.go | 194 ++++++++++++++++---------------------- pkg/setup/install.go | 102 +++++++++----------- 2 files changed, 126 insertions(+), 170 deletions(-) diff --git a/cmd/crowdsec-cli/setup.go b/cmd/crowdsec-cli/setup.go index 9f685d0fac1..d747af9225f 100644 --- a/cmd/crowdsec-cli/setup.go +++ b/cmd/crowdsec-cli/setup.go @@ -2,6 +2,7 @@ package main import ( "bytes" + "context" "errors" "fmt" "os" @@ -44,57 +45,85 @@ func (cli *cliSetup) NewCommand() *cobra.Command { return cmd } +type detectFlags struct { + detectConfigFile string + listSupportedServices bool + forcedUnits []string + forcedProcesses []string + forcedOSFamily string + forcedOSID string + forcedOSVersion string + skipServices []string + snubSystemd bool + outYaml bool +} + +func (f *detectFlags) bind(cmd *cobra.Command) { + defaultServiceDetect := csconfig.DefaultConfigPath("hub", "detect.yaml") + + flags := cmd.Flags() + flags.StringVar(&f.detectConfigFile, "detect-config", defaultServiceDetect, "path to service detection configuration") + flags.BoolVar(&f.listSupportedServices, "list-supported-services", false, "do not detect; only print supported services") + flags.StringSliceVar(&f.forcedUnits, "force-unit", nil, "force detection of a systemd unit (can be repeated)") + flags.StringSliceVar(&f.forcedProcesses, "force-process", nil, "force detection of a running process (can be repeated)") + flags.StringSliceVar(&f.skipServices, "skip-service", nil, "ignore a service, don't recommend hub/datasources (can be repeated)") + flags.StringVar(&f.forcedOSFamily, "force-os-family", "", "override OS.Family: one of linux, freebsd, windows or darwin") + flags.StringVar(&f.forcedOSID, "force-os-id", "", "override OS.ID=[debian | ubuntu | , redhat...]") + 
flags.StringVar(&f.forcedOSVersion, "force-os-version", "", "override OS.RawVersion (of OS or Linux distribution)") + flags.BoolVar(&f.snubSystemd, "snub-systemd", false, "don't use systemd, even if available") + flags.BoolVar(&f.outYaml, "yaml", false, "output yaml, not json") +} + func (cli *cliSetup) NewDetectCmd() *cobra.Command { + f := detectFlags{} + cmd := &cobra.Command{ Use: "detect", Short: "detect running services, generate a setup file", DisableAutoGenTag: true, - RunE: runSetupDetect, + RunE: func(_ *cobra.Command, args []string) error { + return cli.detect(f) + }, } - defaultServiceDetect := csconfig.DefaultConfigPath("hub", "detect.yaml") - - flags := cmd.Flags() - flags.String("detect-config", defaultServiceDetect, "path to service detection configuration") - flags.Bool("list-supported-services", false, "do not detect; only print supported services") - flags.StringSlice("force-unit", nil, "force detection of a systemd unit (can be repeated)") - flags.StringSlice("force-process", nil, "force detection of a running process (can be repeated)") - flags.StringSlice("skip-service", nil, "ignore a service, don't recommend hub/datasources (can be repeated)") - flags.String("force-os-family", "", "override OS.Family: one of linux, freebsd, windows or darwin") - flags.String("force-os-id", "", "override OS.ID=[debian | ubuntu | , redhat...]") - flags.String("force-os-version", "", "override OS.RawVersion (of OS or Linux distribution)") - flags.Bool("snub-systemd", false, "don't use systemd, even if available") - flags.Bool("yaml", false, "output yaml, not json") - + f.bind(cmd) return cmd } func (cli *cliSetup) NewInstallHubCmd() *cobra.Command { + var dryRun bool + cmd := &cobra.Command{ Use: "install-hub [setup_file] [flags]", Short: "install items from a setup file", Args: cobra.ExactArgs(1), DisableAutoGenTag: true, - RunE: runSetupInstallHub, + RunE: func(cmd *cobra.Command, args []string) error { + return cli.install(cmd.Context(), dryRun, args[0]) + 
}, } flags := cmd.Flags() - flags.Bool("dry-run", false, "don't install anything; print out what would have been") + flags.BoolVar(&dryRun, "dry-run", false, "don't install anything; print out what would have been") return cmd } func (cli *cliSetup) NewDataSourcesCmd() *cobra.Command { + var toDir string + cmd := &cobra.Command{ Use: "datasources [setup_file] [flags]", Short: "generate datasource (acquisition) configuration from a setup file", Args: cobra.ExactArgs(1), DisableAutoGenTag: true, - RunE: runSetupDataSources, + RunE: func(cmd *cobra.Command, args []string) error { + return cli.dataSources(args[0], toDir) + }, } flags := cmd.Flags() - flags.String("to-dir", "", "write the configuration to a directory, in multiple files") + flags.StringVar(&toDir, "to-dir", "", "write the configuration to a directory, in multiple files") return cmd } @@ -105,97 +134,50 @@ func (cli *cliSetup) NewValidateCmd() *cobra.Command { Short: "validate a setup file", Args: cobra.ExactArgs(1), DisableAutoGenTag: true, - RunE: runSetupValidate, + RunE: func(cmd *cobra.Command, args []string) error { + return cli.validate(args[0]) + }, } return cmd } -func runSetupDetect(cmd *cobra.Command, args []string) error { - flags := cmd.Flags() - - detectConfigFile, err := flags.GetString("detect-config") - if err != nil { - return err - } - - var detectReader *os.File +func (cli *cliSetup) detect(f detectFlags) error { + var ( + detectReader *os.File + err error + ) - switch detectConfigFile { + switch f.detectConfigFile { case "-": log.Tracef("Reading detection rules from stdin") detectReader = os.Stdin default: - log.Tracef("Reading detection rules: %s", detectConfigFile) + log.Tracef("Reading detection rules: %s", f.detectConfigFile) - detectReader, err = os.Open(detectConfigFile) + detectReader, err = os.Open(f.detectConfigFile) if err != nil { return err } } - listSupportedServices, err := flags.GetBool("list-supported-services") - if err != nil { - return err - } - - forcedUnits, err 
:= flags.GetStringSlice("force-unit") - if err != nil { - return err - } - - forcedProcesses, err := flags.GetStringSlice("force-process") - if err != nil { - return err - } - - forcedOSFamily, err := flags.GetString("force-os-family") - if err != nil { - return err - } - - forcedOSID, err := flags.GetString("force-os-id") - if err != nil { - return err - } - - forcedOSVersion, err := flags.GetString("force-os-version") - if err != nil { - return err - } - - skipServices, err := flags.GetStringSlice("skip-service") - if err != nil { - return err - } - - snubSystemd, err := flags.GetBool("snub-systemd") - if err != nil { - return err - } - - if !snubSystemd { + if !f.snubSystemd { _, err := exec.LookPath("systemctl") if err != nil { log.Debug("systemctl not available: snubbing systemd") - snubSystemd = true + f.snubSystemd = true } } - outYaml, err := flags.GetBool("yaml") - if err != nil { - return err - } - - if forcedOSFamily == "" && forcedOSID != "" { + if f.forcedOSFamily == "" && f.forcedOSID != "" { log.Debug("force-os-id is set: force-os-family defaults to 'linux'") - forcedOSFamily = "linux" + f.forcedOSFamily = "linux" } - if listSupportedServices { + if f.listSupportedServices { supported, err := setup.ListSupported(detectReader) if err != nil { return err @@ -209,15 +191,15 @@ func runSetupDetect(cmd *cobra.Command, args []string) error { } opts := setup.DetectOptions{ - ForcedUnits: forcedUnits, - ForcedProcesses: forcedProcesses, + ForcedUnits: f.forcedUnits, + ForcedProcesses: f.forcedProcesses, ForcedOS: setup.ExprOS{ - Family: forcedOSFamily, - ID: forcedOSID, - RawVersion: forcedOSVersion, + Family: f.forcedOSFamily, + ID: f.forcedOSID, + RawVersion: f.forcedOSVersion, }, - SkipServices: skipServices, - SnubSystemd: snubSystemd, + SkipServices: f.skipServices, + SnubSystemd: f.snubSystemd, } hubSetup, err := setup.Detect(detectReader, opts) @@ -225,7 +207,7 @@ func runSetupDetect(cmd *cobra.Command, args []string) error { return 
fmt.Errorf("detecting services: %w", err) } - setup, err := setupAsString(hubSetup, outYaml) + setup, err := setupAsString(hubSetup, f.outYaml) if err != nil { return err } @@ -273,16 +255,7 @@ func setupAsString(cs setup.Setup, outYaml bool) (string, error) { return string(ret), nil } -func runSetupDataSources(cmd *cobra.Command, args []string) error { - flags := cmd.Flags() - - fromFile := args[0] - - toDir, err := flags.GetString("to-dir") - if err != nil { - return err - } - +func (cli *cliSetup) dataSources(fromFile string, toDir string) error { input, err := os.ReadFile(fromFile) if err != nil { return fmt.Errorf("while reading setup file: %w", err) @@ -300,32 +273,23 @@ func runSetupDataSources(cmd *cobra.Command, args []string) error { return nil } -func runSetupInstallHub(cmd *cobra.Command, args []string) error { - flags := cmd.Flags() - - fromFile := args[0] - - dryRun, err := flags.GetBool("dry-run") - if err != nil { - return err - } - +func (cli *cliSetup) install(ctx context.Context, dryRun bool, fromFile string) error { input, err := os.ReadFile(fromFile) if err != nil { return fmt.Errorf("while reading file %s: %w", fromFile, err) } - hub, err := require.Hub(csConfig, require.RemoteHub(cmd.Context(), csConfig), log.StandardLogger()) + cfg := cli.cfg() + + hub, err := require.Hub(cfg, require.RemoteHub(ctx, cfg), log.StandardLogger()) if err != nil { return err } - return setup.InstallHubItems(cmd.Context(), hub, input, dryRun) + return setup.InstallHubItems(ctx, hub, input, dryRun) } -func runSetupValidate(cmd *cobra.Command, args []string) error { - fromFile := args[0] - +func (cli *cliSetup) validate(fromFile string) error { input, err := os.ReadFile(fromFile) if err != nil { return fmt.Errorf("while reading stdin: %w", err) diff --git a/pkg/setup/install.go b/pkg/setup/install.go index 09ac15971f8..fc5bd380fd9 100644 --- a/pkg/setup/install.go +++ b/pkg/setup/install.go @@ -62,79 +62,71 @@ func InstallHubItems(ctx context.Context, hub 
*cwhub.Hub, input []byte, dryRun b continue } - if len(install.Collections) > 0 { - for _, collection := range setupItem.Install.Collections { - item := hub.GetItem(cwhub.COLLECTIONS, collection) - if item == nil { - return fmt.Errorf("collection %s not found", collection) - } - - if dryRun { - fmt.Println("dry-run: would install collection", collection) - - continue - } - - if err := item.Install(ctx, forceAction, downloadOnly); err != nil { - return fmt.Errorf("while installing collection %s: %w", item.Name, err) - } + for _, collection := range setupItem.Install.Collections { + item := hub.GetItem(cwhub.COLLECTIONS, collection) + if item == nil { + return fmt.Errorf("collection %s not found", collection) + } + + if dryRun { + fmt.Println("dry-run: would install collection", collection) + + continue + } + + if err := item.Install(ctx, forceAction, downloadOnly); err != nil { + return fmt.Errorf("while installing collection %s: %w", item.Name, err) } } - if len(install.Parsers) > 0 { - for _, parser := range setupItem.Install.Parsers { - if dryRun { - fmt.Println("dry-run: would install parser", parser) + for _, parser := range setupItem.Install.Parsers { + if dryRun { + fmt.Println("dry-run: would install parser", parser) - continue - } + continue + } - item := hub.GetItem(cwhub.PARSERS, parser) - if item == nil { - return fmt.Errorf("parser %s not found", parser) - } + item := hub.GetItem(cwhub.PARSERS, parser) + if item == nil { + return fmt.Errorf("parser %s not found", parser) + } - if err := item.Install(ctx, forceAction, downloadOnly); err != nil { - return fmt.Errorf("while installing parser %s: %w", item.Name, err) - } + if err := item.Install(ctx, forceAction, downloadOnly); err != nil { + return fmt.Errorf("while installing parser %s: %w", item.Name, err) } } - if len(install.Scenarios) > 0 { - for _, scenario := range setupItem.Install.Scenarios { - if dryRun { - fmt.Println("dry-run: would install scenario", scenario) + for _, scenario := range 
setupItem.Install.Scenarios { + if dryRun { + fmt.Println("dry-run: would install scenario", scenario) - continue - } + continue + } - item := hub.GetItem(cwhub.SCENARIOS, scenario) - if item == nil { - return fmt.Errorf("scenario %s not found", scenario) - } + item := hub.GetItem(cwhub.SCENARIOS, scenario) + if item == nil { + return fmt.Errorf("scenario %s not found", scenario) + } - if err := item.Install(ctx, forceAction, downloadOnly); err != nil { - return fmt.Errorf("while installing scenario %s: %w", item.Name, err) - } + if err := item.Install(ctx, forceAction, downloadOnly); err != nil { + return fmt.Errorf("while installing scenario %s: %w", item.Name, err) } } - if len(install.PostOverflows) > 0 { - for _, postoverflow := range setupItem.Install.PostOverflows { - if dryRun { - fmt.Println("dry-run: would install postoverflow", postoverflow) + for _, postoverflow := range setupItem.Install.PostOverflows { + if dryRun { + fmt.Println("dry-run: would install postoverflow", postoverflow) - continue - } + continue + } - item := hub.GetItem(cwhub.POSTOVERFLOWS, postoverflow) - if item == nil { - return fmt.Errorf("postoverflow %s not found", postoverflow) - } + item := hub.GetItem(cwhub.POSTOVERFLOWS, postoverflow) + if item == nil { + return fmt.Errorf("postoverflow %s not found", postoverflow) + } - if err := item.Install(ctx, forceAction, downloadOnly); err != nil { - return fmt.Errorf("while installing postoverflow %s: %w", item.Name, err) - } + if err := item.Install(ctx, forceAction, downloadOnly); err != nil { + return fmt.Errorf("while installing postoverflow %s: %w", item.Name, err) } } } From 73792eacb60ae30919f3dd741301fd9690632f90 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 12 Jun 2024 11:49:19 +0200 Subject: [PATCH 185/318] refactor pkg/parser: extract processGrok (#3080) * pkg/parser: extract method processGrok() * early return * early return/2 --- pkg/parser/node.go | 148 
++++++++++++++++++++++++--------------------- 1 file changed, 79 insertions(+), 69 deletions(-) diff --git a/pkg/parser/node.go b/pkg/parser/node.go index 74c9e94a331..4bb2c4c5eaf 100644 --- a/pkg/parser/node.go +++ b/pkg/parser/node.go @@ -202,9 +202,84 @@ func (n *Node) processWhitelist(cachedExprEnv map[string]interface{}, p *types.E return isWhitelisted, nil } -func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[string]interface{}) (bool, error) { + +func (n *Node) processGrok(p *types.Event, cachedExprEnv map[string]any) (bool, bool, error) { + // Process grok if present, should be exclusive with nodes :) + clog := n.Logger var NodeHasOKGrok bool + gstr := "" + + if n.Grok.RunTimeRegexp == nil { + clog.Tracef("! No grok pattern : %p", n.Grok.RunTimeRegexp) + return true, false, nil + } + + clog.Tracef("Processing grok pattern : %s : %p", n.Grok.RegexpName, n.Grok.RunTimeRegexp) + // for unparsed, parsed etc. set sensible defaults to reduce user hassle + if n.Grok.TargetField != "" { + // it's a hack to avoid using real reflect + if n.Grok.TargetField == "Line.Raw" { + gstr = p.Line.Raw + } else if val, ok := p.Parsed[n.Grok.TargetField]; ok { + gstr = val + } else { + clog.Debugf("(%s) target field '%s' doesn't exist in %v", n.rn, n.Grok.TargetField, p.Parsed) + return false, false, nil + } + } else if n.Grok.RunTimeValue != nil { + output, err := exprhelpers.Run(n.Grok.RunTimeValue, cachedExprEnv, clog, n.Debug) + if err != nil { + clog.Warningf("failed to run RunTimeValue : %v", err) + return false, false, nil + } + + switch out := output.(type) { + case string: + gstr = out + case int: + gstr = fmt.Sprintf("%d", out) + case float64, float32: + gstr = fmt.Sprintf("%f", out) + default: + clog.Errorf("unexpected return type for RunTimeValue : %T", output) + } + } + + var groklabel string + if n.Grok.RegexpName == "" { + groklabel = fmt.Sprintf("%5.5s...", n.Grok.RegexpValue) + } else { + groklabel = n.Grok.RegexpName + } + + grok := 
n.Grok.RunTimeRegexp.Parse(gstr) + + if len(grok) == 0 { + // grok failed, node failed + clog.Debugf("+ Grok '%s' didn't return data on '%s'", groklabel, gstr) + return false, false, nil + } + + /*tag explicitly that the *current* node had a successful grok pattern. it's important to know success state*/ + NodeHasOKGrok = true + clog.Debugf("+ Grok '%s' returned %d entries to merge in Parsed", groklabel, len(grok)) + // We managed to grok stuff, merged into parse + for k, v := range grok { + clog.Debugf("\t.Parsed['%s'] = '%s'", k, v) + p.Parsed[k] = v + } + // if the grok succeed, process associated statics + err := n.ProcessStatics(n.Grok.Statics, p) + if err != nil { + clog.Errorf("(%s) Failed to process statics : %v", n.rn, err) + return false, false, err + } + + return true, NodeHasOKGrok, nil +} + +func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[string]interface{}) (bool, error) { clog := n.Logger cachedExprEnv := expressionEnv @@ -229,74 +304,9 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[stri return false, err } - // Process grok if present, should be exclusive with nodes :) - gstr := "" - - if n.Grok.RunTimeRegexp != nil { - clog.Tracef("Processing grok pattern : %s : %p", n.Grok.RegexpName, n.Grok.RunTimeRegexp) - // for unparsed, parsed etc. 
set sensible defaults to reduce user hassle - if n.Grok.TargetField != "" { - // it's a hack to avoid using real reflect - if n.Grok.TargetField == "Line.Raw" { - gstr = p.Line.Raw - } else if val, ok := p.Parsed[n.Grok.TargetField]; ok { - gstr = val - } else { - clog.Debugf("(%s) target field '%s' doesn't exist in %v", n.rn, n.Grok.TargetField, p.Parsed) - - NodeState = false - } - } else if n.Grok.RunTimeValue != nil { - output, err := exprhelpers.Run(n.Grok.RunTimeValue, cachedExprEnv, clog, n.Debug) - if err != nil { - clog.Warningf("failed to run RunTimeValue : %v", err) - - NodeState = false - } - - switch out := output.(type) { - case string: - gstr = out - case int: - gstr = fmt.Sprintf("%d", out) - case float64, float32: - gstr = fmt.Sprintf("%f", out) - default: - clog.Errorf("unexpected return type for RunTimeValue : %T", output) - } - } - - var groklabel string - if n.Grok.RegexpName == "" { - groklabel = fmt.Sprintf("%5.5s...", n.Grok.RegexpValue) - } else { - groklabel = n.Grok.RegexpName - } - - grok := n.Grok.RunTimeRegexp.Parse(gstr) - if len(grok) > 0 { - /*tag explicitly that the *current* node had a successful grok pattern. it's important to know success state*/ - NodeHasOKGrok = true - - clog.Debugf("+ Grok '%s' returned %d entries to merge in Parsed", groklabel, len(grok)) - // We managed to grok stuff, merged into parse - for k, v := range grok { - clog.Debugf("\t.Parsed['%s'] = '%s'", k, v) - p.Parsed[k] = v - } - // if the grok succeed, process associated statics - err := n.ProcessStatics(n.Grok.Statics, p) - if err != nil { - clog.Errorf("(%s) Failed to process statics : %v", n.rn, err) - return false, err - } - } else { - // grok failed, node failed - clog.Debugf("+ Grok '%s' didn't return data on '%s'", groklabel, gstr) - NodeState = false - } - } else { - clog.Tracef("! 
No grok pattern : %p", n.Grok.RunTimeRegexp) + NodeState, NodeHasOKGrok, err := n.processGrok(p, cachedExprEnv) + if err != nil { + return false, err } // Process the stash (data collection) if : a grok was present and succeeded, or if there is no grok From e859a751e9e431845d4f309b5c858f5cb7ca8256 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 12 Jun 2024 13:04:47 +0200 Subject: [PATCH 186/318] lint: disable redundant linters, update revive configuration and code metrics (#3081) --- .golangci.yml | 53 +++++++++++++++++----------------------- pkg/csconfig/database.go | 5 +++- 2 files changed, 26 insertions(+), 32 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 925005a5b05..62fca6eb1d1 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -16,28 +16,12 @@ linters-settings: # lower this after refactoring min-complexity: 128 - funlen: - # Checks the number of lines in a function. - # If lower than 0, disable the check. - # Default: 60 - # lower this after refactoring - lines: 437 - # Checks the number of statements in a function. - # If lower than 0, disable the check. 
- # Default: 40 - # lower this after refactoring - statements: 122 - govet: enable-all: true disable: - reflectvaluecompare - fieldalignment - lll: - # lower this after refactoring - line-length: 2607 - maintidx: # raise this after refactoring under: 11 @@ -125,7 +109,7 @@ linters-settings: disabled: true - name: cyclomatic # lower this after refactoring - arguments: [45] + arguments: [42] - name: defer disabled: true - name: empty-block @@ -137,9 +121,10 @@ linters-settings: - name: flag-parameter disabled: true - name: function-result-limit - disabled: true + arguments: [6] - name: function-length - disabled: true + # lower this after refactoring + arguments: [110, 235] - name: get-return disabled: true - name: increment-decrement @@ -149,9 +134,11 @@ linters-settings: - name: import-shadowing disabled: true - name: line-length-limit - disabled: true + # lower this after refactoring + arguments: [221] - name: max-control-nesting - disabled: true + # lower this after refactoring + arguments: [7] - name: max-public-structs disabled: true - name: optimize-operands-order @@ -211,6 +198,8 @@ linters: - gocyclo # revive - cyclop # revive + - lll # revive + - funlen # revive # # Disabled until fixed for go 1.22 @@ -234,7 +223,6 @@ linters: # - errcheck # errcheck is a program for checking for unchecked errors in Go code. These unchecked errors can be critical bugs in some cases # - errorlint # errorlint is a linter for that can be used to find code that will cause problems with the error wrapping scheme introduced in Go 1.13. # - exportloopref # checks for pointers to enclosing loop variables - # - funlen # Tool for detection of long functions # - ginkgolinter # enforces standards of using ginkgo and gomega # - gocheckcompilerdirectives # Checks that go compiler directive comments (//go:) are valid. 
# - gochecknoinits # Checks that no init functions are present in Go code @@ -252,7 +240,6 @@ linters: # - importas # Enforces consistent import aliases # - ineffassign # Detects when assignments to existing variables are not used # - interfacebloat # A linter that checks the number of methods inside an interface. - # - lll # Reports long lines # - loggercheck # (logrlint): Checks key value pairs for common logger libraries (kitlog,klog,logr,zap). # - logrlint # Check logr arguments. # - maintidx # maintidx measures the maintainability index of each function. @@ -473,14 +460,6 @@ issues: path: pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go text: "confusing-naming: Method 'QueryRange' differs only by capitalization to method 'queryRange' in the same source file" - - linters: - - revive - path: pkg/metabase/metabase.go - - - linters: - - revive - path: pkg/metabase/container.go - - linters: - revive path: cmd/crowdsec-cli/copyfile.go @@ -494,6 +473,18 @@ issues: - canonicalheader path: pkg/apiserver/middlewares/v1/tls_auth.go + # tolerate long functions in tests + - linters: + - revive + path: "pkg/(.+)_test.go" + text: "function-length: .*" + + # tolerate long lines in tests + - linters: + - revive + path: "pkg/(.+)_test.go" + text: "line-length-limit: .*" + # tolerate deep exit in tests, for now - linters: - revive diff --git a/pkg/csconfig/database.go b/pkg/csconfig/database.go index 2fe610eba68..d30cc20a938 100644 --- a/pkg/csconfig/database.go +++ b/pkg/csconfig/database.go @@ -79,7 +79,10 @@ func (c *Config) LoadDBConfig(inCli bool) error { switch { case err != nil: log.Warnf("unable to determine if database is on network filesystem: %s", err) - log.Warning("You are using sqlite without WAL, this can have a performance impact. If you do not store the database in a network share, set db_config.use_wal to true. 
Set explicitly to false to disable this warning.") + log.Warning( + "You are using sqlite without WAL, this can have a performance impact. " + + "If you do not store the database in a network share, set db_config.use_wal to true. " + + "Set explicitly to false to disable this warning.") case isNetwork: log.Debugf("database is on network filesystem (%s), setting useWal to false", fsType) c.DbConfig.UseWal = ptr.Of(false) From 4e09ae21828c3b8fb3c9c6a015c8162ebd53f4cc Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 12 Jun 2024 13:06:44 +0200 Subject: [PATCH 187/318] cscli: fixed some inconsistency in returning errors (#3076) --- cmd/crowdsec-cli/alerts.go | 2 +- cmd/crowdsec-cli/hubtest.go | 9 ++++----- cmd/crowdsec-cli/lapi.go | 7 ++----- cmd/crowdsec-cli/simulation.go | 4 ++-- cmd/crowdsec-cli/utils.go | 9 --------- 5 files changed, 9 insertions(+), 22 deletions(-) diff --git a/cmd/crowdsec-cli/alerts.go b/cmd/crowdsec-cli/alerts.go index 7c9c5f23032..0fe9852519c 100644 --- a/cmd/crowdsec-cli/alerts.go +++ b/cmd/crowdsec-cli/alerts.go @@ -540,7 +540,7 @@ func (cli *cliAlerts) NewInspectCmd() *cobra.Command { DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { if len(args) == 0 { - printHelp(cmd) + _ = cmd.Help() return errors.New("missing alert_id") } return cli.inspect(details, args...) 
diff --git a/cmd/crowdsec-cli/hubtest.go b/cmd/crowdsec-cli/hubtest.go index 458de672349..d58631e26c9 100644 --- a/cmd/crowdsec-cli/hubtest.go +++ b/cmd/crowdsec-cli/hubtest.go @@ -251,7 +251,7 @@ func (cli *cliHubTest) NewRunCmd() *cobra.Command { cfg := cli.cfg() if !runAll && len(args) == 0 { - printHelp(cmd) + _ = cmd.Help() return errors.New("please provide test to run or --all flag") } hubPtr.NucleiTargetHost = NucleiTargetHost @@ -305,8 +305,7 @@ func (cli *cliHubTest) NewRunCmd() *cobra.Command { return fmt.Errorf("unable to clean test '%s' env: %w", test.Name, err) } } - fmt.Printf("\nPlease fill your assert file(s) for test '%s', exiting\n", test.Name) - os.Exit(1) + return fmt.Errorf("please fill your assert file(s) for test '%s', exiting", test.Name) } testResult[test.Name] = test.Success if test.Success { @@ -389,7 +388,7 @@ func (cli *cliHubTest) NewRunCmd() *cobra.Command { } if !success { - os.Exit(1) + return errors.New("some tests failed") } return nil @@ -580,7 +579,7 @@ func (cli *cliHubTest) NewCoverageCmd() *cobra.Command { case showAppsecCov: fmt.Printf("appsec_rules=%d%%", appsecRuleCoveragePercent) } - os.Exit(0) + return nil } switch cfg.Cscli.Output { diff --git a/cmd/crowdsec-cli/lapi.go b/cmd/crowdsec-cli/lapi.go index d2955230de9..0b8bc59dad5 100644 --- a/cmd/crowdsec-cli/lapi.go +++ b/cmd/crowdsec-cli/lapi.go @@ -376,8 +376,8 @@ cscli lapi context detect crowdsecurity/sshd-logs RunE: func(cmd *cobra.Command, args []string) error { cfg := cli.cfg() if !detectAll && len(args) == 0 { - log.Infof("Please provide parsers to detect or --all flag.") - printHelp(cmd) + _ = cmd.Help() + return errors.New("please provide parsers to detect or --all flag") } // to avoid all the log.Info from the loaders functions @@ -491,9 +491,6 @@ func (cli *cliLapi) newContextCmd() *cobra.Command { return nil }, - Run: func(cmd *cobra.Command, _ []string) { - printHelp(cmd) - }, } cmd.AddCommand(cli.newContextAddCmd()) diff --git 
a/cmd/crowdsec-cli/simulation.go b/cmd/crowdsec-cli/simulation.go index 3301c4b797e..f8d8a660b8c 100644 --- a/cmd/crowdsec-cli/simulation.go +++ b/cmd/crowdsec-cli/simulation.go @@ -107,7 +107,7 @@ func (cli *cliSimulation) NewEnableCmd() *cobra.Command { return fmt.Errorf("unable to enable global simulation mode: %w", err) } } else { - printHelp(cmd) + _ = cmd.Help() } return nil @@ -154,7 +154,7 @@ func (cli *cliSimulation) NewDisableCmd() *cobra.Command { return fmt.Errorf("unable to disable global simulation mode: %w", err) } } else { - printHelp(cmd) + _ = cmd.Help() } return nil diff --git a/cmd/crowdsec-cli/utils.go b/cmd/crowdsec-cli/utils.go index 1ae8f9da2ed..f6c32094958 100644 --- a/cmd/crowdsec-cli/utils.go +++ b/cmd/crowdsec-cli/utils.go @@ -5,18 +5,9 @@ import ( "net" "strings" - log "github.com/sirupsen/logrus" - "github.com/spf13/cobra" - "github.com/crowdsecurity/crowdsec/pkg/types" ) -func printHelp(cmd *cobra.Command) { - if err := cmd.Help(); err != nil { - log.Fatalf("unable to print help(): %s", err) - } -} - func manageCliDecisionAlerts(ip *string, ipRange *string, scope *string, value *string) error { /*if a range is provided, change the scope*/ if *ipRange != "" { From a529e66cd893018f93ddebf0c968a772e8a6ae47 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 13 Jun 2024 11:07:44 +0200 Subject: [PATCH 188/318] Typos (#3084) * comment fix * redundancy * typo nill -> nil * remove extra newline from log --- cmd/crowdsec-cli/support.go | 2 +- cmd/crowdsec/parse.go | 2 +- pkg/apiserver/apiserver_test.go | 2 +- pkg/database/flush.go | 2 +- pkg/exprhelpers/crowdsec_cti_test.go | 2 +- pkg/exprhelpers/debugger_test.go | 10 +++++----- pkg/parser/runtime.go | 4 ++-- 7 files changed, 12 insertions(+), 12 deletions(-) diff --git a/cmd/crowdsec-cli/support.go b/cmd/crowdsec-cli/support.go index 3b0f53cd6e1..061733ef8d3 100644 --- a/cmd/crowdsec-cli/support.go +++ b/cmd/crowdsec-cli/support.go @@ -427,7 +427,7 @@ 
func (cli *cliSupport) writeToZip(zipWriter *zip.Writer, filename string, mtime } } -// writeToZip adds a file to the zip archive, from a file, and retains the mtime +// writeFileToZip adds a file to the zip archive, from a file, and retains the mtime func (cli *cliSupport) writeFileToZip(zw *zip.Writer, filename string, fromFile string) { mtime := time.Now() diff --git a/cmd/crowdsec/parse.go b/cmd/crowdsec/parse.go index 39eedfc858e..26eae66be2b 100644 --- a/cmd/crowdsec/parse.go +++ b/cmd/crowdsec/parse.go @@ -37,7 +37,7 @@ func runParse(input chan types.Event, output chan types.Event, parserCTX parser. /* parse the log using magic */ parsed, err := parser.Parse(parserCTX, event, nodes) if err != nil { - log.Errorf("failed parsing : %v\n", err) + log.Errorf("failed parsing: %v", err) } elapsed := time.Since(startParsing) globalParsingHistogram.With(prometheus.Labels{"source": event.Line.Src, "type": event.Line.Module}).Observe(elapsed.Seconds()) diff --git a/pkg/apiserver/apiserver_test.go b/pkg/apiserver/apiserver_test.go index 20c48337833..26531a592da 100644 --- a/pkg/apiserver/apiserver_test.go +++ b/pkg/apiserver/apiserver_test.go @@ -294,7 +294,7 @@ func TestWithWrongFlushConfig(t *testing.T) { config.API.Server.DbConfig.Flush.MaxItems = &maxItems apiServer, err := NewServer(config.API.Server) - cstest.RequireErrorContains(t, err, "max_items can't be zero or negative number") + cstest.RequireErrorContains(t, err, "max_items can't be zero or negative") assert.Nil(t, apiServer) } diff --git a/pkg/database/flush.go b/pkg/database/flush.go index 56e42715b2c..5a1f0bea5bf 100644 --- a/pkg/database/flush.go +++ b/pkg/database/flush.go @@ -22,7 +22,7 @@ func (c *Client) StartFlushScheduler(config *csconfig.FlushDBCfg) (*gocron.Sched maxAge := "" if config.MaxItems != nil && *config.MaxItems <= 0 { - return nil, errors.New("max_items can't be zero or negative number") + return nil, errors.New("max_items can't be zero or negative") } if config.MaxItems != nil { diff 
--git a/pkg/exprhelpers/crowdsec_cti_test.go b/pkg/exprhelpers/crowdsec_cti_test.go index 4c0346e447c..9f78b932d6d 100644 --- a/pkg/exprhelpers/crowdsec_cti_test.go +++ b/pkg/exprhelpers/crowdsec_cti_test.go @@ -109,7 +109,7 @@ func smokeHandler(req *http.Request) *http.Response { } } -func TestNillClient(t *testing.T) { +func TestNilClient(t *testing.T) { defer ShutdownCrowdsecCTI() if err := InitCrowdsecCTI(ptr.Of(""), nil, nil, nil); !errors.Is(err, cticlient.ErrDisabled) { diff --git a/pkg/exprhelpers/debugger_test.go b/pkg/exprhelpers/debugger_test.go index 9c713a8d4f5..6832b4efaa8 100644 --- a/pkg/exprhelpers/debugger_test.go +++ b/pkg/exprhelpers/debugger_test.go @@ -59,7 +59,7 @@ func TestBaseDbg(t *testing.T) { "base_string": "hello world", "base_int": 42, "base_float": 42.42, - "nillvar": &teststruct{}, + "nilvar": &teststruct{}, "base_struct": struct { Foo string Bar int @@ -79,8 +79,8 @@ func TestBaseDbg(t *testing.T) { //Missing multi parametes function tests := []ExprDbgTest{ { - Name: "nill deref", - Expr: "Upper('1') == '1' && nillvar.Foo == '42'", + Name: "nil deref", + Expr: "Upper('1') == '1' && nilvar.Foo == '42'", Env: defaultEnv, ExpectedFailRuntime: true, ExpectedOutputs: []OpOutput{ @@ -307,8 +307,8 @@ func TestBaseDbg(t *testing.T) { t.Fatalf("test %s : unexpected compile error : %s", test.Name, err) } } - if test.Name == "nill deref" { - test.Env["nillvar"] = nil + if test.Name == "nil deref" { + test.Env["nilvar"] = nil } outdbg, ret, err := RunWithDebug(prog, test.Env, logger) if test.ExpectedFailRuntime { diff --git a/pkg/parser/runtime.go b/pkg/parser/runtime.go index 1596ef5ffd9..8068690b68f 100644 --- a/pkg/parser/runtime.go +++ b/pkg/parser/runtime.go @@ -42,8 +42,8 @@ func SetTargetByName(target string, value string, evt *types.Event) bool { iter := reflect.ValueOf(evt).Elem() if (iter == reflect.Value{}) || iter.IsZero() { - log.Tracef("event is nill") - //event is nill + log.Tracef("event is nil") + //event is nil return false } 
for _, f := range strings.Split(target, ".") { From 2f6d4cccd7729ff809c920c3dea128c51e0fbe9b Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 13 Jun 2024 11:17:27 +0200 Subject: [PATCH 189/318] cscli: refactor hubtests / extract methods run(), coverage() (#3086) --- .golangci.yml | 14 +- cmd/crowdsec-cli/hubtest.go | 319 ++++++++++++++++++------------------ 2 files changed, 171 insertions(+), 162 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 62fca6eb1d1..3e402529180 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -14,7 +14,7 @@ linters-settings: gocognit: # lower this after refactoring - min-complexity: 128 + min-complexity: 118 govet: enable-all: true @@ -24,7 +24,7 @@ linters-settings: maintidx: # raise this after refactoring - under: 11 + under: 16 misspell: locale: US @@ -473,6 +473,11 @@ issues: - canonicalheader path: pkg/apiserver/middlewares/v1/tls_auth.go + # tolerate complex functions in tests for now + - linters: + - maintidx + path: "(.+)_test.go" + # tolerate long functions in tests - linters: - revive @@ -512,11 +517,6 @@ issues: path: "cmd/crowdsec-cli/utils.go" text: "deep-exit: .*" - - linters: - - revive - path: "cmd/crowdsec-cli/hubtest.go" - text: "deep-exit: .*" - - linters: - revive path: "pkg/leakybucket/overflows.go" diff --git a/cmd/crowdsec-cli/hubtest.go b/cmd/crowdsec-cli/hubtest.go index d58631e26c9..8796fa48a17 100644 --- a/cmd/crowdsec-cli/hubtest.go +++ b/cmd/crowdsec-cli/hubtest.go @@ -234,6 +234,44 @@ cscli hubtest create my-scenario-test --parsers crowdsecurity/nginx --scenarios return cmd } + +func (cli *cliHubTest) run(runAll bool, NucleiTargetHost string, AppSecHost string, args []string) error { + cfg := cli.cfg() + + if !runAll && len(args) == 0 { + return errors.New("please provide test to run or --all flag") + } + hubPtr.NucleiTargetHost = NucleiTargetHost + hubPtr.AppSecHost = AppSecHost + if runAll { + if err := hubPtr.LoadAllTests(); err != nil { + return 
fmt.Errorf("unable to load all tests: %+v", err) + } + } else { + for _, testName := range args { + _, err := hubPtr.LoadTestItem(testName) + if err != nil { + return fmt.Errorf("unable to load test '%s': %w", testName, err) + } + } + } + + // set timezone to avoid DST issues + os.Setenv("TZ", "UTC") + for _, test := range hubPtr.Tests { + if cfg.Cscli.Output == "human" { + log.Infof("Running test '%s'", test.Name) + } + err := test.Run() + if err != nil { + log.Errorf("running test '%s' failed: %+v", test.Name, err) + } + } + + return nil +} + + func (cli *cliHubTest) NewRunCmd() *cobra.Command { var ( noClean bool @@ -247,41 +285,8 @@ func (cli *cliHubTest) NewRunCmd() *cobra.Command { Use: "run", Short: "run [test_name]", DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args []string) error { - cfg := cli.cfg() - - if !runAll && len(args) == 0 { - _ = cmd.Help() - return errors.New("please provide test to run or --all flag") - } - hubPtr.NucleiTargetHost = NucleiTargetHost - hubPtr.AppSecHost = AppSecHost - if runAll { - if err := hubPtr.LoadAllTests(); err != nil { - return fmt.Errorf("unable to load all tests: %+v", err) - } - } else { - for _, testName := range args { - _, err := hubPtr.LoadTestItem(testName) - if err != nil { - return fmt.Errorf("unable to load test '%s': %w", testName, err) - } - } - } - - // set timezone to avoid DST issues - os.Setenv("TZ", "UTC") - for _, test := range hubPtr.Tests { - if cfg.Cscli.Output == "human" { - log.Infof("Running test '%s'", test.Name) - } - err := test.Run() - if err != nil { - log.Errorf("running test '%s' failed: %+v", test.Name, err) - } - } - - return nil + RunE: func(_ *cobra.Command, args []string) error { + return cli.run(runAll, NucleiTargetHost, AppSecHost, args) }, PersistentPostRunE: func(_ *cobra.Command, _ []string) error { cfg := cli.cfg() @@ -493,140 +498,144 @@ func (cli *cliHubTest) NewListCmd() *cobra.Command { return cmd } -func (cli *cliHubTest) NewCoverageCmd() *cobra.Command { - 
var ( - showParserCov bool - showScenarioCov bool - showOnlyPercent bool - showAppsecCov bool - ) +func (cli *cliHubTest) coverage(showScenarioCov bool, showParserCov bool, showAppsecCov bool, showOnlyPercent bool) error { + cfg := cli.cfg() - cmd := &cobra.Command{ - Use: "coverage", - Short: "coverage", - DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, _ []string) error { - cfg := cli.cfg() - - // for this one we explicitly don't do for appsec - if err := HubTest.LoadAllTests(); err != nil { - return fmt.Errorf("unable to load all tests: %+v", err) - } - var err error - scenarioCoverage := []hubtest.Coverage{} - parserCoverage := []hubtest.Coverage{} - appsecRuleCoverage := []hubtest.Coverage{} - scenarioCoveragePercent := 0 - parserCoveragePercent := 0 - appsecRuleCoveragePercent := 0 - - // if both are false (flag by default), show both - showAll := !showScenarioCov && !showParserCov && !showAppsecCov - - if showParserCov || showAll { - parserCoverage, err = HubTest.GetParsersCoverage() - if err != nil { - return fmt.Errorf("while getting parser coverage: %w", err) - } - parserTested := 0 - for _, test := range parserCoverage { - if test.TestsCount > 0 { - parserTested++ - } - } - parserCoveragePercent = int(math.Round((float64(parserTested) / float64(len(parserCoverage)) * 100))) + // for this one we explicitly don't do for appsec + if err := HubTest.LoadAllTests(); err != nil { + return fmt.Errorf("unable to load all tests: %+v", err) + } + var err error + scenarioCoverage := []hubtest.Coverage{} + parserCoverage := []hubtest.Coverage{} + appsecRuleCoverage := []hubtest.Coverage{} + scenarioCoveragePercent := 0 + parserCoveragePercent := 0 + appsecRuleCoveragePercent := 0 + + // if both are false (flag by default), show both + showAll := !showScenarioCov && !showParserCov && !showAppsecCov + + if showParserCov || showAll { + parserCoverage, err = HubTest.GetParsersCoverage() + if err != nil { + return fmt.Errorf("while getting parser coverage: %w", 
err) + } + parserTested := 0 + for _, test := range parserCoverage { + if test.TestsCount > 0 { + parserTested++ } + } + parserCoveragePercent = int(math.Round((float64(parserTested) / float64(len(parserCoverage)) * 100))) + } - if showScenarioCov || showAll { - scenarioCoverage, err = HubTest.GetScenariosCoverage() - if err != nil { - return fmt.Errorf("while getting scenario coverage: %w", err) - } - - scenarioTested := 0 - for _, test := range scenarioCoverage { - if test.TestsCount > 0 { - scenarioTested++ - } - } + if showScenarioCov || showAll { + scenarioCoverage, err = HubTest.GetScenariosCoverage() + if err != nil { + return fmt.Errorf("while getting scenario coverage: %w", err) + } - scenarioCoveragePercent = int(math.Round((float64(scenarioTested) / float64(len(scenarioCoverage)) * 100))) + scenarioTested := 0 + for _, test := range scenarioCoverage { + if test.TestsCount > 0 { + scenarioTested++ } + } - if showAppsecCov || showAll { - appsecRuleCoverage, err = HubTest.GetAppsecCoverage() - if err != nil { - return fmt.Errorf("while getting scenario coverage: %w", err) - } + scenarioCoveragePercent = int(math.Round((float64(scenarioTested) / float64(len(scenarioCoverage)) * 100))) + } - appsecRuleTested := 0 - for _, test := range appsecRuleCoverage { - if test.TestsCount > 0 { - appsecRuleTested++ - } - } - appsecRuleCoveragePercent = int(math.Round((float64(appsecRuleTested) / float64(len(appsecRuleCoverage)) * 100))) - } + if showAppsecCov || showAll { + appsecRuleCoverage, err = HubTest.GetAppsecCoverage() + if err != nil { + return fmt.Errorf("while getting scenario coverage: %w", err) + } - if showOnlyPercent { - switch { - case showAll: - fmt.Printf("parsers=%d%%\nscenarios=%d%%\nappsec_rules=%d%%", parserCoveragePercent, scenarioCoveragePercent, appsecRuleCoveragePercent) - case showParserCov: - fmt.Printf("parsers=%d%%", parserCoveragePercent) - case showScenarioCov: - fmt.Printf("scenarios=%d%%", scenarioCoveragePercent) - case showAppsecCov: - 
fmt.Printf("appsec_rules=%d%%", appsecRuleCoveragePercent) - } - return nil + appsecRuleTested := 0 + for _, test := range appsecRuleCoverage { + if test.TestsCount > 0 { + appsecRuleTested++ } + } + appsecRuleCoveragePercent = int(math.Round((float64(appsecRuleTested) / float64(len(appsecRuleCoverage)) * 100))) + } - switch cfg.Cscli.Output { - case "human": - if showParserCov || showAll { - hubTestParserCoverageTable(color.Output, parserCoverage) - } + if showOnlyPercent { + switch { + case showAll: + fmt.Printf("parsers=%d%%\nscenarios=%d%%\nappsec_rules=%d%%", parserCoveragePercent, scenarioCoveragePercent, appsecRuleCoveragePercent) + case showParserCov: + fmt.Printf("parsers=%d%%", parserCoveragePercent) + case showScenarioCov: + fmt.Printf("scenarios=%d%%", scenarioCoveragePercent) + case showAppsecCov: + fmt.Printf("appsec_rules=%d%%", appsecRuleCoveragePercent) + } + return nil + } - if showScenarioCov || showAll { - hubTestScenarioCoverageTable(color.Output, scenarioCoverage) - } + switch cfg.Cscli.Output { + case "human": + if showParserCov || showAll { + hubTestParserCoverageTable(color.Output, parserCoverage) + } + + if showScenarioCov || showAll { + hubTestScenarioCoverageTable(color.Output, scenarioCoverage) + } + + if showAppsecCov || showAll { + hubTestAppsecRuleCoverageTable(color.Output, appsecRuleCoverage) + } + + fmt.Println() + if showParserCov || showAll { + fmt.Printf("PARSERS : %d%% of coverage\n", parserCoveragePercent) + } + if showScenarioCov || showAll { + fmt.Printf("SCENARIOS : %d%% of coverage\n", scenarioCoveragePercent) + } + if showAppsecCov || showAll { + fmt.Printf("APPSEC RULES : %d%% of coverage\n", appsecRuleCoveragePercent) + } + case "json": + dump, err := json.MarshalIndent(parserCoverage, "", " ") + if err != nil { + return err + } + fmt.Printf("%s", dump) + dump, err = json.MarshalIndent(scenarioCoverage, "", " ") + if err != nil { + return err + } + fmt.Printf("%s", dump) + dump, err = 
json.MarshalIndent(appsecRuleCoverage, "", " ") + if err != nil { + return err + } + fmt.Printf("%s", dump) + default: + return errors.New("only human/json output modes are supported") + } - if showAppsecCov || showAll { - hubTestAppsecRuleCoverageTable(color.Output, appsecRuleCoverage) - } + return nil +} - fmt.Println() - if showParserCov || showAll { - fmt.Printf("PARSERS : %d%% of coverage\n", parserCoveragePercent) - } - if showScenarioCov || showAll { - fmt.Printf("SCENARIOS : %d%% of coverage\n", scenarioCoveragePercent) - } - if showAppsecCov || showAll { - fmt.Printf("APPSEC RULES : %d%% of coverage\n", appsecRuleCoveragePercent) - } - case "json": - dump, err := json.MarshalIndent(parserCoverage, "", " ") - if err != nil { - return err - } - fmt.Printf("%s", dump) - dump, err = json.MarshalIndent(scenarioCoverage, "", " ") - if err != nil { - return err - } - fmt.Printf("%s", dump) - dump, err = json.MarshalIndent(appsecRuleCoverage, "", " ") - if err != nil { - return err - } - fmt.Printf("%s", dump) - default: - return errors.New("only human/json output modes are supported") - } +func (cli *cliHubTest) NewCoverageCmd() *cobra.Command { + var ( + showParserCov bool + showScenarioCov bool + showOnlyPercent bool + showAppsecCov bool + ) - return nil + cmd := &cobra.Command{ + Use: "coverage", + Short: "coverage", + DisableAutoGenTag: true, + RunE: func(_ *cobra.Command, _ []string) error { + return cli.coverage(showScenarioCov, showParserCov, showAppsecCov, showOnlyPercent) }, } From e6ebf7af223e36f47d06dc009ecd4a308da34464 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 13 Jun 2024 11:33:01 +0200 Subject: [PATCH 190/318] enable linter: revive (superfluous-else) (#3082) * enable linter: revive (superfluous-else) * lint (whitespace) --- .golangci.yml | 2 - pkg/parser/parsing_test.go | 87 +++++++++++++++++++++++++++----------- 2 files changed, 63 insertions(+), 26 deletions(-) diff --git a/.golangci.yml 
b/.golangci.yml index 3e402529180..bb20d42699f 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -149,8 +149,6 @@ linters-settings: disabled: true - name: struct-tag disabled: true - - name: superfluous-else - disabled: true - name: time-equal disabled: true - name: var-naming diff --git a/pkg/parser/parsing_test.go b/pkg/parser/parsing_test.go index 534b433a1b9..c5b1c353e9f 100644 --- a/pkg/parser/parsing_test.go +++ b/pkg/parser/parsing_test.go @@ -30,6 +30,7 @@ func TestParser(t *testing.T) { debug = true log.SetLevel(log.InfoLevel) + envSetting := os.Getenv("TEST_ONLY") pctx, ectx, err := prepTests() @@ -208,6 +209,7 @@ func loadTestFile(file string) []TestFile { dec := yaml.NewDecoder(yamlFile) dec.SetStrict(true) + var testSet []TestFile for { @@ -232,23 +234,26 @@ func loadTestFile(file string) []TestFile { func matchEvent(expected types.Event, out types.Event, debug bool) ([]string, bool) { var retInfo []string - var valid = false + + valid := false expectMaps := []map[string]string{expected.Parsed, expected.Meta, expected.Enriched} outMaps := []map[string]string{out.Parsed, out.Meta, out.Enriched} outLabels := []string{"Parsed", "Meta", "Enriched"} - //allow to check as well for stage and processed flags + // allow to check as well for stage and processed flags if expected.Stage != "" { if expected.Stage != out.Stage { if debug { retInfo = append(retInfo, fmt.Sprintf("mismatch stage %s != %s", expected.Stage, out.Stage)) } + goto checkFinished - } else { - valid = true - if debug { - retInfo = append(retInfo, fmt.Sprintf("ok stage %s == %s", expected.Stage, out.Stage)) - } + } + + valid = true + + if debug { + retInfo = append(retInfo, fmt.Sprintf("ok stage %s == %s", expected.Stage, out.Stage)) } } @@ -256,26 +261,30 @@ func matchEvent(expected types.Event, out types.Event, debug bool) ([]string, bo if debug { retInfo = append(retInfo, fmt.Sprintf("mismatch process %t != %t", expected.Process, out.Process)) } + goto checkFinished - } else { - valid = true 
- if debug { - retInfo = append(retInfo, fmt.Sprintf("ok process %t == %t", expected.Process, out.Process)) - } + } + + valid = true + + if debug { + retInfo = append(retInfo, fmt.Sprintf("ok process %t == %t", expected.Process, out.Process)) } if expected.Whitelisted != out.Whitelisted { if debug { retInfo = append(retInfo, fmt.Sprintf("mismatch whitelist %t != %t", expected.Whitelisted, out.Whitelisted)) } + goto checkFinished - } else { - if debug { - retInfo = append(retInfo, fmt.Sprintf("ok whitelist %t == %t", expected.Whitelisted, out.Whitelisted)) - } - valid = true } + if debug { + retInfo = append(retInfo, fmt.Sprintf("ok whitelist %t == %t", expected.Whitelisted, out.Whitelisted)) + } + + valid = true + for mapIdx := 0; mapIdx < len(expectMaps); mapIdx++ { for expKey, expVal := range expectMaps[mapIdx] { outVal, ok := outMaps[mapIdx][expKey] @@ -283,19 +292,26 @@ func matchEvent(expected types.Event, out types.Event, debug bool) ([]string, bo if debug { retInfo = append(retInfo, fmt.Sprintf("missing entry %s[%s]", outLabels[mapIdx], expKey)) } + valid = false + goto checkFinished } - if outVal != expVal { //ok entry + + if outVal != expVal { // ok entry if debug { retInfo = append(retInfo, fmt.Sprintf("mismatch %s[%s] %s != %s", outLabels[mapIdx], expKey, expVal, outVal)) } + valid = false + goto checkFinished } + if debug { retInfo = append(retInfo, fmt.Sprintf("ok %s[%s] %s == %s", outLabels[mapIdx], expKey, expVal, outVal)) } + valid = true } } @@ -309,6 +325,7 @@ checkFinished: retInfo = append(retInfo, fmt.Sprintf("KO ! 
\n\t%s", strings.Join(retInfo, "\n\t"))) } } + return retInfo, valid } @@ -320,9 +337,10 @@ func testSubSet(testSet TestFile, pctx UnixParserCtx, nodes []Node) (bool, error if err != nil { log.Errorf("Failed to process %s : %v", spew.Sdump(in), err) } - //log.Infof("Parser output : %s", spew.Sdump(out)) + // log.Infof("Parser output : %s", spew.Sdump(out)) results = append(results, out) } + log.Infof("parsed %d lines", len(testSet.Lines)) log.Infof("got %d results", len(results)) @@ -336,15 +354,17 @@ func testSubSet(testSet TestFile, pctx UnixParserCtx, nodes []Node) (bool, error reCheck: failinfo := []string{} + for ridx, result := range results { for eidx, expected := range testSet.Results { explain, match := matchEvent(expected, result, debug) if match { log.Infof("expected %d/%d matches result %d/%d", eidx, len(testSet.Results), ridx, len(results)) + if len(explain) > 0 { log.Printf("-> %s", explain[len(explain)-1]) } - //don't do this at home : delete current element from list and redo + // don't do this at home : delete current element from list and redo results[len(results)-1], results[ridx] = results[ridx], results[len(results)-1] results = results[:len(results)-1] @@ -352,34 +372,40 @@ reCheck: testSet.Results = testSet.Results[:len(testSet.Results)-1] goto reCheck - } else { - failinfo = append(failinfo, explain...) } + + failinfo = append(failinfo, explain...) 
} } + if len(results) > 0 { log.Printf("Errors : %s", strings.Join(failinfo, " / ")) return false, fmt.Errorf("leftover results : %+v", results) } + if len(testSet.Results) > 0 { log.Printf("Errors : %s", strings.Join(failinfo, " / ")) return false, fmt.Errorf("leftover expected results : %+v", testSet.Results) } + return true, nil } func testFile(testSet []TestFile, pctx UnixParserCtx, nodes []Node) bool { log.Warning("Going to process one test set") + for _, tf := range testSet { - //func testSubSet(testSet TestFile, pctx UnixParserCtx, nodes []Node) (bool, error) { + // func testSubSet(testSet TestFile, pctx UnixParserCtx, nodes []Node) (bool, error) { testOk, err := testSubSet(tf, pctx, nodes) if err != nil { log.Fatalf("test failed : %s", err) } + if !testOk { log.Fatalf("failed test : %+v", tf) } } + return true } @@ -404,48 +430,61 @@ func TestGeneratePatternsDoc(t *testing.T) { if err != nil { t.Fatalf("unable to load patterns : %s", err) } + log.Infof("-> %s", spew.Sdump(pctx)) /*don't judge me, we do it for the users*/ p := make(PairList, len(pctx.Grok.Patterns)) i := 0 + for key, val := range pctx.Grok.Patterns { p[i] = Pair{key, val} p[i].Value = strings.ReplaceAll(p[i].Value, "{%{", "\\{\\%\\{") i++ } + sort.Sort(p) - f, err := os.OpenFile("./patterns-documentation.md", os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644) + f, err := os.OpenFile("./patterns-documentation.md", os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0o644) if err != nil { t.Fatalf("failed to open : %s", err) } + if _, err := f.WriteString("# Patterns documentation\n\n"); err != nil { t.Fatal("failed to write to file") } + if _, err := f.WriteString("You will find here a generated documentation of all the patterns loaded by crowdsec.\n"); err != nil { t.Fatal("failed to write to file") } + if _, err := f.WriteString("They are sorted by pattern length, and are meant to be used in parsers, in the form %{PATTERN_NAME}.\n"); err != nil { t.Fatal("failed to write to file") } + if _, err := 
f.WriteString("\n\n"); err != nil { t.Fatal("failed to write to file") } + for _, k := range p { if _, err := fmt.Fprintf(f, "## %s\n\nPattern :\n```\n%s\n```\n\n", k.Key, k.Value); err != nil { t.Fatal("failed to write to file") } + fmt.Printf("%v\t%v\n", k.Key, k.Value) } + if _, err := f.WriteString("\n"); err != nil { t.Fatal("failed to write to file") } + if _, err := f.WriteString("# Documentation generation\n"); err != nil { t.Fatal("failed to write to file") } + if _, err := f.WriteString("This documentation is generated by `pkg/parser` : `GO_WANT_TEST_DOC=1 go test -run TestGeneratePatternsDoc`\n"); err != nil { t.Fatal("failed to write to file") } + f.Close() } From 44a2014f6266c076b87335b8fbb0e469511e44ed Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 17 Jun 2024 10:16:46 +0200 Subject: [PATCH 191/318] db: don't set bouncer last_pull until first connection (#3020) * db: don't set bouncer last_pull until first connection * cscli bouncers prune: query creation date if they never connected --- cmd/crowdsec-cli/bouncers.go | 9 ++++++-- cmd/crowdsec-cli/bouncers_table.go | 7 +++++- pkg/apiserver/controllers/v1/decisions.go | 11 +++++++--- pkg/database/bouncers.go | 13 ++++++++++-- pkg/database/decisions.go | 15 +++++++++---- pkg/database/ent/bouncer.go | 11 ++++++---- pkg/database/ent/bouncer/bouncer.go | 2 -- pkg/database/ent/bouncer/where.go | 10 +++++++++ pkg/database/ent/bouncer_create.go | 9 +------- pkg/database/ent/bouncer_update.go | 18 ++++++++++++++++ pkg/database/ent/migrate/schema.go | 2 +- pkg/database/ent/mutation.go | 21 +++++++++++++++++- pkg/database/ent/runtime.go | 4 ---- pkg/database/ent/schema/bouncer.go | 3 +-- test/bats/10_bouncers.bats | 26 +++++++++++++++++++++-- 15 files changed, 125 insertions(+), 36 deletions(-) diff --git a/cmd/crowdsec-cli/bouncers.go b/cmd/crowdsec-cli/bouncers.go index f8628538378..0c96cc5eee4 100644 --- a/cmd/crowdsec-cli/bouncers.go +++ b/cmd/crowdsec-cli/bouncers.go 
@@ -116,7 +116,12 @@ func (cli *cliBouncers) list() error { valid = "pending" } - if err := csvwriter.Write([]string{b.Name, b.IPAddress, valid, b.LastPull.Format(time.RFC3339), b.Type, b.Version, b.AuthType}); err != nil { + lastPull := "" + if b.LastPull != nil { + lastPull = b.LastPull.Format(time.RFC3339) + } + + if err := csvwriter.Write([]string{b.Name, b.IPAddress, valid, lastPull, b.Type, b.Version, b.AuthType}); err != nil { return fmt.Errorf("failed to write raw: %w", err) } } @@ -259,7 +264,7 @@ func (cli *cliBouncers) prune(duration time.Duration, force bool) error { } } - bouncers, err := cli.db.QueryBouncersLastPulltimeLT(time.Now().UTC().Add(-duration)) + bouncers, err := cli.db.QueryBouncersInactiveSince(time.Now().UTC().Add(-duration)) if err != nil { return fmt.Errorf("unable to query bouncers: %w", err) } diff --git a/cmd/crowdsec-cli/bouncers_table.go b/cmd/crowdsec-cli/bouncers_table.go index 417eb9e8e0b..c32762ba266 100644 --- a/cmd/crowdsec-cli/bouncers_table.go +++ b/cmd/crowdsec-cli/bouncers_table.go @@ -21,7 +21,12 @@ func getBouncersTable(out io.Writer, bouncers []*ent.Bouncer) { revoked = emoji.Prohibited } - t.AddRow(b.Name, b.IPAddress, revoked, b.LastPull.Format(time.RFC3339), b.Type, b.Version, b.AuthType) + lastPull := "" + if b.LastPull != nil { + lastPull = b.LastPull.Format(time.RFC3339) + } + + t.AddRow(b.Name, b.IPAddress, revoked, lastPull, b.Type, b.Version, b.AuthType) } t.Render() diff --git a/pkg/apiserver/controllers/v1/decisions.go b/pkg/apiserver/controllers/v1/decisions.go index 543c832095a..3d8e0232224 100644 --- a/pkg/apiserver/controllers/v1/decisions.go +++ b/pkg/apiserver/controllers/v1/decisions.go @@ -72,7 +72,7 @@ func (c *Controller) GetDecision(gctx *gin.Context) { return } - if time.Now().UTC().Sub(bouncerInfo.LastPull) >= time.Minute { + if bouncerInfo.LastPull == nil || time.Now().UTC().Sub(*bouncerInfo.LastPull) >= time.Minute { if err := c.DBClient.UpdateBouncerLastPull(time.Now().UTC(), bouncerInfo.ID); 
err != nil { log.Errorf("failed to update bouncer last pull: %v", err) } @@ -186,7 +186,7 @@ func writeStartupDecisions(gctx *gin.Context, filters map[string][]string, dbFun return nil } -func writeDeltaDecisions(gctx *gin.Context, filters map[string][]string, lastPull time.Time, dbFunc func(time.Time, map[string][]string) ([]*ent.Decision, error)) error { +func writeDeltaDecisions(gctx *gin.Context, filters map[string][]string, lastPull *time.Time, dbFunc func(*time.Time, map[string][]string) ([]*ent.Decision, error)) error { //respBuffer := bytes.NewBuffer([]byte{}) limit := 30000 //FIXME : make it configurable needComma := false @@ -348,8 +348,13 @@ func (c *Controller) StreamDecisionNonChunked(gctx *gin.Context, bouncerInfo *en //data = KeepLongestDecision(data) ret["new"] = FormatDecisions(data) + since := time.Time{} + if bouncerInfo.LastPull != nil { + since = bouncerInfo.LastPull.Add(-2 * time.Second) + } + // getting expired decisions - data, err = c.DBClient.QueryExpiredDecisionsSinceWithFilters(bouncerInfo.LastPull.Add((-2 * time.Second)), filters) // do we want to give exactly lastPull time ? + data, err = c.DBClient.QueryExpiredDecisionsSinceWithFilters(&since, filters) // do we want to give exactly lastPull time ? 
if err != nil { log.Errorf("unable to query expired decision for '%s' : %v", bouncerInfo.Name, err) gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()}) diff --git a/pkg/database/bouncers.go b/pkg/database/bouncers.go index 2cc6b9dcb47..03a3227301d 100644 --- a/pkg/database/bouncers.go +++ b/pkg/database/bouncers.go @@ -115,6 +115,15 @@ func (c *Client) UpdateBouncerTypeAndVersion(bType string, version string, id in return nil } -func (c *Client) QueryBouncersLastPulltimeLT(t time.Time) ([]*ent.Bouncer, error) { - return c.Ent.Bouncer.Query().Where(bouncer.LastPullLT(t)).All(c.CTX) +func (c *Client) QueryBouncersInactiveSince(t time.Time) ([]*ent.Bouncer, error) { + return c.Ent.Bouncer.Query().Where( + // poor man's coalesce + bouncer.Or( + bouncer.LastPullLT(t), + bouncer.And( + bouncer.LastPullIsNil(), + bouncer.CreatedAtLT(t), + ), + ), + ).All(c.CTX) } diff --git a/pkg/database/decisions.go b/pkg/database/decisions.go index 294515d603e..fc582247e59 100644 --- a/pkg/database/decisions.go +++ b/pkg/database/decisions.go @@ -254,11 +254,15 @@ func longestDecisionForScopeTypeValue(s *sql.Selector) { ) } -func (c *Client) QueryExpiredDecisionsSinceWithFilters(since time.Time, filters map[string][]string) ([]*ent.Decision, error) { +func (c *Client) QueryExpiredDecisionsSinceWithFilters(since *time.Time, filters map[string][]string) ([]*ent.Decision, error) { query := c.Ent.Decision.Query().Where( decision.UntilLT(time.Now().UTC()), - decision.UntilGT(since), ) + + if since != nil { + query = query.Where(decision.UntilGT(*since)) + } + // Allow a bouncer to ask for non-deduplicated results if v, ok := filters["dedup"]; !ok || v[0] != "false" { query = query.Where(longestDecisionForScopeTypeValue) @@ -281,12 +285,15 @@ func (c *Client) QueryExpiredDecisionsSinceWithFilters(since time.Time, filters return data, nil } -func (c *Client) QueryNewDecisionsSinceWithFilters(since time.Time, filters map[string][]string) ([]*ent.Decision, error) { +func (c 
*Client) QueryNewDecisionsSinceWithFilters(since *time.Time, filters map[string][]string) ([]*ent.Decision, error) { query := c.Ent.Decision.Query().Where( - decision.CreatedAtGT(since), decision.UntilGT(time.Now().UTC()), ) + if since != nil { + query = query.Where(decision.CreatedAtGT(*since)) + } + // Allow a bouncer to ask for non-deduplicated results if v, ok := filters["dedup"]; !ok || v[0] != "false" { query = query.Where(longestDecisionForScopeTypeValue) diff --git a/pkg/database/ent/bouncer.go b/pkg/database/ent/bouncer.go index 973442bfa66..d7597d2a449 100644 --- a/pkg/database/ent/bouncer.go +++ b/pkg/database/ent/bouncer.go @@ -34,7 +34,7 @@ type Bouncer struct { // Version holds the value of the "version" field. Version string `json:"version"` // LastPull holds the value of the "last_pull" field. - LastPull time.Time `json:"last_pull"` + LastPull *time.Time `json:"last_pull"` // AuthType holds the value of the "auth_type" field. AuthType string `json:"auth_type"` selectValues sql.SelectValues @@ -126,7 +126,8 @@ func (b *Bouncer) assignValues(columns []string, values []any) error { if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field last_pull", values[i]) } else if value.Valid { - b.LastPull = value.Time + b.LastPull = new(time.Time) + *b.LastPull = value.Time } case bouncer.FieldAuthType: if value, ok := values[i].(*sql.NullString); !ok { @@ -193,8 +194,10 @@ func (b *Bouncer) String() string { builder.WriteString("version=") builder.WriteString(b.Version) builder.WriteString(", ") - builder.WriteString("last_pull=") - builder.WriteString(b.LastPull.Format(time.ANSIC)) + if v := b.LastPull; v != nil { + builder.WriteString("last_pull=") + builder.WriteString(v.Format(time.ANSIC)) + } builder.WriteString(", ") builder.WriteString("auth_type=") builder.WriteString(b.AuthType) diff --git a/pkg/database/ent/bouncer/bouncer.go b/pkg/database/ent/bouncer/bouncer.go index 3f201347e40..59afb199cb5 100644 --- 
a/pkg/database/ent/bouncer/bouncer.go +++ b/pkg/database/ent/bouncer/bouncer.go @@ -71,8 +71,6 @@ var ( UpdateDefaultUpdatedAt func() time.Time // DefaultIPAddress holds the default value on creation for the "ip_address" field. DefaultIPAddress string - // DefaultLastPull holds the default value on creation for the "last_pull" field. - DefaultLastPull func() time.Time // DefaultAuthType holds the default value on creation for the "auth_type" field. DefaultAuthType string ) diff --git a/pkg/database/ent/bouncer/where.go b/pkg/database/ent/bouncer/where.go index 86079794fee..e3c5752331e 100644 --- a/pkg/database/ent/bouncer/where.go +++ b/pkg/database/ent/bouncer/where.go @@ -589,6 +589,16 @@ func LastPullLTE(v time.Time) predicate.Bouncer { return predicate.Bouncer(sql.FieldLTE(FieldLastPull, v)) } +// LastPullIsNil applies the IsNil predicate on the "last_pull" field. +func LastPullIsNil() predicate.Bouncer { + return predicate.Bouncer(sql.FieldIsNull(FieldLastPull)) +} + +// LastPullNotNil applies the NotNil predicate on the "last_pull" field. +func LastPullNotNil() predicate.Bouncer { + return predicate.Bouncer(sql.FieldNotNull(FieldLastPull)) +} + // AuthTypeEQ applies the EQ predicate on the "auth_type" field. 
func AuthTypeEQ(v string) predicate.Bouncer { return predicate.Bouncer(sql.FieldEQ(FieldAuthType, v)) diff --git a/pkg/database/ent/bouncer_create.go b/pkg/database/ent/bouncer_create.go index 7a4b3d9b013..f2dfc767872 100644 --- a/pkg/database/ent/bouncer_create.go +++ b/pkg/database/ent/bouncer_create.go @@ -183,10 +183,6 @@ func (bc *BouncerCreate) defaults() { v := bouncer.DefaultIPAddress bc.mutation.SetIPAddress(v) } - if _, ok := bc.mutation.LastPull(); !ok { - v := bouncer.DefaultLastPull() - bc.mutation.SetLastPull(v) - } if _, ok := bc.mutation.AuthType(); !ok { v := bouncer.DefaultAuthType bc.mutation.SetAuthType(v) @@ -210,9 +206,6 @@ func (bc *BouncerCreate) check() error { if _, ok := bc.mutation.Revoked(); !ok { return &ValidationError{Name: "revoked", err: errors.New(`ent: missing required field "Bouncer.revoked"`)} } - if _, ok := bc.mutation.LastPull(); !ok { - return &ValidationError{Name: "last_pull", err: errors.New(`ent: missing required field "Bouncer.last_pull"`)} - } if _, ok := bc.mutation.AuthType(); !ok { return &ValidationError{Name: "auth_type", err: errors.New(`ent: missing required field "Bouncer.auth_type"`)} } @@ -276,7 +269,7 @@ func (bc *BouncerCreate) createSpec() (*Bouncer, *sqlgraph.CreateSpec) { } if value, ok := bc.mutation.LastPull(); ok { _spec.SetField(bouncer.FieldLastPull, field.TypeTime, value) - _node.LastPull = value + _node.LastPull = &value } if value, ok := bc.mutation.AuthType(); ok { _spec.SetField(bouncer.FieldAuthType, field.TypeString, value) diff --git a/pkg/database/ent/bouncer_update.go b/pkg/database/ent/bouncer_update.go index 1dc5aa080c3..31dd0bd708e 100644 --- a/pkg/database/ent/bouncer_update.go +++ b/pkg/database/ent/bouncer_update.go @@ -136,6 +136,12 @@ func (bu *BouncerUpdate) SetNillableLastPull(t *time.Time) *BouncerUpdate { return bu } +// ClearLastPull clears the value of the "last_pull" field. 
+func (bu *BouncerUpdate) ClearLastPull() *BouncerUpdate { + bu.mutation.ClearLastPull() + return bu +} + // SetAuthType sets the "auth_type" field. func (bu *BouncerUpdate) SetAuthType(s string) *BouncerUpdate { bu.mutation.SetAuthType(s) @@ -230,6 +236,9 @@ func (bu *BouncerUpdate) sqlSave(ctx context.Context) (n int, err error) { if value, ok := bu.mutation.LastPull(); ok { _spec.SetField(bouncer.FieldLastPull, field.TypeTime, value) } + if bu.mutation.LastPullCleared() { + _spec.ClearField(bouncer.FieldLastPull, field.TypeTime) + } if value, ok := bu.mutation.AuthType(); ok { _spec.SetField(bouncer.FieldAuthType, field.TypeString, value) } @@ -361,6 +370,12 @@ func (buo *BouncerUpdateOne) SetNillableLastPull(t *time.Time) *BouncerUpdateOne return buo } +// ClearLastPull clears the value of the "last_pull" field. +func (buo *BouncerUpdateOne) ClearLastPull() *BouncerUpdateOne { + buo.mutation.ClearLastPull() + return buo +} + // SetAuthType sets the "auth_type" field. func (buo *BouncerUpdateOne) SetAuthType(s string) *BouncerUpdateOne { buo.mutation.SetAuthType(s) @@ -485,6 +500,9 @@ func (buo *BouncerUpdateOne) sqlSave(ctx context.Context) (_node *Bouncer, err e if value, ok := buo.mutation.LastPull(); ok { _spec.SetField(bouncer.FieldLastPull, field.TypeTime, value) } + if buo.mutation.LastPullCleared() { + _spec.ClearField(bouncer.FieldLastPull, field.TypeTime) + } if value, ok := buo.mutation.AuthType(); ok { _spec.SetField(bouncer.FieldAuthType, field.TypeString, value) } diff --git a/pkg/database/ent/migrate/schema.go b/pkg/database/ent/migrate/schema.go index b0e7f990f6e..584e848f09e 100644 --- a/pkg/database/ent/migrate/schema.go +++ b/pkg/database/ent/migrate/schema.go @@ -68,7 +68,7 @@ var ( {Name: "ip_address", Type: field.TypeString, Nullable: true, Default: ""}, {Name: "type", Type: field.TypeString, Nullable: true}, {Name: "version", Type: field.TypeString, Nullable: true}, - {Name: "last_pull", Type: field.TypeTime}, + {Name: "last_pull", Type: 
field.TypeTime, Nullable: true}, {Name: "auth_type", Type: field.TypeString, Default: "api-key"}, } // BouncersTable holds the schema information for the "bouncers" table. diff --git a/pkg/database/ent/mutation.go b/pkg/database/ent/mutation.go index b88154324bb..c012e870c8f 100644 --- a/pkg/database/ent/mutation.go +++ b/pkg/database/ent/mutation.go @@ -2840,7 +2840,7 @@ func (m *BouncerMutation) LastPull() (r time.Time, exists bool) { // OldLastPull returns the old "last_pull" field's value of the Bouncer entity. // If the Bouncer object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *BouncerMutation) OldLastPull(ctx context.Context) (v time.Time, err error) { +func (m *BouncerMutation) OldLastPull(ctx context.Context) (v *time.Time, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldLastPull is only allowed on UpdateOne operations") } @@ -2854,9 +2854,22 @@ func (m *BouncerMutation) OldLastPull(ctx context.Context) (v time.Time, err err return oldValue.LastPull, nil } +// ClearLastPull clears the value of the "last_pull" field. +func (m *BouncerMutation) ClearLastPull() { + m.last_pull = nil + m.clearedFields[bouncer.FieldLastPull] = struct{}{} +} + +// LastPullCleared returns if the "last_pull" field was cleared in this mutation. +func (m *BouncerMutation) LastPullCleared() bool { + _, ok := m.clearedFields[bouncer.FieldLastPull] + return ok +} + // ResetLastPull resets all changes to the "last_pull" field. func (m *BouncerMutation) ResetLastPull() { m.last_pull = nil + delete(m.clearedFields, bouncer.FieldLastPull) } // SetAuthType sets the "auth_type" field. 
@@ -3135,6 +3148,9 @@ func (m *BouncerMutation) ClearedFields() []string { if m.FieldCleared(bouncer.FieldVersion) { fields = append(fields, bouncer.FieldVersion) } + if m.FieldCleared(bouncer.FieldLastPull) { + fields = append(fields, bouncer.FieldLastPull) + } return fields } @@ -3158,6 +3174,9 @@ func (m *BouncerMutation) ClearField(name string) error { case bouncer.FieldVersion: m.ClearVersion() return nil + case bouncer.FieldLastPull: + m.ClearLastPull() + return nil } return fmt.Errorf("unknown Bouncer nullable field %s", name) } diff --git a/pkg/database/ent/runtime.go b/pkg/database/ent/runtime.go index c593cd89fcb..b4da6dfb9db 100644 --- a/pkg/database/ent/runtime.go +++ b/pkg/database/ent/runtime.go @@ -72,10 +72,6 @@ func init() { bouncerDescIPAddress := bouncerFields[5].Descriptor() // bouncer.DefaultIPAddress holds the default value on creation for the ip_address field. bouncer.DefaultIPAddress = bouncerDescIPAddress.Default.(string) - // bouncerDescLastPull is the schema descriptor for last_pull field. - bouncerDescLastPull := bouncerFields[8].Descriptor() - // bouncer.DefaultLastPull holds the default value on creation for the last_pull field. - bouncer.DefaultLastPull = bouncerDescLastPull.Default.(func() time.Time) // bouncerDescAuthType is the schema descriptor for auth_type field. bouncerDescAuthType := bouncerFields[9].Descriptor() // bouncer.DefaultAuthType holds the default value on creation for the auth_type field. diff --git a/pkg/database/ent/schema/bouncer.go b/pkg/database/ent/schema/bouncer.go index acaa86008f5..242b5f5fe4a 100644 --- a/pkg/database/ent/schema/bouncer.go +++ b/pkg/database/ent/schema/bouncer.go @@ -28,8 +28,7 @@ func (Bouncer) Fields() []ent.Field { field.String("ip_address").Default("").Optional().StructTag(`json:"ip_address"`), field.String("type").Optional().StructTag(`json:"type"`), field.String("version").Optional().StructTag(`json:"version"`), - field.Time("last_pull"). 
- Default(types.UtcNow).StructTag(`json:"last_pull"`), + field.Time("last_pull").Nillable().Optional().StructTag(`json:"last_pull"`), field.String("auth_type").StructTag(`json:"auth_type"`).Default(types.ApiKeyAuthType), } } diff --git a/test/bats/10_bouncers.bats b/test/bats/10_bouncers.bats index 1ef39ceb05e..5bf4b5358db 100644 --- a/test/bats/10_bouncers.bats +++ b/test/bats/10_bouncers.bats @@ -39,7 +39,30 @@ teardown() { assert_output --partial "API key for 'ciTestBouncer':" rune -0 cscli bouncers delete ciTestBouncer rune -0 cscli bouncers list -o json - assert_output '[]' + assert_json '[]' +} + +@test "cscli bouncers list" { + export API_KEY=bouncerkey + rune -0 cscli bouncers add ciTestBouncer --key "$API_KEY" + + rune -0 cscli bouncers list -o json + rune -0 jq -c '.[] | [.ip_address,.last_pull,.name]' <(output) + assert_json '["",null,"ciTestBouncer"]' + rune -0 cscli bouncers list -o raw + assert_line 'name,ip,revoked,last_pull,type,version,auth_type' + assert_line 'ciTestBouncer,,validated,,,,api-key' + rune -0 cscli bouncers list -o human + assert_output --regexp 'ciTestBouncer.*api-key.*' + + # the first connection sets last_pull and ip address + rune -0 lapi-get '/v1/decisions' + rune -0 cscli bouncers list -o json + rune -0 jq -r '.[] | .ip_address' <(output) + assert_output 127.0.0.1 + rune -0 cscli bouncers list -o json + rune -0 jq -r '.[] | .last_pull' <(output) + refute_output null } @test "we can create a bouncer with a known key" { @@ -83,4 +106,3 @@ teardown() { rune -0 cscli bouncers prune assert_output 'No bouncers to prune.' 
} - From 4521a98ecc34cf4beeee653f1eb5914f1879f42d Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 17 Jun 2024 10:39:50 +0200 Subject: [PATCH 192/318] db: don't set machine heartbeat until first connection (#3019) * db: don't set machine heartbeat until first connection * cscli machines prune: if hearbeat is not set, look at creation date * lint --- cmd/crowdsec-cli/machines.go | 2 +- pkg/database/ent/machine/machine.go | 2 -- pkg/database/ent/machine_create.go | 4 --- pkg/database/ent/runtime.go | 4 --- pkg/database/ent/schema/machine.go | 2 +- pkg/database/machines.go | 45 ++++++++++++++++++++--------- test/bats/30_machines.bats | 7 +++++ 7 files changed, 41 insertions(+), 25 deletions(-) diff --git a/cmd/crowdsec-cli/machines.go b/cmd/crowdsec-cli/machines.go index 7beaa5c7fdd..20933dc28e5 100644 --- a/cmd/crowdsec-cli/machines.go +++ b/cmd/crowdsec-cli/machines.go @@ -414,7 +414,7 @@ func (cli *cliMachines) prune(duration time.Duration, notValidOnly bool, force b } if !notValidOnly { - if pending, err := cli.db.QueryLastValidatedHeartbeatLT(time.Now().UTC().Add(-duration)); err == nil { + if pending, err := cli.db.QueryMachinesInactiveSince(time.Now().UTC().Add(-duration)); err == nil { machines = append(machines, pending...) } } diff --git a/pkg/database/ent/machine/machine.go b/pkg/database/ent/machine/machine.go index 46ea6deb03d..d7dece9f8ef 100644 --- a/pkg/database/ent/machine/machine.go +++ b/pkg/database/ent/machine/machine.go @@ -87,8 +87,6 @@ var ( UpdateDefaultUpdatedAt func() time.Time // DefaultLastPush holds the default value on creation for the "last_push" field. DefaultLastPush func() time.Time - // DefaultLastHeartbeat holds the default value on creation for the "last_heartbeat" field. - DefaultLastHeartbeat func() time.Time // ScenariosValidator is a validator for the "scenarios" field. It is called by the builders before save. 
ScenariosValidator func(string) error // DefaultIsValidated holds the default value on creation for the "isValidated" field. diff --git a/pkg/database/ent/machine_create.go b/pkg/database/ent/machine_create.go index 8d4bfb74b2a..2e4cf9f1500 100644 --- a/pkg/database/ent/machine_create.go +++ b/pkg/database/ent/machine_create.go @@ -227,10 +227,6 @@ func (mc *MachineCreate) defaults() { v := machine.DefaultLastPush() mc.mutation.SetLastPush(v) } - if _, ok := mc.mutation.LastHeartbeat(); !ok { - v := machine.DefaultLastHeartbeat() - mc.mutation.SetLastHeartbeat(v) - } if _, ok := mc.mutation.IsValidated(); !ok { v := machine.DefaultIsValidated mc.mutation.SetIsValidated(v) diff --git a/pkg/database/ent/runtime.go b/pkg/database/ent/runtime.go index b4da6dfb9db..8d50d916029 100644 --- a/pkg/database/ent/runtime.go +++ b/pkg/database/ent/runtime.go @@ -142,10 +142,6 @@ func init() { machineDescLastPush := machineFields[2].Descriptor() // machine.DefaultLastPush holds the default value on creation for the last_push field. machine.DefaultLastPush = machineDescLastPush.Default.(func() time.Time) - // machineDescLastHeartbeat is the schema descriptor for last_heartbeat field. - machineDescLastHeartbeat := machineFields[3].Descriptor() - // machine.DefaultLastHeartbeat holds the default value on creation for the last_heartbeat field. - machine.DefaultLastHeartbeat = machineDescLastHeartbeat.Default.(func() time.Time) // machineDescScenarios is the schema descriptor for scenarios field. machineDescScenarios := machineFields[7].Descriptor() // machine.ScenariosValidator is a validator for the "scenarios" field. It is called by the builders before save. 
diff --git a/pkg/database/ent/schema/machine.go b/pkg/database/ent/schema/machine.go index 997a2041453..7b4d97ed35c 100644 --- a/pkg/database/ent/schema/machine.go +++ b/pkg/database/ent/schema/machine.go @@ -4,6 +4,7 @@ import ( "entgo.io/ent" "entgo.io/ent/schema/edge" "entgo.io/ent/schema/field" + "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -25,7 +26,6 @@ func (Machine) Fields() []ent.Field { Default(types.UtcNow). Nillable().Optional(), field.Time("last_heartbeat"). - Default(types.UtcNow). Nillable().Optional(), field.String("machineId"). Unique(). diff --git a/pkg/database/machines.go b/pkg/database/machines.go index 7a64c1d4d6e..18fd32fdd84 100644 --- a/pkg/database/machines.go +++ b/pkg/database/machines.go @@ -13,8 +13,10 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) -const CapiMachineID = types.CAPIOrigin -const CapiListsMachineID = types.ListOrigin +const ( + CapiMachineID = types.CAPIOrigin + CapiListsMachineID = types.ListOrigin +) func (c *Client) CreateMachine(machineID *string, password *strfmt.Password, ipAddress string, isValidated bool, force bool, authType string) (*ent.Machine, error) { hashPassword, err := bcrypt.GenerateFromPassword([]byte(*password), bcrypt.DefaultCost) @@ -30,6 +32,7 @@ func (c *Client) CreateMachine(machineID *string, password *strfmt.Password, ipA if err != nil { return nil, errors.Wrapf(QueryFail, "machine '%s': %s", *machineID, err) } + if len(machineExist) > 0 { if force { _, err := c.Ent.Machine.Update().Where(machine.MachineIdEQ(*machineID)).SetPassword(string(hashPassword)).Save(c.CTX) @@ -37,12 +40,15 @@ func (c *Client) CreateMachine(machineID *string, password *strfmt.Password, ipA c.Log.Warningf("CreateMachine : %s", err) return nil, errors.Wrapf(UpdateFail, "machine '%s'", *machineID) } + machine, err := c.QueryMachineByID(*machineID) if err != nil { return nil, errors.Wrapf(QueryFail, "machine '%s': %s", *machineID, err) } + return machine, nil } + return nil, errors.Wrapf(UserExists, 
"user '%s'", *machineID) } @@ -54,7 +60,6 @@ func (c *Client) CreateMachine(machineID *string, password *strfmt.Password, ipA SetIsValidated(isValidated). SetAuthType(authType). Save(c.CTX) - if err != nil { c.Log.Warningf("CreateMachine : %s", err) return nil, errors.Wrapf(InsertFail, "creating machine '%s'", *machineID) @@ -72,6 +77,7 @@ func (c *Client) QueryMachineByID(machineID string) (*ent.Machine, error) { c.Log.Warningf("QueryMachineByID : %s", err) return &ent.Machine{}, errors.Wrapf(UserNotExists, "user '%s'", machineID) } + return machine, nil } @@ -80,6 +86,7 @@ func (c *Client) ListMachines() ([]*ent.Machine, error) { if err != nil { return nil, errors.Wrapf(QueryFail, "listing machines: %s", err) } + return machines, nil } @@ -88,21 +95,21 @@ func (c *Client) ValidateMachine(machineID string) error { if err != nil { return errors.Wrapf(UpdateFail, "validating machine: %s", err) } + if rets == 0 { - return fmt.Errorf("machine not found") + return errors.New("machine not found") } + return nil } func (c *Client) QueryPendingMachine() ([]*ent.Machine, error) { - var machines []*ent.Machine - var err error - - machines, err = c.Ent.Machine.Query().Where(machine.IsValidatedEQ(false)).All(c.CTX) + machines, err := c.Ent.Machine.Query().Where(machine.IsValidatedEQ(false)).All(c.CTX) if err != nil { c.Log.Warningf("QueryPendingMachine : %s", err) return nil, errors.Wrapf(QueryFail, "querying pending machines: %s", err) } + return machines, nil } @@ -116,7 +123,7 @@ func (c *Client) DeleteWatcher(name string) error { } if nbDeleted == 0 { - return fmt.Errorf("machine doesn't exist") + return errors.New("machine doesn't exist") } return nil @@ -127,10 +134,12 @@ func (c *Client) BulkDeleteWatchers(machines []*ent.Machine) (int, error) { for i, b := range machines { ids[i] = b.ID } + nbDeleted, err := c.Ent.Machine.Delete().Where(machine.IDIn(ids...)).Exec(c.CTX) if err != nil { return nbDeleted, err } + return nbDeleted, nil } @@ -139,6 +148,7 @@ func (c 
*Client) UpdateMachineLastHeartBeat(machineID string) error { if err != nil { return errors.Wrapf(UpdateFail, "updating machine last_heartbeat: %s", err) } + return nil } @@ -150,6 +160,7 @@ func (c *Client) UpdateMachineScenarios(scenarios string, ID int) error { if err != nil { return fmt.Errorf("unable to update machine in database: %s", err) } + return nil } @@ -160,6 +171,7 @@ func (c *Client) UpdateMachineIP(ipAddr string, ID int) error { if err != nil { return fmt.Errorf("unable to update machine IP in database: %s", err) } + return nil } @@ -170,6 +182,7 @@ func (c *Client) UpdateMachineVersion(ipAddr string, ID int) error { if err != nil { return fmt.Errorf("unable to update machine version in database: %s", err) } + return nil } @@ -178,17 +191,23 @@ func (c *Client) IsMachineRegistered(machineID string) (bool, error) { if err != nil { return false, err } + if len(exist) == 1 { return true, nil } + if len(exist) > 1 { - return false, fmt.Errorf("more than one item with the same machineID in database") + return false, errors.New("more than one item with the same machineID in database") } return false, nil - } -func (c *Client) QueryLastValidatedHeartbeatLT(t time.Time) ([]*ent.Machine, error) { - return c.Ent.Machine.Query().Where(machine.LastHeartbeatLT(t), machine.IsValidatedEQ(true)).All(c.CTX) +func (c *Client) QueryMachinesInactiveSince(t time.Time) ([]*ent.Machine, error) { + return c.Ent.Machine.Query().Where( + machine.Or( + machine.And(machine.LastHeartbeatLT(t), machine.IsValidatedEQ(true)), + machine.And(machine.LastHeartbeatIsNil(), machine.CreatedAtLT(t)), + ), + ).All(c.CTX) } diff --git a/test/bats/30_machines.bats b/test/bats/30_machines.bats index 415e5f8693f..1d65151b6c8 100644 --- a/test/bats/30_machines.bats +++ b/test/bats/30_machines.bats @@ -62,6 +62,13 @@ teardown() { assert_output 1 } +@test "heartbeat is initially null" { + rune -0 cscli machines add foo --auto --file /dev/null + rune -0 cscli machines list -o json + rune -0 yq 
'.[] | select(.machineId == "foo") | .last_heartbeat' <(output) + assert_output null +} + @test "register, validate and then remove a machine" { rune -0 cscli lapi register --machine CiTestMachineRegister -f /dev/null -o human assert_stderr --partial "Successfully registered to Local API (LAPI)" From 659774fd3d17eea9a872aafbe553477da036b547 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 20 Jun 2024 10:38:23 +0200 Subject: [PATCH 193/318] refactor: prefer logrus.WithField over WithFields with a single param (#3087) --- cmd/crowdsec-cli/config_backup.go | 8 ++----- pkg/acquisition/acquisition.go | 4 +--- pkg/acquisition/modules/appsec/appsec.go | 4 +--- pkg/acquisition/modules/appsec/appsec_test.go | 2 +- .../modules/cloudwatch/cloudwatch.go | 2 +- pkg/acquisition/modules/docker/docker.go | 2 +- pkg/acquisition/modules/docker/docker_test.go | 24 +++++-------------- pkg/acquisition/modules/file/file_test.go | 20 ++++------------ .../modules/journalctl/journalctl_test.go | 24 +++++-------------- pkg/acquisition/modules/kafka/kafka_test.go | 8 ++----- pkg/acquisition/modules/kinesis/kinesis.go | 8 +++---- .../modules/kinesis/kinesis_test.go | 20 ++++------------ .../modules/kubernetesaudit/k8s_audit_test.go | 8 ++----- pkg/acquisition/modules/loki/loki_test.go | 12 +++------- pkg/acquisition/modules/syslog/syslog_test.go | 8 ++----- .../modules/wineventlog/wineventlog_test.go | 12 +++------- pkg/apiserver/middlewares/v1/api_key.go | 12 +++------- pkg/cache/cache.go | 4 +--- pkg/cwhub/dataset.go | 2 +- pkg/cwhub/itemupgrade.go | 2 +- pkg/cwhub/remote.go | 2 +- pkg/exprhelpers/crowdsec_cti.go | 5 +--- pkg/parser/enrich_date_test.go | 4 +--- pkg/parser/node.go | 8 ++----- pkg/types/queue.go | 2 +- 25 files changed, 57 insertions(+), 150 deletions(-) diff --git a/cmd/crowdsec-cli/config_backup.go b/cmd/crowdsec-cli/config_backup.go index d1e4a393555..e8ac6213530 100644 --- a/cmd/crowdsec-cli/config_backup.go +++ 
b/cmd/crowdsec-cli/config_backup.go @@ -21,9 +21,7 @@ func (cli *cliConfig) backupHub(dirPath string) error { } for _, itemType := range cwhub.ItemTypes { - clog := log.WithFields(log.Fields{ - "type": itemType, - }) + clog := log.WithField("type", itemType) itemMap := hub.GetItemMap(itemType) if itemMap == nil { @@ -39,9 +37,7 @@ func (cli *cliConfig) backupHub(dirPath string) error { upstreamParsers := []string{} for k, v := range itemMap { - clog = clog.WithFields(log.Fields{ - "file": v.Name, - }) + clog = clog.WithField("file", v.Name) if !v.State.Installed { // only backup installed ones clog.Debugf("[%s]: not installed", k) continue diff --git a/pkg/acquisition/acquisition.go b/pkg/acquisition/acquisition.go index ab7d954cac1..069bf67d852 100644 --- a/pkg/acquisition/acquisition.go +++ b/pkg/acquisition/acquisition.go @@ -161,9 +161,7 @@ func LoadAcquisitionFromDSN(dsn string, labels map[string]string, transformExpr if err := types.ConfigureLogger(clog); err != nil { return nil, fmt.Errorf("while configuring datasource logger: %w", err) } - subLogger := clog.WithFields(log.Fields{ - "type": dsn, - }) + subLogger := clog.WithField("type", dsn) uniqueId := uuid.NewString() if transformExpr != "" { vm, err := expr.Compile(transformExpr, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...) 
diff --git a/pkg/acquisition/modules/appsec/appsec.go b/pkg/acquisition/modules/appsec/appsec.go index f97905406ce..07ca56dfb70 100644 --- a/pkg/acquisition/modules/appsec/appsec.go +++ b/pkg/acquisition/modules/appsec/appsec.go @@ -210,9 +210,7 @@ func (w *AppsecSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLe runner := AppsecRunner{ inChan: w.InChan, UUID: appsecRunnerUUID, - logger: w.logger.WithFields(log.Fields{ - "runner_uuid": appsecRunnerUUID, - }), + logger: w.logger.WithField("runner_uuid", appsecRunnerUUID), AppsecRuntime: &wrt, Labels: w.config.Labels, } diff --git a/pkg/acquisition/modules/appsec/appsec_test.go b/pkg/acquisition/modules/appsec/appsec_test.go index 5fe4cfe236c..c769ea3d0fd 100644 --- a/pkg/acquisition/modules/appsec/appsec_test.go +++ b/pkg/acquisition/modules/appsec/appsec_test.go @@ -41,7 +41,7 @@ func loadAppSecEngine(test appsecRuleTest, t *testing.T) { InChan := make(chan appsec.ParsedRequest) OutChan := make(chan types.Event) - logger := log.WithFields(log.Fields{"test": test.name}) + logger := log.WithField("test", test.name) //build rules for ridx, rule := range test.inband_rules { diff --git a/pkg/acquisition/modules/cloudwatch/cloudwatch.go b/pkg/acquisition/modules/cloudwatch/cloudwatch.go index 1ac1465d390..1859bbf0f84 100644 --- a/pkg/acquisition/modules/cloudwatch/cloudwatch.go +++ b/pkg/acquisition/modules/cloudwatch/cloudwatch.go @@ -403,7 +403,7 @@ func (cw *CloudwatchSource) LogStreamManager(in chan LogStreamTailConfig, outCha openedStreams.With(prometheus.Labels{"group": newStream.GroupName}).Inc() } newStream.t = tomb.Tomb{} - newStream.logger = cw.logger.WithFields(log.Fields{"stream": newStream.StreamName}) + newStream.logger = cw.logger.WithField("stream", newStream.StreamName) cw.logger.Debugf("starting tail of stream %s", newStream.StreamName) newStream.t.Go(func() error { return cw.TailLogStream(&newStream, outChan) diff --git a/pkg/acquisition/modules/docker/docker.go 
b/pkg/acquisition/modules/docker/docker.go index 3a694b99d76..857d7e7af78 100644 --- a/pkg/acquisition/modules/docker/docker.go +++ b/pkg/acquisition/modules/docker/docker.go @@ -609,7 +609,7 @@ func (d *DockerSource) DockerManager(in chan *ContainerConfig, deleteChan chan * case newContainer := <-in: if _, ok := d.runningContainerState[newContainer.ID]; !ok { newContainer.t = &tomb.Tomb{} - newContainer.logger = d.logger.WithFields(log.Fields{"container_name": newContainer.Name}) + newContainer.logger = d.logger.WithField("container_name", newContainer.Name) newContainer.t.Go(func() error { return d.TailDocker(newContainer, outChan, deleteChan) }) diff --git a/pkg/acquisition/modules/docker/docker_test.go b/pkg/acquisition/modules/docker/docker_test.go index bcf0966a2d1..e1cc4db96ad 100644 --- a/pkg/acquisition/modules/docker/docker_test.go +++ b/pkg/acquisition/modules/docker/docker_test.go @@ -55,9 +55,7 @@ container_name: }, } - subLogger := log.WithFields(log.Fields{ - "type": "docker", - }) + subLogger := log.WithField("type", "docker") for _, test := range tests { f := DockerSource{} @@ -108,9 +106,7 @@ func TestConfigureDSN(t *testing.T) { expectedErr: "", }, } - subLogger := log.WithFields(log.Fields{ - "type": "docker", - }) + subLogger := log.WithField("type", "docker") for _, test := range tests { f := DockerSource{} @@ -169,13 +165,9 @@ container_name_regexp: if ts.expectedOutput != "" { logger.SetLevel(ts.logLevel) - subLogger = logger.WithFields(log.Fields{ - "type": "docker", - }) + subLogger = logger.WithField("type", "docker") } else { - subLogger = log.WithFields(log.Fields{ - "type": "docker", - }) + subLogger = log.WithField("type", "docker") } readLogs = false @@ -310,14 +302,10 @@ func TestOneShot(t *testing.T) { if ts.expectedOutput != "" { logger.SetLevel(ts.logLevel) - subLogger = logger.WithFields(log.Fields{ - "type": "docker", - }) + subLogger = logger.WithField("type", "docker") } else { log.SetLevel(ts.logLevel) - subLogger = 
log.WithFields(log.Fields{ - "type": "docker", - }) + subLogger = log.WithField("type", "docker") } readLogs = false diff --git a/pkg/acquisition/modules/file/file_test.go b/pkg/acquisition/modules/file/file_test.go index 0873b837a3f..688812f2fd3 100644 --- a/pkg/acquisition/modules/file/file_test.go +++ b/pkg/acquisition/modules/file/file_test.go @@ -49,9 +49,7 @@ exclude_regexps: ["as[a-$d"]`, }, } - subLogger := log.WithFields(log.Fields{ - "type": "file", - }) + subLogger := log.WithField("type", "file") for _, tc := range tests { tc := tc @@ -91,9 +89,7 @@ func TestConfigureDSN(t *testing.T) { }, } - subLogger := log.WithFields(log.Fields{ - "type": "file", - }) + subLogger := log.WithField("type", "file") for _, tc := range tests { tc := tc @@ -211,9 +207,7 @@ filename: test_files/test_delete.log`, logger, hook := test.NewNullLogger() logger.SetLevel(tc.logLevel) - subLogger := logger.WithFields(log.Fields{ - "type": "file", - }) + subLogger := logger.WithField("type", "file") tomb := tomb.Tomb{} out := make(chan types.Event, 100) @@ -372,9 +366,7 @@ force_inotify: true`, testPattern), logger, hook := test.NewNullLogger() logger.SetLevel(tc.logLevel) - subLogger := logger.WithFields(log.Fields{ - "type": "file", - }) + subLogger := logger.WithField("type", "file") tomb := tomb.Tomb{} out := make(chan types.Event) @@ -451,9 +443,7 @@ func TestExclusion(t *testing.T) { exclude_regexps: ["\\.gz$"]` logger, hook := test.NewNullLogger() // logger.SetLevel(ts.logLevel) - subLogger := logger.WithFields(log.Fields{ - "type": "file", - }) + subLogger := logger.WithField("type", "file") f := fileacquisition.FileSource{} if err := f.Configure([]byte(config), subLogger, configuration.METRICS_NONE); err != nil { diff --git a/pkg/acquisition/modules/journalctl/journalctl_test.go b/pkg/acquisition/modules/journalctl/journalctl_test.go index 9d1f1bb7e0e..f381a227534 100644 --- a/pkg/acquisition/modules/journalctl/journalctl_test.go +++ 
b/pkg/acquisition/modules/journalctl/journalctl_test.go @@ -47,9 +47,7 @@ journalctl_filter: }, } - subLogger := log.WithFields(log.Fields{ - "type": "journalctl", - }) + subLogger := log.WithField("type", "journalctl") for _, test := range tests { f := JournalCtlSource{} @@ -97,9 +95,7 @@ func TestConfigureDSN(t *testing.T) { }, } - subLogger := log.WithFields(log.Fields{ - "type": "journalctl", - }) + subLogger := log.WithField("type", "journalctl") for _, test := range tests { f := JournalCtlSource{} @@ -153,13 +149,9 @@ journalctl_filter: if ts.expectedOutput != "" { logger, hook = test.NewNullLogger() logger.SetLevel(ts.logLevel) - subLogger = logger.WithFields(log.Fields{ - "type": "journalctl", - }) + subLogger = logger.WithField("type", "journalctl") } else { - subLogger = log.WithFields(log.Fields{ - "type": "journalctl", - }) + subLogger = log.WithField("type", "journalctl") } tomb := tomb.Tomb{} @@ -227,13 +219,9 @@ journalctl_filter: if ts.expectedOutput != "" { logger, hook = test.NewNullLogger() logger.SetLevel(ts.logLevel) - subLogger = logger.WithFields(log.Fields{ - "type": "journalctl", - }) + subLogger = logger.WithField("type", "journalctl") } else { - subLogger = log.WithFields(log.Fields{ - "type": "journalctl", - }) + subLogger = log.WithField("type", "journalctl") } tomb := tomb.Tomb{} diff --git a/pkg/acquisition/modules/kafka/kafka_test.go b/pkg/acquisition/modules/kafka/kafka_test.go index 54715a81251..245d3ed58c8 100644 --- a/pkg/acquisition/modules/kafka/kafka_test.go +++ b/pkg/acquisition/modules/kafka/kafka_test.go @@ -149,9 +149,7 @@ func TestStreamingAcquisition(t *testing.T) { }, } - subLogger := log.WithFields(log.Fields{ - "type": "kafka", - }) + subLogger := log.WithField("type", "kafka") createTopic("crowdsecplaintext", "localhost:9092") @@ -222,9 +220,7 @@ func TestStreamingAcquisitionWithSSL(t *testing.T) { }, } - subLogger := log.WithFields(log.Fields{ - "type": "kafka", - }) + subLogger := log.WithField("type", "kafka") 
createTopic("crowdsecssl", "localhost:9092") diff --git a/pkg/acquisition/modules/kinesis/kinesis.go b/pkg/acquisition/modules/kinesis/kinesis.go index 5d3cf8f80a0..485cefcf01d 100644 --- a/pkg/acquisition/modules/kinesis/kinesis.go +++ b/pkg/acquisition/modules/kinesis/kinesis.go @@ -334,7 +334,7 @@ func (k *KinesisSource) ParseAndPushRecords(records []*kinesis.Record, out chan } func (k *KinesisSource) ReadFromSubscription(reader kinesis.SubscribeToShardEventStreamReader, out chan types.Event, shardId string, streamName string) error { - logger := k.logger.WithFields(log.Fields{"shard_id": shardId}) + logger := k.logger.WithField("shard_id", shardId) //ghetto sync, kinesis allows to subscribe to a closed shard, which will make the goroutine exit immediately //and we won't be able to start a new one if this is the first one started by the tomb //TODO: look into parent shards to see if a shard is closed before starting to read it ? @@ -397,7 +397,7 @@ func (k *KinesisSource) EnhancedRead(out chan types.Event, t *tomb.Tomb) error { return fmt.Errorf("resource part of stream ARN %s does not start with stream/", k.Config.StreamARN) } - k.logger = k.logger.WithFields(log.Fields{"stream": parsedARN.Resource[7:]}) + k.logger = k.logger.WithField("stream", parsedARN.Resource[7:]) k.logger.Info("starting kinesis acquisition with enhanced fan-out") err = k.DeregisterConsumer() if err != nil { @@ -439,7 +439,7 @@ func (k *KinesisSource) EnhancedRead(out chan types.Event, t *tomb.Tomb) error { } func (k *KinesisSource) ReadFromShard(out chan types.Event, shardId string) error { - logger := k.logger.WithFields(log.Fields{"shard": shardId}) + logger := k.logger.WithField("shard", shardId) logger.Debugf("Starting to read shard") sharIt, err := k.kClient.GetShardIterator(&kinesis.GetShardIteratorInput{ShardId: aws.String(shardId), StreamName: &k.Config.StreamName, @@ -485,7 +485,7 @@ func (k *KinesisSource) ReadFromShard(out chan types.Event, shardId string) erro } func (k 
*KinesisSource) ReadFromStream(out chan types.Event, t *tomb.Tomb) error { - k.logger = k.logger.WithFields(log.Fields{"stream": k.Config.StreamName}) + k.logger = k.logger.WithField("stream", k.Config.StreamName) k.logger.Info("starting kinesis acquisition from shards") for { shards, err := k.kClient.ListShards(&kinesis.ListShardsInput{ diff --git a/pkg/acquisition/modules/kinesis/kinesis_test.go b/pkg/acquisition/modules/kinesis/kinesis_test.go index a4e4f2f7378..d1d398c129e 100644 --- a/pkg/acquisition/modules/kinesis/kinesis_test.go +++ b/pkg/acquisition/modules/kinesis/kinesis_test.go @@ -139,9 +139,7 @@ stream_arn: arn:aws:kinesis:eu-west-1:123456789012:stream/my-stream`, }, } - subLogger := log.WithFields(log.Fields{ - "type": "kinesis", - }) + subLogger := log.WithField("type", "kinesis") for _, test := range tests { f := KinesisSource{} err := f.Configure([]byte(test.config), subLogger, configuration.METRICS_NONE) @@ -171,9 +169,7 @@ stream_name: stream-1-shard`, for _, test := range tests { f := KinesisSource{} config := fmt.Sprintf(test.config, endpoint) - err := f.Configure([]byte(config), log.WithFields(log.Fields{ - "type": "kinesis", - }), configuration.METRICS_NONE) + err := f.Configure([]byte(config), log.WithField("type", "kinesis"), configuration.METRICS_NONE) if err != nil { t.Fatalf("Error configuring source: %s", err) } @@ -217,9 +213,7 @@ stream_name: stream-2-shards`, for _, test := range tests { f := KinesisSource{} config := fmt.Sprintf(test.config, endpoint) - err := f.Configure([]byte(config), log.WithFields(log.Fields{ - "type": "kinesis", - }), configuration.METRICS_NONE) + err := f.Configure([]byte(config), log.WithField("type", "kinesis"), configuration.METRICS_NONE) if err != nil { t.Fatalf("Error configuring source: %s", err) } @@ -266,9 +260,7 @@ from_subscription: true`, for _, test := range tests { f := KinesisSource{} config := fmt.Sprintf(test.config, endpoint) - err := f.Configure([]byte(config), log.WithFields(log.Fields{ - 
"type": "kinesis", - }), configuration.METRICS_NONE) + err := f.Configure([]byte(config), log.WithField("type", "kinesis"), configuration.METRICS_NONE) if err != nil { t.Fatalf("Error configuring source: %s", err) } @@ -312,9 +304,7 @@ use_enhanced_fanout: true`, for _, test := range tests { f := KinesisSource{} config := fmt.Sprintf(test.config, endpoint) - err := f.Configure([]byte(config), log.WithFields(log.Fields{ - "type": "kinesis", - })) + err := f.Configure([]byte(config), log.WithField("type", "kinesis")) if err != nil { t.Fatalf("Error configuring source: %s", err) } diff --git a/pkg/acquisition/modules/kubernetesaudit/k8s_audit_test.go b/pkg/acquisition/modules/kubernetesaudit/k8s_audit_test.go index 331822ecf5b..b6e6f6b03e9 100644 --- a/pkg/acquisition/modules/kubernetesaudit/k8s_audit_test.go +++ b/pkg/acquisition/modules/kubernetesaudit/k8s_audit_test.go @@ -67,9 +67,7 @@ webhook_path: /k8s-audit`, }, } - subLogger := log.WithFields(log.Fields{ - "type": "k8s-audit", - }) + subLogger := log.WithField("type", "k8s-audit") for _, test := range tests { t.Run(test.name, func(t *testing.T) { @@ -230,9 +228,7 @@ webhook_path: /k8s-audit`, }, } - subLogger := log.WithFields(log.Fields{ - "type": "k8s-audit", - }) + subLogger := log.WithField("type", "k8s-audit") for _, test := range tests { t.Run(test.name, func(t *testing.T) { diff --git a/pkg/acquisition/modules/loki/loki_test.go b/pkg/acquisition/modules/loki/loki_test.go index 83742546959..051a9b93ed5 100644 --- a/pkg/acquisition/modules/loki/loki_test.go +++ b/pkg/acquisition/modules/loki/loki_test.go @@ -124,9 +124,7 @@ query: > testName: "Invalid DelayFor", }, } - subLogger := log.WithFields(log.Fields{ - "type": "loki", - }) + subLogger := log.WithField("type", "loki") for _, test := range tests { t.Run(test.testName, func(t *testing.T) { @@ -343,9 +341,7 @@ since: 1h for _, ts := range tests { logger := log.New() - subLogger := logger.WithFields(log.Fields{ - "type": "loki", - }) + subLogger := 
logger.WithField("type", "loki") lokiSource := loki.LokiSource{} err := lokiSource.Configure([]byte(ts.config), subLogger, configuration.METRICS_NONE) @@ -509,9 +505,7 @@ query: > {server="demo"} ` logger := log.New() - subLogger := logger.WithFields(log.Fields{ - "type": "loki", - }) + subLogger := logger.WithField("type", "loki") title := time.Now().String() lokiSource := loki.LokiSource{} diff --git a/pkg/acquisition/modules/syslog/syslog_test.go b/pkg/acquisition/modules/syslog/syslog_test.go index ba14c7db053..8096740f5e4 100644 --- a/pkg/acquisition/modules/syslog/syslog_test.go +++ b/pkg/acquisition/modules/syslog/syslog_test.go @@ -52,9 +52,7 @@ listen_addr: 10.0.0`, }, } - subLogger := log.WithFields(log.Fields{ - "type": "syslog", - }) + subLogger := log.WithField("type", "syslog") for _, test := range tests { s := SyslogSource{} err := s.Configure([]byte(test.config), subLogger, configuration.METRICS_NONE) @@ -134,9 +132,7 @@ listen_addr: 127.0.0.1`, for _, ts := range tests { ts := ts t.Run(ts.name, func(t *testing.T) { - subLogger := log.WithFields(log.Fields{ - "type": "syslog", - }) + subLogger := log.WithField("type", "syslog") s := SyslogSource{} err := s.Configure([]byte(ts.config), subLogger, configuration.METRICS_NONE) if err != nil { diff --git a/pkg/acquisition/modules/wineventlog/wineventlog_test.go b/pkg/acquisition/modules/wineventlog/wineventlog_test.go index c937ceba825..2ea0e365be5 100644 --- a/pkg/acquisition/modules/wineventlog/wineventlog_test.go +++ b/pkg/acquisition/modules/wineventlog/wineventlog_test.go @@ -54,9 +54,7 @@ xpath_query: test`, }, } - subLogger := log.WithFields(log.Fields{ - "type": "windowseventlog", - }) + subLogger := log.WithField("type", "windowseventlog") for _, test := range tests { f := WinEventLogSource{} err := f.Configure([]byte(test.config), subLogger, configuration.METRICS_NONE) @@ -113,9 +111,7 @@ event_level: bla`, expectedErr: "invalid log level", }, } - subLogger := log.WithFields(log.Fields{ - 
"type": "windowseventlog", - }) + subLogger := log.WithField("type", "windowseventlog") for _, test := range tests { f := WinEventLogSource{} f.Configure([]byte(test.config), subLogger, configuration.METRICS_NONE) @@ -181,9 +177,7 @@ event_ids: expectedLines: nil, }, } - subLogger := log.WithFields(log.Fields{ - "type": "windowseventlog", - }) + subLogger := log.WithField("type", "windowseventlog") evthandler, err := eventlog.Open("Application") diff --git a/pkg/apiserver/middlewares/v1/api_key.go b/pkg/apiserver/middlewares/v1/api_key.go index 4561b8f7789..314a4da1046 100644 --- a/pkg/apiserver/middlewares/v1/api_key.go +++ b/pkg/apiserver/middlewares/v1/api_key.go @@ -75,9 +75,7 @@ func (a *APIKey) authTLS(c *gin.Context, logger *log.Entry) *ent.Bouncer { return nil } - logger = logger.WithFields(log.Fields{ - "cn": extractedCN, - }) + logger = logger.WithField("cn", extractedCN) bouncerName := fmt.Sprintf("%s@%s", extractedCN, c.ClientIP()) bouncer, err := a.DbClient.SelectBouncerByName(bouncerName) @@ -141,9 +139,7 @@ func (a *APIKey) MiddlewareFunc() gin.HandlerFunc { clientIP := c.ClientIP() - logger := log.WithFields(log.Fields{ - "ip": clientIP, - }) + logger := log.WithField("ip", clientIP) if c.Request.TLS != nil && len(c.Request.TLS.PeerCertificates) > 0 { bouncer = a.authTLS(c, logger) @@ -158,9 +154,7 @@ func (a *APIKey) MiddlewareFunc() gin.HandlerFunc { return } - logger = logger.WithFields(log.Fields{ - "name": bouncer.Name, - }) + logger = logger.WithField("name", bouncer.Name) if bouncer.IPAddress == "" { if err := a.DbClient.UpdateBouncerIP(clientIP, bouncer.ID); err != nil { diff --git a/pkg/cache/cache.go b/pkg/cache/cache.go index c2153ee2342..8a696caf1f4 100644 --- a/pkg/cache/cache.go +++ b/pkg/cache/cache.go @@ -64,9 +64,7 @@ func CacheInit(cfg CacheCfg) error { } clog.SetLevel(*cfg.LogLevel) - cfg.Logger = clog.WithFields(log.Fields{ - "cache": cfg.Name, - }) + cfg.Logger = clog.WithField("cache", cfg.Name) tmpCache := gcache.New(cfg.Size) 
diff --git a/pkg/cwhub/dataset.go b/pkg/cwhub/dataset.go index 6d4f35c285c..90bc9e057f9 100644 --- a/pkg/cwhub/dataset.go +++ b/pkg/cwhub/dataset.go @@ -46,7 +46,7 @@ func downloadDataSet(ctx context.Context, dataFolder string, force bool, reader WithHTTPClient(hubClient). ToFile(destPath). CompareContent(). - WithLogger(logrus.WithFields(logrus.Fields{"url": dataS.SourceURL})) + WithLogger(logrus.WithField("url", dataS.SourceURL)) if !force { d = d.WithLastModified(). diff --git a/pkg/cwhub/itemupgrade.go b/pkg/cwhub/itemupgrade.go index 4dad226fd78..d74544ddaed 100644 --- a/pkg/cwhub/itemupgrade.go +++ b/pkg/cwhub/itemupgrade.go @@ -125,7 +125,7 @@ func (i *Item) FetchContentTo(ctx context.Context, destPath string) (bool, strin WithHTTPClient(hubClient). ToFile(destPath). WithMakeDirs(true). - WithLogger(logrus.WithFields(logrus.Fields{"url": url})). + WithLogger(logrus.WithField("url", url)). CompareContent(). VerifyHash("sha256", wantHash) diff --git a/pkg/cwhub/remote.go b/pkg/cwhub/remote.go index abb2ddae2ad..0678a7488f8 100644 --- a/pkg/cwhub/remote.go +++ b/pkg/cwhub/remote.go @@ -46,7 +46,7 @@ func (r *RemoteHubCfg) fetchIndex(ctx context.Context, destPath string) (bool, e WithHTTPClient(hubClient). ToFile(destPath). CompareContent(). - WithLogger(logrus.WithFields(logrus.Fields{"url": url})). + WithLogger(logrus.WithField("url", url)). 
Download(ctx, url) if err != nil { return false, err diff --git a/pkg/exprhelpers/crowdsec_cti.go b/pkg/exprhelpers/crowdsec_cti.go index a640ea79f48..4e54b07bfc2 100644 --- a/pkg/exprhelpers/crowdsec_cti.go +++ b/pkg/exprhelpers/crowdsec_cti.go @@ -45,10 +45,7 @@ func InitCrowdsecCTI(Key *string, TTL *time.Duration, Size *int, LogLevel *log.L if LogLevel != nil { clog.SetLevel(*LogLevel) } - customLog := log.Fields{ - "type": "crowdsec-cti", - } - subLogger := clog.WithFields(customLog) + subLogger := clog.WithField("type", "crowdsec-cti") CrowdsecCTIInitCache(*Size, *TTL) ctiClient = cticlient.NewCrowdsecCTIClient(cticlient.WithAPIKey(CTIApiKey), cticlient.WithLogger(subLogger)) CTIApiEnabled = true diff --git a/pkg/parser/enrich_date_test.go b/pkg/parser/enrich_date_test.go index 085ef5ca342..434667cbeaf 100644 --- a/pkg/parser/enrich_date_test.go +++ b/pkg/parser/enrich_date_test.go @@ -42,9 +42,7 @@ func TestDateParse(t *testing.T) { }, } - logger := log.WithFields(log.Fields{ - "test": "test", - }) + logger := log.WithField("test", "test") for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { diff --git a/pkg/parser/node.go b/pkg/parser/node.go index 4bb2c4c5eaf..bb57995a129 100644 --- a/pkg/parser/node.go +++ b/pkg/parser/node.go @@ -464,15 +464,11 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error { } clog.SetLevel(log.DebugLevel) - n.Logger = clog.WithFields(log.Fields{ - "id": n.rn, - }) + n.Logger = clog.WithField("id", n.rn) n.Logger.Infof("%s has debug enabled", n.Name) } else { /* else bind it to the default one (might find something more elegant here)*/ - n.Logger = log.WithFields(log.Fields{ - "id": n.rn, - }) + n.Logger = log.WithField("id", n.rn) } /* display info about top-level nodes, they should be the only one with explicit stage name ?*/ diff --git a/pkg/types/queue.go b/pkg/types/queue.go index d9b737d548f..12a3ab37074 100644 --- a/pkg/types/queue.go +++ b/pkg/types/queue.go @@ -22,7 +22,7 @@ func 
NewQueue(l int) *Queue { Queue: make([]Event, 0, l), L: l, } - log.WithFields(log.Fields{"Capacity": q.L}).Debugf("Creating queue") + log.WithField("Capacity", q.L).Debugf("Creating queue") return q } From a1ebe1822186c298a03de3ae2668038f27174033 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 20 Jun 2024 17:18:41 +0200 Subject: [PATCH 194/318] config: expand env variables in local_api_credentials.yaml and .yaml.local (#3093) * config: expand env variables in local_api_credentials.yaml and .yaml.local * fix func test --- pkg/csconfig/api.go | 5 ++++- test/bats/01_cscli.bats | 37 +++++++++++++++++++++++++++++++++++++ 2 files changed, 41 insertions(+), 1 deletion(-) diff --git a/pkg/csconfig/api.go b/pkg/csconfig/api.go index 267e101cea8..fb29111ecd5 100644 --- a/pkg/csconfig/api.go +++ b/pkg/csconfig/api.go @@ -15,6 +15,7 @@ import ( log "github.com/sirupsen/logrus" "gopkg.in/yaml.v3" + "github.com/crowdsecurity/go-cs-lib/csstring" "github.com/crowdsecurity/go-cs-lib/ptr" "github.com/crowdsecurity/go-cs-lib/yamlpatch" @@ -126,7 +127,9 @@ func (l *LocalApiClientCfg) Load() error { return err } - dec := yaml.NewDecoder(bytes.NewReader(fcontent)) + configData := csstring.StrictExpand(string(fcontent), os.LookupEnv) + + dec := yaml.NewDecoder(strings.NewReader(configData)) dec.KnownFields(true) err = dec.Decode(&l.Credentials) diff --git a/test/bats/01_cscli.bats b/test/bats/01_cscli.bats index 8dfdf701a95..a0878ad0e12 100644 --- a/test/bats/01_cscli.bats +++ b/test/bats/01_cscli.bats @@ -245,6 +245,43 @@ teardown() { assert_stderr --partial "no credentials or URL found in api client configuration '${LOCAL_API_CREDENTIALS}'" } +@test "cscli - LAPI credentials file can reference env variables" { + LOCAL_API_CREDENTIALS=$(config_get '.api.client.credentials_path') + URL=$(config_get "$LOCAL_API_CREDENTIALS" '.url') + export URL + LOGIN=$(config_get "$LOCAL_API_CREDENTIALS" '.login') + export LOGIN + PASSWORD=$(config_get 
"$LOCAL_API_CREDENTIALS" '.password') + export PASSWORD + + # shellcheck disable=SC2016 + echo '{"url":"$URL","login":"$LOGIN","password":"$PASSWORD"}' > "$LOCAL_API_CREDENTIALS".local + + config_set '.crowdsec_service.enable=false' + rune -0 ./instance-crowdsec start + + rune -0 cscli lapi status + assert_stderr --partial "You can successfully interact with Local API (LAPI)" + + rm "$LOCAL_API_CREDENTIALS".local + + # shellcheck disable=SC2016 + config_set "$LOCAL_API_CREDENTIALS" '.url="$URL"' + # shellcheck disable=SC2016 + config_set "$LOCAL_API_CREDENTIALS" '.login="$LOGIN"' + # shellcheck disable=SC2016 + config_set "$LOCAL_API_CREDENTIALS" '.password="$PASSWORD"' + + rune -0 cscli lapi status + assert_stderr --partial "You can successfully interact with Local API (LAPI)" + + # but if a variable is not defined, there is no specific error message + unset URL + rune -1 cscli lapi status + # shellcheck disable=SC2016 + assert_stderr --partial 'BaseURL must have a trailing slash' +} + @test "cscli - missing LAPI client settings" { config_set 'del(.api.client)' rune -1 cscli lapi status From 8a259fd25b1c6abe3c7ee446c955a3144b295248 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 20 Jun 2024 22:13:26 +0200 Subject: [PATCH 195/318] lint (copyloopvar) (#2971) --- pkg/acquisition/acquisition_test.go | 3 --- pkg/acquisition/modules/appsec/appsec.go | 1 - pkg/acquisition/modules/cloudwatch/cloudwatch_test.go | 4 ---- pkg/acquisition/modules/file/file_test.go | 4 ---- pkg/acquisition/modules/kafka/kafka_test.go | 2 -- .../modules/syslog/internal/parser/rfc3164/parse_test.go | 6 ------ .../modules/syslog/internal/parser/rfc3164/perf_test.go | 1 - .../modules/syslog/internal/parser/rfc5424/parse_test.go | 3 --- .../modules/syslog/internal/parser/rfc5424/perf_test.go | 1 - pkg/acquisition/modules/syslog/syslog_test.go | 1 - pkg/apiclient/decisions_service_test.go | 1 - pkg/apiserver/apic_metrics_test.go | 1 - 
pkg/apiserver/apic_test.go | 7 ------- pkg/csconfig/api_test.go | 4 ---- pkg/csconfig/config_test.go | 1 - pkg/csconfig/crowdsec_service_test.go | 1 - pkg/csconfig/cscli_test.go | 1 - pkg/csconfig/database_test.go | 1 - pkg/csconfig/hub_test.go | 1 - pkg/csconfig/simulation_test.go | 2 -- pkg/csplugin/broker_test.go | 1 - pkg/csplugin/listfiles_test.go | 1 - pkg/csplugin/utils_test.go | 1 - pkg/csprofiles/csprofiles_test.go | 2 -- pkg/exprhelpers/exprlib_test.go | 5 ----- pkg/exprhelpers/jsonextract_test.go | 2 -- pkg/fflag/features_test.go | 7 ------- pkg/parser/enrich_date_test.go | 1 - pkg/parser/whitelist_test.go | 2 -- pkg/setup/detect_test.go | 8 -------- pkg/types/event_test.go | 3 --- 31 files changed, 79 deletions(-) diff --git a/pkg/acquisition/acquisition_test.go b/pkg/acquisition/acquisition_test.go index 1fbac2cdc00..cc57aecb732 100644 --- a/pkg/acquisition/acquisition_test.go +++ b/pkg/acquisition/acquisition_test.go @@ -179,7 +179,6 @@ wowo: ajsajasjas } for _, tc := range tests { - tc := tc t.Run(tc.TestName, func(t *testing.T) { common := configuration.DataSourceCommonCfg{} yaml.Unmarshal([]byte(tc.String), &common) @@ -282,7 +281,6 @@ func TestLoadAcquisitionFromFile(t *testing.T) { }, } for _, tc := range tests { - tc := tc t.Run(tc.TestName, func(t *testing.T) { dss, err := LoadAcquisitionFromFile(&tc.Config, nil) cstest.RequireErrorContains(t, err, tc.ExpectedError) @@ -549,7 +547,6 @@ func TestConfigureByDSN(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.dsn, func(t *testing.T) { srcs, err := LoadAcquisitionFromDSN(tc.dsn, map[string]string{"type": "test_label"}, "") cstest.RequireErrorContains(t, err, tc.ExpectedError) diff --git a/pkg/acquisition/modules/appsec/appsec.go b/pkg/acquisition/modules/appsec/appsec.go index 07ca56dfb70..1d9a9bfec3c 100644 --- a/pkg/acquisition/modules/appsec/appsec.go +++ b/pkg/acquisition/modules/appsec/appsec.go @@ -251,7 +251,6 @@ func (w *AppsecSource) StreamingAcquisition(out chan 
types.Event, t *tomb.Tomb) w.logger.Infof("%d appsec runner to start", len(w.AppsecRunners)) for _, runner := range w.AppsecRunners { - runner := runner runner.outChan = out t.Go(func() error { defer trace.CatchPanic("crowdsec/acquis/appsec/live/runner") diff --git a/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go b/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go index 12e01ddf609..89a2b56bc00 100644 --- a/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go +++ b/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go @@ -423,7 +423,6 @@ stream_name: test_stream`), } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { dbgLogger := log.New().WithField("test", tc.name) dbgLogger.Logger.SetLevel(log.DebugLevel) @@ -556,7 +555,6 @@ stream_name: test_stream`), } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { dbgLogger := log.New().WithField("test", tc.name) dbgLogger.Logger.SetLevel(log.DebugLevel) @@ -621,7 +619,6 @@ func TestConfigureByDSN(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { dbgLogger := log.New().WithField("test", tc.name) dbgLogger.Logger.SetLevel(log.DebugLevel) @@ -743,7 +740,6 @@ func TestOneShotAcquisition(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { dbgLogger := log.New().WithField("test", tc.name) dbgLogger.Logger.SetLevel(log.DebugLevel) diff --git a/pkg/acquisition/modules/file/file_test.go b/pkg/acquisition/modules/file/file_test.go index 688812f2fd3..2f84c6de324 100644 --- a/pkg/acquisition/modules/file/file_test.go +++ b/pkg/acquisition/modules/file/file_test.go @@ -52,7 +52,6 @@ exclude_regexps: ["as[a-$d"]`, subLogger := log.WithField("type", "file") for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { f := fileacquisition.FileSource{} err := f.Configure([]byte(tc.config), subLogger, configuration.METRICS_NONE) @@ -92,7 +91,6 @@ func TestConfigureDSN(t *testing.T) { 
subLogger := log.WithField("type", "file") for _, tc := range tests { - tc := tc t.Run(tc.dsn, func(t *testing.T) { f := fileacquisition.FileSource{} err := f.ConfigureByDSN(tc.dsn, map[string]string{"type": "testtype"}, subLogger, "") @@ -202,7 +200,6 @@ filename: test_files/test_delete.log`, } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { logger, hook := test.NewNullLogger() logger.SetLevel(tc.logLevel) @@ -361,7 +358,6 @@ force_inotify: true`, testPattern), } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { logger, hook := test.NewNullLogger() logger.SetLevel(tc.logLevel) diff --git a/pkg/acquisition/modules/kafka/kafka_test.go b/pkg/acquisition/modules/kafka/kafka_test.go index 245d3ed58c8..7b467142cc9 100644 --- a/pkg/acquisition/modules/kafka/kafka_test.go +++ b/pkg/acquisition/modules/kafka/kafka_test.go @@ -162,7 +162,6 @@ func TestStreamingAcquisition(t *testing.T) { } for _, ts := range tests { - ts := ts t.Run(ts.name, func(t *testing.T) { k := KafkaSource{} @@ -233,7 +232,6 @@ func TestStreamingAcquisitionWithSSL(t *testing.T) { } for _, ts := range tests { - ts := ts t.Run(ts.name, func(t *testing.T) { k := KafkaSource{} diff --git a/pkg/acquisition/modules/syslog/internal/parser/rfc3164/parse_test.go b/pkg/acquisition/modules/syslog/internal/parser/rfc3164/parse_test.go index 48772d596f4..8fb5089a61f 100644 --- a/pkg/acquisition/modules/syslog/internal/parser/rfc3164/parse_test.go +++ b/pkg/acquisition/modules/syslog/internal/parser/rfc3164/parse_test.go @@ -22,7 +22,6 @@ func TestPri(t *testing.T) { } for _, test := range tests { - test := test t.Run(test.input, func(t *testing.T) { r := &RFC3164{} r.buf = []byte(test.input) @@ -64,7 +63,6 @@ func TestTimestamp(t *testing.T) { } for _, test := range tests { - test := test t.Run(test.input, func(t *testing.T) { opts := []RFC3164Option{} if test.currentYear { @@ -118,7 +116,6 @@ func TestHostname(t *testing.T) { } for _, test := range tests { - test 
:= test t.Run(test.input, func(t *testing.T) { opts := []RFC3164Option{} if test.strictHostname { @@ -163,7 +160,6 @@ func TestTag(t *testing.T) { } for _, test := range tests { - test := test t.Run(test.input, func(t *testing.T) { r := &RFC3164{} r.buf = []byte(test.input) @@ -207,7 +203,6 @@ func TestMessage(t *testing.T) { } for _, test := range tests { - test := test t.Run(test.input, func(t *testing.T) { r := &RFC3164{} r.buf = []byte(test.input) @@ -329,7 +324,6 @@ func TestParse(t *testing.T) { } for _, test := range tests { - test := test t.Run(test.input, func(t *testing.T) { r := NewRFC3164Parser(test.opts...) err := r.Parse([]byte(test.input)) diff --git a/pkg/acquisition/modules/syslog/internal/parser/rfc3164/perf_test.go b/pkg/acquisition/modules/syslog/internal/parser/rfc3164/perf_test.go index 42073cafbae..3805090f57f 100644 --- a/pkg/acquisition/modules/syslog/internal/parser/rfc3164/perf_test.go +++ b/pkg/acquisition/modules/syslog/internal/parser/rfc3164/perf_test.go @@ -51,7 +51,6 @@ func BenchmarkParse(b *testing.B) { } var err error for _, test := range tests { - test := test b.Run(string(test.input), func(b *testing.B) { for i := 0; i < b.N; i++ { r := NewRFC3164Parser(test.opts...) 
diff --git a/pkg/acquisition/modules/syslog/internal/parser/rfc5424/parse_test.go b/pkg/acquisition/modules/syslog/internal/parser/rfc5424/parse_test.go index 66a20d594e4..eed72244867 100644 --- a/pkg/acquisition/modules/syslog/internal/parser/rfc5424/parse_test.go +++ b/pkg/acquisition/modules/syslog/internal/parser/rfc5424/parse_test.go @@ -25,7 +25,6 @@ func TestPri(t *testing.T) { } for _, test := range tests { - test := test t.Run(test.input, func(t *testing.T) { r := &RFC5424{} r.buf = []byte(test.input) @@ -61,7 +60,6 @@ func TestHostname(t *testing.T) { } for _, test := range tests { - test := test t.Run(test.input, func(t *testing.T) { opts := []RFC5424Option{} if test.strictHostname { @@ -200,7 +198,6 @@ func TestParse(t *testing.T) { } for _, test := range tests { - test := test t.Run(test.name, func(t *testing.T) { r := NewRFC5424Parser(test.opts...) err := r.Parse([]byte(test.input)) diff --git a/pkg/acquisition/modules/syslog/internal/parser/rfc5424/perf_test.go b/pkg/acquisition/modules/syslog/internal/parser/rfc5424/perf_test.go index 318571e91ee..a86c17e8ddf 100644 --- a/pkg/acquisition/modules/syslog/internal/parser/rfc5424/perf_test.go +++ b/pkg/acquisition/modules/syslog/internal/parser/rfc5424/perf_test.go @@ -92,7 +92,6 @@ func BenchmarkParse(b *testing.B) { } var err error for _, test := range tests { - test := test b.Run(test.label, func(b *testing.B) { for i := 0; i < b.N; i++ { r := NewRFC5424Parser() diff --git a/pkg/acquisition/modules/syslog/syslog_test.go b/pkg/acquisition/modules/syslog/syslog_test.go index 8096740f5e4..0e823ecd32a 100644 --- a/pkg/acquisition/modules/syslog/syslog_test.go +++ b/pkg/acquisition/modules/syslog/syslog_test.go @@ -130,7 +130,6 @@ listen_addr: 127.0.0.1`, } for _, ts := range tests { - ts := ts t.Run(ts.name, func(t *testing.T) { subLogger := log.WithField("type", "syslog") s := SyslogSource{} diff --git a/pkg/apiclient/decisions_service_test.go b/pkg/apiclient/decisions_service_test.go index 
97b5aa26482..6942cfc9d85 100644 --- a/pkg/apiclient/decisions_service_test.go +++ b/pkg/apiclient/decisions_service_test.go @@ -458,7 +458,6 @@ func TestDecisionsStreamOpts_addQueryParamsToURL(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { o := &DecisionsStreamOpts{ Startup: tt.fields.Startup, diff --git a/pkg/apiserver/apic_metrics_test.go b/pkg/apiserver/apic_metrics_test.go index f3b9b352316..15ad63b391b 100644 --- a/pkg/apiserver/apic_metrics_test.go +++ b/pkg/apiserver/apic_metrics_test.go @@ -63,7 +63,6 @@ func TestAPICSendMetrics(t *testing.T) { defer httpmock.Deactivate() for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { url, err := url.ParseRequestURI("http://api.crowdsec.net/") require.NoError(t, err) diff --git a/pkg/apiserver/apic_test.go b/pkg/apiserver/apic_test.go index 10f4cf9444b..dc6ed98f946 100644 --- a/pkg/apiserver/apic_test.go +++ b/pkg/apiserver/apic_test.go @@ -165,7 +165,6 @@ func TestAPICFetchScenariosListFromDB(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { api := getAPIC(t) for machineID, scenarios := range tc.machineIDsWithScenarios { @@ -233,7 +232,6 @@ func TestNewAPIC(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { setConfig() httpmock.Activate() @@ -353,7 +351,6 @@ func TestAPICGetMetrics(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { apiClient := getAPIC(t) cleanUp(apiClient) @@ -461,7 +458,6 @@ func TestCreateAlertsForDecision(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { if got := createAlertsForDecisions(tc.args.decisions); !reflect.DeepEqual(got, tc.want) { t.Errorf("createAlertsForDecisions() = %v, want %v", got, tc.want) @@ -541,7 +537,6 @@ func TestFillAlertsWithDecisions(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { addCounters, _ := 
makeAddAndDeleteCounters() if got := fillAlertsWithDecisions(tc.args.alerts, tc.args.decisions, addCounters); !reflect.DeepEqual(got, tc.want) { @@ -1159,7 +1154,6 @@ func TestAPICPull(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { api = getAPIC(t) api.pullInterval = time.Millisecond @@ -1286,7 +1280,6 @@ func TestShouldShareAlert(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { ret := shouldShareAlert(tc.alert, tc.consoleConfig) assert.Equal(t, tc.expectedRet, ret) diff --git a/pkg/csconfig/api_test.go b/pkg/csconfig/api_test.go index 463b7c1b2ec..079936253a1 100644 --- a/pkg/csconfig/api_test.go +++ b/pkg/csconfig/api_test.go @@ -64,7 +64,6 @@ func TestLoadLocalApiClientCfg(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { err := tc.input.Load() cstest.RequireErrorContains(t, err, tc.expectedErr) @@ -122,7 +121,6 @@ func TestLoadOnlineApiClientCfg(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { err := tc.input.Load() cstest.RequireErrorContains(t, err, tc.expectedErr) @@ -245,7 +243,6 @@ func TestLoadAPIServer(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { err := tc.input.LoadAPIServer(false) cstest.RequireErrorContains(t, err, tc.expectedErr) @@ -309,7 +306,6 @@ func TestParseCapiWhitelists(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { wl, err := parseCapiWhitelists(strings.NewReader(tc.input)) cstest.RequireErrorContains(t, err, tc.expectedErr) diff --git a/pkg/csconfig/config_test.go b/pkg/csconfig/config_test.go index 56ecc202373..11f1f0cf68d 100644 --- a/pkg/csconfig/config_test.go +++ b/pkg/csconfig/config_test.go @@ -32,7 +32,6 @@ func TestNewCrowdSecConfig(t *testing.T) { }, } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { result := &Config{} assert.Equal(t, 
tc.expected, result) diff --git a/pkg/csconfig/crowdsec_service_test.go b/pkg/csconfig/crowdsec_service_test.go index 8d332271b03..2f41beaf55e 100644 --- a/pkg/csconfig/crowdsec_service_test.go +++ b/pkg/csconfig/crowdsec_service_test.go @@ -181,7 +181,6 @@ func TestLoadCrowdsec(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { err := tc.input.LoadCrowdsec() cstest.RequireErrorContains(t, err, tc.expectedErr) diff --git a/pkg/csconfig/cscli_test.go b/pkg/csconfig/cscli_test.go index 807f02d216c..a58fdd6f857 100644 --- a/pkg/csconfig/cscli_test.go +++ b/pkg/csconfig/cscli_test.go @@ -39,7 +39,6 @@ func TestLoadCSCLI(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { err := tc.input.loadCSCLI() cstest.RequireErrorContains(t, err, tc.expectedErr) diff --git a/pkg/csconfig/database_test.go b/pkg/csconfig/database_test.go index c7741baf038..954b1c47fd7 100644 --- a/pkg/csconfig/database_test.go +++ b/pkg/csconfig/database_test.go @@ -46,7 +46,6 @@ func TestLoadDBConfig(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { err := tc.input.LoadDBConfig(false) cstest.RequireErrorContains(t, err, tc.expectedErr) diff --git a/pkg/csconfig/hub_test.go b/pkg/csconfig/hub_test.go index 2f9528c6043..49d010a04f4 100644 --- a/pkg/csconfig/hub_test.go +++ b/pkg/csconfig/hub_test.go @@ -35,7 +35,6 @@ func TestLoadHub(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { err := tc.input.loadHub() cstest.RequireErrorContains(t, err, tc.expectedErr) diff --git a/pkg/csconfig/simulation_test.go b/pkg/csconfig/simulation_test.go index 71b09ee397c..a678d7edd49 100644 --- a/pkg/csconfig/simulation_test.go +++ b/pkg/csconfig/simulation_test.go @@ -76,7 +76,6 @@ func TestSimulationLoading(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { err := tc.input.LoadSimulation() 
cstest.RequireErrorContains(t, err, tc.expectedErr) @@ -124,7 +123,6 @@ func TestIsSimulated(t *testing.T) { }, } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { isSimulated := tc.SimulationConfig.IsSimulated(tc.Input) require.Equal(t, tc.expected, isSimulated) diff --git a/pkg/csplugin/broker_test.go b/pkg/csplugin/broker_test.go index 34c9ce7d684..f2179acb2c1 100644 --- a/pkg/csplugin/broker_test.go +++ b/pkg/csplugin/broker_test.go @@ -129,7 +129,6 @@ func (s *PluginSuite) TestBrokerInit() { } for _, tc := range tests { - tc := tc s.Run(tc.name, func() { t := s.T() if tc.action != nil { diff --git a/pkg/csplugin/listfiles_test.go b/pkg/csplugin/listfiles_test.go index a7b41c51d07..a4188804149 100644 --- a/pkg/csplugin/listfiles_test.go +++ b/pkg/csplugin/listfiles_test.go @@ -47,7 +47,6 @@ func TestListFilesAtPath(t *testing.T) { }, } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { got, err := listFilesAtPath(tc.path) cstest.RequireErrorContains(t, err, tc.expectedErr) diff --git a/pkg/csplugin/utils_test.go b/pkg/csplugin/utils_test.go index f02e7f491b2..7fa9a77acd5 100644 --- a/pkg/csplugin/utils_test.go +++ b/pkg/csplugin/utils_test.go @@ -37,7 +37,6 @@ func TestGetPluginNameAndTypeFromPath(t *testing.T) { }, } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { got, got1, err := getPluginTypeAndSubtypeFromPath(tc.path) cstest.RequireErrorContains(t, err, tc.expectedErr) diff --git a/pkg/csprofiles/csprofiles_test.go b/pkg/csprofiles/csprofiles_test.go index be1d0178e72..0247243ddd3 100644 --- a/pkg/csprofiles/csprofiles_test.go +++ b/pkg/csprofiles/csprofiles_test.go @@ -102,7 +102,6 @@ func TestNewProfile(t *testing.T) { } for _, test := range tests { - test := test t.Run(test.name, func(t *testing.T) { profilesCfg := []*csconfig.ProfileCfg{ test.profileCfg, @@ -196,7 +195,6 @@ func TestEvaluateProfile(t *testing.T) { }, } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t 
*testing.T) { profilesCfg := []*csconfig.ProfileCfg{ tt.args.profileCfg, diff --git a/pkg/exprhelpers/exprlib_test.go b/pkg/exprhelpers/exprlib_test.go index 0f6e1a21e2f..e65d568f65b 100644 --- a/pkg/exprhelpers/exprlib_test.go +++ b/pkg/exprhelpers/exprlib_test.go @@ -1414,7 +1414,6 @@ func TestParseUnixTime(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { output, err := ParseUnixTime(tc.value) cstest.RequireErrorContains(t, err, tc.expectedErr) @@ -1518,7 +1517,6 @@ func TestIsIp(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { vm, err := expr.Compile(tc.expr, GetExprOptions(map[string]interface{}{"value": tc.value})...) if tc.expectedBuildErr { @@ -1571,7 +1569,6 @@ func TestToString(t *testing.T) { }, } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { vm, err := expr.Compile(tc.expr, GetExprOptions(map[string]interface{}{"value": tc.value})...) require.NoError(t, err) @@ -1618,7 +1615,6 @@ func TestB64Decode(t *testing.T) { }, } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { vm, err := expr.Compile(tc.expr, GetExprOptions(map[string]interface{}{"value": tc.value})...) 
if tc.expectedBuildErr { @@ -1691,7 +1687,6 @@ func TestParseKv(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { outMap := make(map[string]interface{}) env := map[string]interface{}{ diff --git a/pkg/exprhelpers/jsonextract_test.go b/pkg/exprhelpers/jsonextract_test.go index 2ee3ada5ea7..80de1619580 100644 --- a/pkg/exprhelpers/jsonextract_test.go +++ b/pkg/exprhelpers/jsonextract_test.go @@ -159,7 +159,6 @@ func TestJsonExtractSlice(t *testing.T) { } for _, test := range tests { - test := test t.Run(test.name, func(t *testing.T) { env := map[string]interface{}{ "blob": test.jsonBlob, @@ -215,7 +214,6 @@ func TestJsonExtractObject(t *testing.T) { } for _, test := range tests { - test := test t.Run(test.name, func(t *testing.T) { env := map[string]interface{}{ "blob": test.jsonBlob, diff --git a/pkg/fflag/features_test.go b/pkg/fflag/features_test.go index 57745b3c38c..481e86573e8 100644 --- a/pkg/fflag/features_test.go +++ b/pkg/fflag/features_test.go @@ -50,8 +50,6 @@ func TestRegisterFeature(t *testing.T) { } for _, tc := range tests { - tc := tc - t.Run("", func(t *testing.T) { fr := fflag.FeatureRegister{EnvPrefix: "FFLAG_TEST_"} err := fr.RegisterFeature(&tc.feature) @@ -112,7 +110,6 @@ func TestGetFeature(t *testing.T) { fr := setUp(t) for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { _, err := fr.GetFeature(tc.feature) cstest.RequireErrorMessage(t, err, tc.expectedErr) @@ -145,7 +142,6 @@ func TestIsEnabled(t *testing.T) { fr := setUp(t) for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { feat, err := fr.GetFeature(tc.feature) require.NoError(t, err) @@ -204,7 +200,6 @@ func TestFeatureSet(t *testing.T) { fr := setUp(t) for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { feat, err := fr.GetFeature(tc.feature) cstest.RequireErrorMessage(t, err, tc.expectedGetErr) @@ -284,7 +279,6 @@ func TestSetFromEnv(t *testing.T) { fr := setUp(t) for _, tc := 
range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { logger, hook := logtest.NewNullLogger() logger.SetLevel(logrus.DebugLevel) @@ -344,7 +338,6 @@ func TestSetFromYaml(t *testing.T) { fr := setUp(t) for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { logger, hook := logtest.NewNullLogger() logger.SetLevel(logrus.DebugLevel) diff --git a/pkg/parser/enrich_date_test.go b/pkg/parser/enrich_date_test.go index 434667cbeaf..930633feb35 100644 --- a/pkg/parser/enrich_date_test.go +++ b/pkg/parser/enrich_date_test.go @@ -44,7 +44,6 @@ func TestDateParse(t *testing.T) { logger := log.WithField("test", "test") for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { strTime, err := ParseDate(tt.evt.StrTime, &tt.evt, logger) cstest.RequireErrorContains(t, err, tt.expectedErr) diff --git a/pkg/parser/whitelist_test.go b/pkg/parser/whitelist_test.go index 501c655243d..02846f17fc1 100644 --- a/pkg/parser/whitelist_test.go +++ b/pkg/parser/whitelist_test.go @@ -62,7 +62,6 @@ func TestWhitelistCompile(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { node.Whitelist = tt.whitelist _, err := node.CompileWLs() @@ -284,7 +283,6 @@ func TestWhitelistCheck(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { var err error node.Whitelist = tt.whitelist diff --git a/pkg/setup/detect_test.go b/pkg/setup/detect_test.go index 0ff3438d974..c744e7d6796 100644 --- a/pkg/setup/detect_test.go +++ b/pkg/setup/detect_test.go @@ -94,7 +94,6 @@ func TestPathExists(t *testing.T) { } for _, tc := range tests { - tc := tc env := setup.NewExprEnvironment(setup.DetectOptions{}, setup.ExprOS{}) t.Run(tc.path, func(t *testing.T) { @@ -148,7 +147,6 @@ func TestVersionCheck(t *testing.T) { } for _, tc := range tests { - tc := tc e := setup.ExprOS{RawVersion: tc.version} t.Run(fmt.Sprintf("Check(%s,%s)", tc.version, tc.constraint), func(t *testing.T) { @@ -248,7 +246,6 @@ func 
TestListSupported(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() @@ -333,7 +330,6 @@ func TestApplyRules(t *testing.T) { env := setup.ExprEnvironment{} for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() @@ -424,7 +420,6 @@ detect: } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { f := tempYAML(t, tc.config) defer os.Remove(f.Name()) @@ -518,7 +513,6 @@ detect: } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { f := tempYAML(t, tc.config) defer os.Remove(f.Name()) @@ -830,7 +824,6 @@ func TestDetectForcedOS(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { f := tempYAML(t, tc.config) defer os.Remove(f.Name()) @@ -1015,7 +1008,6 @@ func TestDetectDatasourceValidation(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { f := tempYAML(t, tc.config) defer os.Remove(f.Name()) diff --git a/pkg/types/event_test.go b/pkg/types/event_test.go index a2fad9ebcc7..97b13f96d9a 100644 --- a/pkg/types/event_test.go +++ b/pkg/types/event_test.go @@ -41,7 +41,6 @@ func TestSetParsed(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { tt.evt.SetParsed(tt.key, tt.value) assert.Equal(t, tt.value, tt.evt.Parsed[tt.key]) @@ -82,7 +81,6 @@ func TestSetMeta(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { tt.evt.SetMeta(tt.key, tt.value) assert.Equal(t, tt.value, tt.evt.GetMeta(tt.key)) @@ -152,7 +150,6 @@ func TestParseIPSources(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { ips := tt.evt.ParseIPSources() assert.Equal(t, tt.expected, ips) From 4b988701ed4660165e3def1be7391f336a68e461 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 21 Jun 2024 13:47:26 +0200 Subject: [PATCH 196/318] lint (intrange) 
(#2970) --- cmd/crowdsec-cli/decisions.go | 2 +- cmd/crowdsec-cli/machines.go | 2 +- cmd/crowdsec-cli/prettytable.go | 2 +- cmd/crowdsec/crowdsec.go | 6 +++--- pkg/acquisition/acquisition.go | 4 ++-- pkg/acquisition/acquisition_test.go | 6 +++--- pkg/acquisition/modules/appsec/appsec.go | 2 +- pkg/acquisition/modules/docker/utils.go | 2 +- pkg/acquisition/modules/file/file_test.go | 2 +- pkg/acquisition/modules/kinesis/kinesis.go | 4 ++-- pkg/acquisition/modules/kinesis/kinesis_test.go | 8 ++++---- pkg/acquisition/modules/loki/loki_test.go | 2 +- .../modules/syslog/internal/parser/utils/utils.go | 2 +- pkg/apiclient/auth_retry.go | 2 +- pkg/apiserver/apic_test.go | 2 +- pkg/apiserver/controllers/v1/alerts.go | 2 +- pkg/csplugin/watcher_test.go | 2 +- pkg/exprhelpers/debugger.go | 2 +- pkg/exprhelpers/helpers.go | 2 +- pkg/leakybucket/manager_run.go | 2 +- pkg/parser/parsing_test.go | 4 ++-- 21 files changed, 31 insertions(+), 31 deletions(-) diff --git a/cmd/crowdsec-cli/decisions.go b/cmd/crowdsec-cli/decisions.go index df35fb9bb18..92a0de72e58 100644 --- a/cmd/crowdsec-cli/decisions.go +++ b/cmd/crowdsec-cli/decisions.go @@ -28,7 +28,7 @@ func (cli *cliDecisions) decisionsToTable(alerts *models.GetAlertsResponse, prin spamLimit := make(map[string]bool) skipped := 0 - for aIdx := 0; aIdx < len(*alerts); aIdx++ { + for aIdx := range len(*alerts) { alertItem := (*alerts)[aIdx] newDecisions := make([]*models.Decision, 0) diff --git a/cmd/crowdsec-cli/machines.go b/cmd/crowdsec-cli/machines.go index 20933dc28e5..9014d3d803a 100644 --- a/cmd/crowdsec-cli/machines.go +++ b/cmd/crowdsec-cli/machines.go @@ -41,7 +41,7 @@ func generatePassword(length int) string { buf := make([]byte, length) - for i := 0; i < length; i++ { + for i := range length { rInt, err := saferand.Int(saferand.Reader, big.NewInt(int64(charsetLength))) if err != nil { log.Fatalf("failed getting data from prng for password generation : %s", err) diff --git a/cmd/crowdsec-cli/prettytable.go 
b/cmd/crowdsec-cli/prettytable.go index 0ce7e9755f9..f17472722f1 100644 --- a/cmd/crowdsec-cli/prettytable.go +++ b/cmd/crowdsec-cli/prettytable.go @@ -91,7 +91,7 @@ func newLightTable(output io.Writer) *Table { func (t *Table) setColumnConfigs() { configs := []table.ColumnConfig{} // the go-pretty table does not expose the names or number of columns - for i := 0; i < len(t.align); i++ { + for i := range len(t.align) { configs = append(configs, table.ColumnConfig{ Number: i + 1, AlignHeader: t.alignHeader[i], diff --git a/cmd/crowdsec/crowdsec.go b/cmd/crowdsec/crowdsec.go index 0fd269b3537..2be8a84fec0 100644 --- a/cmd/crowdsec/crowdsec.go +++ b/cmd/crowdsec/crowdsec.go @@ -70,7 +70,7 @@ func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.H parsersTomb.Go(func() error { parserWg.Add(1) - for i := 0; i < cConfig.Crowdsec.ParserRoutinesCount; i++ { + for range cConfig.Crowdsec.ParserRoutinesCount { parsersTomb.Go(func() error { defer trace.CatchPanic("crowdsec/runParse") @@ -101,7 +101,7 @@ func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.H } } - for i := 0; i < cConfig.Crowdsec.BucketsRoutinesCount; i++ { + for range cConfig.Crowdsec.BucketsRoutinesCount { bucketsTomb.Go(func() error { defer trace.CatchPanic("crowdsec/runPour") @@ -127,7 +127,7 @@ func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.H outputsTomb.Go(func() error { outputWg.Add(1) - for i := 0; i < cConfig.Crowdsec.OutputRoutinesCount; i++ { + for range cConfig.Crowdsec.OutputRoutinesCount { outputsTomb.Go(func() error { defer trace.CatchPanic("crowdsec/runOutput") diff --git a/pkg/acquisition/acquisition.go b/pkg/acquisition/acquisition.go index 069bf67d852..6a91423c158 100644 --- a/pkg/acquisition/acquisition.go +++ b/pkg/acquisition/acquisition.go @@ -270,7 +270,7 @@ func LoadAcquisitionFromFile(config *csconfig.CrowdsecServiceCfg, prom *csconfig func GetMetrics(sources []DataSource, aggregated bool) error { var 
metrics []prometheus.Collector - for i := 0; i < len(sources); i++ { + for i := range len(sources) { if aggregated { metrics = sources[i].GetMetrics() } else { @@ -344,7 +344,7 @@ func StartAcquisition(sources []DataSource, output chan types.Event, AcquisTomb return nil } - for i := 0; i < len(sources); i++ { + for i := range len(sources) { subsrc := sources[i] //ensure its a copy log.Debugf("starting one source %d/%d ->> %T", i, len(sources), subsrc) diff --git a/pkg/acquisition/acquisition_test.go b/pkg/acquisition/acquisition_test.go index cc57aecb732..a5eecbc20ed 100644 --- a/pkg/acquisition/acquisition_test.go +++ b/pkg/acquisition/acquisition_test.go @@ -321,7 +321,7 @@ func (f *MockCat) UnmarshalConfig(cfg []byte) error { return nil } func (f *MockCat) GetName() string { return "mock_cat" } func (f *MockCat) GetMode() string { return "cat" } func (f *MockCat) OneShotAcquisition(out chan types.Event, tomb *tomb.Tomb) error { - for i := 0; i < 10; i++ { + for range 10 { evt := types.Event{} evt.Line.Src = "test" out <- evt @@ -368,7 +368,7 @@ func (f *MockTail) OneShotAcquisition(out chan types.Event, tomb *tomb.Tomb) err return errors.New("can't run in cat mode") } func (f *MockTail) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { - for i := 0; i < 10; i++ { + for range 10 { evt := types.Event{} evt.Line.Src = "test" out <- evt @@ -451,7 +451,7 @@ type MockTailError struct { } func (f *MockTailError) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { - for i := 0; i < 10; i++ { + for range 10 { evt := types.Event{} evt.Line.Src = "test" out <- evt diff --git a/pkg/acquisition/modules/appsec/appsec.go b/pkg/acquisition/modules/appsec/appsec.go index 1d9a9bfec3c..e1415faa25b 100644 --- a/pkg/acquisition/modules/appsec/appsec.go +++ b/pkg/acquisition/modules/appsec/appsec.go @@ -202,7 +202,7 @@ func (w *AppsecSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLe w.AppsecRunners = make([]AppsecRunner, w.config.Routines) 
- for nbRoutine := 0; nbRoutine < w.config.Routines; nbRoutine++ { + for nbRoutine := range w.config.Routines { appsecRunnerUUID := uuid.New().String() //we copy AppsecRutime for each runner wrt := *w.AppsecRuntime diff --git a/pkg/acquisition/modules/docker/utils.go b/pkg/acquisition/modules/docker/utils.go index 5439163e5b9..c724f581194 100644 --- a/pkg/acquisition/modules/docker/utils.go +++ b/pkg/acquisition/modules/docker/utils.go @@ -22,7 +22,7 @@ func parseKeyToMap(m map[string]interface{}, key string, value string) { return } - for i := 0; i < len(parts); i++ { + for i := range len(parts) { if parts[i] == "" { return } diff --git a/pkg/acquisition/modules/file/file_test.go b/pkg/acquisition/modules/file/file_test.go index 2f84c6de324..5d38552b3c5 100644 --- a/pkg/acquisition/modules/file/file_test.go +++ b/pkg/acquisition/modules/file/file_test.go @@ -401,7 +401,7 @@ force_inotify: true`, testPattern), fd, err := os.Create("test_files/stream.log") require.NoError(t, err, "could not create test file") - for i := 0; i < 5; i++ { + for i := range 5 { _, err = fmt.Fprintf(fd, "%d\n", i) if err != nil { os.Remove("test_files/stream.log") diff --git a/pkg/acquisition/modules/kinesis/kinesis.go b/pkg/acquisition/modules/kinesis/kinesis.go index 485cefcf01d..0e6c1980fa9 100644 --- a/pkg/acquisition/modules/kinesis/kinesis.go +++ b/pkg/acquisition/modules/kinesis/kinesis.go @@ -209,7 +209,7 @@ func (k *KinesisSource) decodeFromSubscription(record []byte) ([]CloudwatchSubsc func (k *KinesisSource) WaitForConsumerDeregistration(consumerName string, streamARN string) error { maxTries := k.Config.MaxRetries - for i := 0; i < maxTries; i++ { + for i := range maxTries { _, err := k.kClient.DescribeStreamConsumer(&kinesis.DescribeStreamConsumerInput{ ConsumerName: aws.String(consumerName), StreamARN: aws.String(streamARN), @@ -250,7 +250,7 @@ func (k *KinesisSource) DeregisterConsumer() error { func (k *KinesisSource) WaitForConsumerRegistration(consumerARN string) error { 
maxTries := k.Config.MaxRetries - for i := 0; i < maxTries; i++ { + for i := range maxTries { describeOutput, err := k.kClient.DescribeStreamConsumer(&kinesis.DescribeStreamConsumerInput{ ConsumerARN: aws.String(consumerARN), }) diff --git a/pkg/acquisition/modules/kinesis/kinesis_test.go b/pkg/acquisition/modules/kinesis/kinesis_test.go index d1d398c129e..ae601020621 100644 --- a/pkg/acquisition/modules/kinesis/kinesis_test.go +++ b/pkg/acquisition/modules/kinesis/kinesis_test.go @@ -71,7 +71,7 @@ func WriteToStream(streamName string, count int, shards int, sub bool) { } sess := session.Must(session.NewSession()) kinesisClient := kinesis.New(sess, aws.NewConfig().WithEndpoint(endpoint).WithRegion("us-east-1")) - for i := 0; i < count; i++ { + for i := range count { partition := "partition" if shards != 1 { partition = fmt.Sprintf("partition-%d", i%shards) @@ -182,7 +182,7 @@ stream_name: stream-1-shard`, //Allow the datasource to start listening to the stream time.Sleep(4 * time.Second) WriteToStream(f.Config.StreamName, test.count, test.shards, false) - for i := 0; i < test.count; i++ { + for i := range test.count { e := <-out assert.Equal(t, fmt.Sprintf("%d", i), e.Line.Raw) } @@ -227,7 +227,7 @@ stream_name: stream-2-shards`, time.Sleep(4 * time.Second) WriteToStream(f.Config.StreamName, test.count, test.shards, false) c := 0 - for i := 0; i < test.count; i++ { + for range test.count { <-out c += 1 } @@ -273,7 +273,7 @@ from_subscription: true`, //Allow the datasource to start listening to the stream time.Sleep(4 * time.Second) WriteToStream(f.Config.StreamName, test.count, test.shards, true) - for i := 0; i < test.count; i++ { + for i := range test.count { e := <-out assert.Equal(t, fmt.Sprintf("%d", i), e.Line.Raw) } diff --git a/pkg/acquisition/modules/loki/loki_test.go b/pkg/acquisition/modules/loki/loki_test.go index 051a9b93ed5..5f41cd4c62e 100644 --- a/pkg/acquisition/modules/loki/loki_test.go +++ b/pkg/acquisition/modules/loki/loki_test.go @@ -274,7 
+274,7 @@ func feedLoki(logger *log.Entry, n int, title string) error { }, }, } - for i := 0; i < n; i++ { + for i := range n { streams.Streams[0].Values[i] = LogValue{ Time: time.Now(), Line: fmt.Sprintf("Log line #%d %v", i, title), diff --git a/pkg/acquisition/modules/syslog/internal/parser/utils/utils.go b/pkg/acquisition/modules/syslog/internal/parser/utils/utils.go index 8fe717a6ab2..5e0bf8fe771 100644 --- a/pkg/acquisition/modules/syslog/internal/parser/utils/utils.go +++ b/pkg/acquisition/modules/syslog/internal/parser/utils/utils.go @@ -34,7 +34,7 @@ func isValidHostname(s string) bool { last := byte('.') nonNumeric := false // true once we've seen a letter or hyphen partlen := 0 - for i := 0; i < len(s); i++ { + for i := range len(s) { c := s[i] switch { default: diff --git a/pkg/apiclient/auth_retry.go b/pkg/apiclient/auth_retry.go index 8ec8823f6e7..a17725439bc 100644 --- a/pkg/apiclient/auth_retry.go +++ b/pkg/apiclient/auth_retry.go @@ -41,7 +41,7 @@ func (r retryRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) maxAttempts = 1 } - for i := 0; i < maxAttempts; i++ { + for i := range maxAttempts { if i > 0 { if r.withBackOff { //nolint:gosec diff --git a/pkg/apiserver/apic_test.go b/pkg/apiserver/apic_test.go index dc6ed98f946..a3aa956ed98 100644 --- a/pkg/apiserver/apic_test.go +++ b/pkg/apiserver/apic_test.go @@ -1077,7 +1077,7 @@ func TestAPICPush(t *testing.T) { expectedCalls: 2, alerts: func() []*models.Alert { alerts := make([]*models.Alert, 100) - for i := 0; i < 100; i++ { + for i := range 100 { alerts[i] = &models.Alert{ Scenario: ptr.Of("crowdsec/test"), ScenarioHash: ptr.Of("certified"), diff --git a/pkg/apiserver/controllers/v1/alerts.go b/pkg/apiserver/controllers/v1/alerts.go index 7483e8dcdf9..c8cd54203bc 100644 --- a/pkg/apiserver/controllers/v1/alerts.go +++ b/pkg/apiserver/controllers/v1/alerts.go @@ -109,7 +109,7 @@ func FormatAlerts(result []*ent.Alert) models.AddAlertsRequest { func (c *Controller) 
sendAlertToPluginChannel(alert *models.Alert, profileID uint) { if c.PluginChannel != nil { RETRY: - for try := 0; try < 3; try++ { + for try := range 3 { select { case c.PluginChannel <- csplugin.ProfileAlert{ProfileID: profileID, Alert: alert}: log.Debugf("alert sent to Plugin channel") diff --git a/pkg/csplugin/watcher_test.go b/pkg/csplugin/watcher_test.go index d0bb7b2f142..b76c3c4eadd 100644 --- a/pkg/csplugin/watcher_test.go +++ b/pkg/csplugin/watcher_test.go @@ -34,7 +34,7 @@ func resetWatcherAlertCounter(pw *PluginWatcher) { } func insertNAlertsToPlugin(pw *PluginWatcher, n int, pluginName string) { - for i := 0; i < n; i++ { + for range n { pw.Inserts <- pluginName } } diff --git a/pkg/exprhelpers/debugger.go b/pkg/exprhelpers/debugger.go index 5ab2fc48c70..a2bd489acfa 100644 --- a/pkg/exprhelpers/debugger.go +++ b/pkg/exprhelpers/debugger.go @@ -346,7 +346,7 @@ func (erp ExprRuntimeDebug) ipDebug(ip int, vm *vm.VM, program *vm.Program, part } func (erp ExprRuntimeDebug) ipSeek(ip int) []string { - for i := 0; i < len(erp.Lines); i++ { + for i := range len(erp.Lines) { parts := strings.Split(erp.Lines[i], "\t") if parts[0] == strconv.Itoa(ip) { return parts diff --git a/pkg/exprhelpers/helpers.go b/pkg/exprhelpers/helpers.go index 5c041aa2886..43be5d82de0 100644 --- a/pkg/exprhelpers/helpers.go +++ b/pkg/exprhelpers/helpers.go @@ -270,7 +270,7 @@ func flatten(args []interface{}, v reflect.Value) []interface{} { } if v.Kind() == reflect.Array || v.Kind() == reflect.Slice { - for i := 0; i < v.Len(); i++ { + for i := range v.Len() { args = flatten(args, v.Index(i)) } } else { diff --git a/pkg/leakybucket/manager_run.go b/pkg/leakybucket/manager_run.go index 1d34c238ea5..673b372d81e 100644 --- a/pkg/leakybucket/manager_run.go +++ b/pkg/leakybucket/manager_run.go @@ -298,7 +298,7 @@ func PourItemToHolders(parsed types.Event, holders []BucketFactory, buckets *Buc BucketPourCache["OK"] = append(BucketPourCache["OK"], evt.(types.Event)) } //find the relevant 
holders (scenarios) - for idx := 0; idx < len(holders); idx++ { + for idx := range len(holders) { //for idx, holder := range holders { //evaluate bucket's condition diff --git a/pkg/parser/parsing_test.go b/pkg/parser/parsing_test.go index c5b1c353e9f..0542c69c049 100644 --- a/pkg/parser/parsing_test.go +++ b/pkg/parser/parsing_test.go @@ -151,7 +151,7 @@ func testOneParser(pctx *UnixParserCtx, ectx EnricherCtx, dir string, b *testing b.ResetTimer() } - for n := 0; n < count; n++ { + for range(count) { if !testFile(tests, *pctx, pnodes) { return errors.New("test failed") } @@ -285,7 +285,7 @@ func matchEvent(expected types.Event, out types.Event, debug bool) ([]string, bo valid = true - for mapIdx := 0; mapIdx < len(expectMaps); mapIdx++ { + for mapIdx := range(len(expectMaps)) { for expKey, expVal := range expectMaps[mapIdx] { outVal, ok := outMaps[mapIdx][expKey] if !ok { From 0e93f98cad58a88be3952d0a45564f532538a101 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 21 Jun 2024 14:31:45 +0200 Subject: [PATCH 197/318] lint: github.com/pkg/errors -> errors (#3091) --- .golangci.yml | 2 -- pkg/acquisition/modules/appsec/appsec.go | 11 +++++------ pkg/exprhelpers/crowdsec_cti.go | 4 ++-- pkg/exprhelpers/exprlib_test.go | 16 ++++++++-------- 4 files changed, 15 insertions(+), 18 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index bb20d42699f..ffe634ca0ab 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -52,8 +52,6 @@ linters-settings: desc: "errors.Wrap() is deprecated in favor of fmt.Errorf()" files: - "!**/pkg/database/*.go" - - "!**/pkg/exprhelpers/*.go" - - "!**/pkg/acquisition/modules/appsec/appsec.go" - "!**/pkg/apiserver/controllers/v1/errors.go" yaml: files: diff --git a/pkg/acquisition/modules/appsec/appsec.go b/pkg/acquisition/modules/appsec/appsec.go index e1415faa25b..b141ee6d666 100644 --- a/pkg/acquisition/modules/appsec/appsec.go +++ b/pkg/acquisition/modules/appsec/appsec.go @@ -17,7 +17,6 @@ import ( 
"github.com/crowdsecurity/crowdsec/pkg/types" "github.com/crowdsecurity/go-cs-lib/trace" "github.com/google/uuid" - "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" log "github.com/sirupsen/logrus" "gopkg.in/tomb.v2" @@ -98,7 +97,7 @@ func (w *AppsecSource) UnmarshalConfig(yamlConfig []byte) error { err := yaml.UnmarshalStrict(yamlConfig, &w.config) if err != nil { - return errors.Wrap(err, "Cannot parse appsec configuration") + return fmt.Errorf("Cannot parse appsec configuration: %w", err) } if w.config.ListenAddr == "" && w.config.ListenSocket == "" { @@ -153,7 +152,7 @@ func (w *AppsecSource) GetAggregMetrics() []prometheus.Collector { func (w *AppsecSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLevel int) error { err := w.UnmarshalConfig(yamlConfig) if err != nil { - return errors.Wrap(err, "unable to parse appsec configuration") + return fmt.Errorf("unable to parse appsec configuration: %w", err) } w.logger = logger w.metricsLevel = MetricsLevel @@ -263,7 +262,7 @@ func (w *AppsecSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) _ = os.RemoveAll(w.config.ListenSocket) listener, err := net.Listen("unix", w.config.ListenSocket) if err != nil { - return errors.Wrap(err, "Appsec server failed") + return fmt.Errorf("Appsec server failed: %w", err) } defer listener.Close() if w.config.CertFilePath != "" && w.config.KeyFilePath != "" { @@ -272,7 +271,7 @@ func (w *AppsecSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) err = w.server.Serve(listener) } if err != nil && err != http.ErrServerClosed { - return errors.Wrap(err, "Appsec server failed") + return fmt.Errorf("Appsec server failed: %w", err) } } return nil @@ -288,7 +287,7 @@ func (w *AppsecSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) } if err != nil && err != http.ErrServerClosed { - return errors.Wrap(err, "Appsec server failed") + return fmt.Errorf("Appsec server failed: %w", err) } } return nil diff --git 
a/pkg/exprhelpers/crowdsec_cti.go b/pkg/exprhelpers/crowdsec_cti.go index 4e54b07bfc2..4103bad1d43 100644 --- a/pkg/exprhelpers/crowdsec_cti.go +++ b/pkg/exprhelpers/crowdsec_cti.go @@ -1,13 +1,13 @@ package exprhelpers import ( + "errors" "fmt" "time" "github.com/bluele/gcache" "github.com/crowdsecurity/crowdsec/pkg/cticlient" "github.com/crowdsecurity/crowdsec/pkg/types" - "github.com/pkg/errors" log "github.com/sirupsen/logrus" ) @@ -40,7 +40,7 @@ func InitCrowdsecCTI(Key *string, TTL *time.Duration, Size *int, LogLevel *log.L } clog := log.New() if err := types.ConfigureLogger(clog); err != nil { - return errors.Wrap(err, "while configuring datasource logger") + return fmt.Errorf("while configuring datasource logger: %w", err) } if LogLevel != nil { clog.SetLevel(*LogLevel) diff --git a/pkg/exprhelpers/exprlib_test.go b/pkg/exprhelpers/exprlib_test.go index e65d568f65b..33025d4a992 100644 --- a/pkg/exprhelpers/exprlib_test.go +++ b/pkg/exprhelpers/exprlib_test.go @@ -2,12 +2,12 @@ package exprhelpers import ( "context" + "errors" "os" "testing" "time" "github.com/antonmedv/expr" - "github.com/pkg/errors" log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -936,7 +936,7 @@ func TestGetDecisionsCount(t *testing.T) { SaveX(context.Background()) if decision == nil { - require.Error(t, errors.Errorf("Failed to create sample decision")) + require.Error(t, errors.New("Failed to create sample decision")) } err = Init(dbClient) @@ -1022,7 +1022,7 @@ func TestGetDecisionsSinceCount(t *testing.T) { SetOrigin("CAPI"). SaveX(context.Background()) if decision == nil { - require.Error(t, errors.Errorf("Failed to create sample decision")) + require.Error(t, errors.New("Failed to create sample decision")) } decision2 := dbClient.Ent.Decision.Create(). 
@@ -1041,7 +1041,7 @@ func TestGetDecisionsSinceCount(t *testing.T) { SaveX(context.Background()) if decision2 == nil { - require.Error(t, errors.Errorf("Failed to create sample decision")) + require.Error(t, errors.New("Failed to create sample decision")) } err = Init(dbClient) @@ -1147,7 +1147,7 @@ func TestGetActiveDecisionsCount(t *testing.T) { SaveX(context.Background()) if decision == nil { - require.Error(t, errors.Errorf("Failed to create sample decision")) + require.Error(t, errors.New("Failed to create sample decision")) } expiredDecision := dbClient.Ent.Decision.Create(). @@ -1165,7 +1165,7 @@ func TestGetActiveDecisionsCount(t *testing.T) { SaveX(context.Background()) if expiredDecision == nil { - require.Error(t, errors.Errorf("Failed to create sample decision")) + require.Error(t, errors.New("Failed to create sample decision")) } err = Init(dbClient) @@ -1253,7 +1253,7 @@ func TestGetActiveDecisionsTimeLeft(t *testing.T) { SaveX(context.Background()) if decision == nil { - require.Error(t, errors.Errorf("Failed to create sample decision")) + require.Error(t, errors.New("Failed to create sample decision")) } longerDecision := dbClient.Ent.Decision.Create(). 
@@ -1271,7 +1271,7 @@ func TestGetActiveDecisionsTimeLeft(t *testing.T) { SaveX(context.Background()) if longerDecision == nil { - require.Error(t, errors.Errorf("Failed to create sample decision")) + require.Error(t, errors.New("Failed to create sample decision")) } err = Init(dbClient) From 3097c0ce908cd103aca734a22af58fcd36022772 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 25 Jun 2024 11:28:27 +0200 Subject: [PATCH 198/318] lint: replace gocognit with revive (#3094) --- .golangci.yml | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index ffe634ca0ab..ea871205485 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -12,10 +12,6 @@ linters-settings: replace-allow-list: - golang.org/x/time/rate - gocognit: - # lower this after refactoring - min-complexity: 118 - govet: enable-all: true disable: @@ -100,7 +96,8 @@ linters-settings: - name: add-constant disabled: true - name: cognitive-complexity - disabled: true + # lower this after refactoring + arguments: [119] - name: comment-spacings disabled: true - name: confusing-results @@ -192,10 +189,11 @@ linters: # Redundant # - - gocyclo # revive - - cyclop # revive - - lll # revive - - funlen # revive + - gocyclo # revive + - cyclop # revive + - lll # revive + - funlen # revive + - gocognit # revive # # Disabled until fixed for go 1.22 @@ -223,7 +221,6 @@ linters: # - gocheckcompilerdirectives # Checks that go compiler directive comments (//go:) are valid. # - gochecknoinits # Checks that no init functions are present in Go code # - gochecksumtype # Run exhaustiveness checks on Go "sum types" - # - gocognit # Computes and checks the cognitive complexity of functions # - gocritic # Provides diagnostics that check for bugs, performance and style issues. # - goheader # Checks is file header matches to pattern # - gomoddirectives # Manage the use of 'replace', 'retract', and 'excludes' directives in go.mod. 
From a3bd9baec1ff5faf1ae3e126d45bfa68601bd805 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 26 Jun 2024 10:41:30 +0200 Subject: [PATCH 199/318] improved tls middleware revocation checks (#3034) --- .golangci.yml | 4 - docker/test/tests/test_tls.py | 2 +- pkg/apiclient/alerts_service.go | 2 - pkg/apiclient/auth_service.go | 2 - pkg/apiserver/middlewares/v1/api_key.go | 12 +- pkg/apiserver/middlewares/v1/cache.go | 99 ++++++ pkg/apiserver/middlewares/v1/crl.go | 145 +++++++++ pkg/apiserver/middlewares/v1/jwt.go | 31 +- pkg/apiserver/middlewares/v1/ocsp.go | 100 ++++++ pkg/apiserver/middlewares/v1/tls_auth.go | 305 ++++++------------ test/bats/11_bouncers_tls.bats | 181 ++++++++--- test/bats/30_machines_tls.bats | 194 +++++++---- test/bats/testdata/cfssl/agent.json | 16 +- test/bats/testdata/cfssl/agent_invalid.json | 16 +- test/bats/testdata/cfssl/bouncer.json | 16 +- test/bats/testdata/cfssl/bouncer_invalid.json | 16 +- test/bats/testdata/cfssl/ca.json | 16 - ...intermediate.json => ca_intermediate.json} | 18 +- test/bats/testdata/cfssl/ca_root.json | 16 + test/bats/testdata/cfssl/profiles.json | 71 ++-- test/bats/testdata/cfssl/server.json | 24 +- test/lib/setup_file.sh | 5 + 22 files changed, 835 insertions(+), 456 deletions(-) create mode 100644 pkg/apiserver/middlewares/v1/cache.go create mode 100644 pkg/apiserver/middlewares/v1/crl.go create mode 100644 pkg/apiserver/middlewares/v1/ocsp.go delete mode 100644 test/bats/testdata/cfssl/ca.json rename test/bats/testdata/cfssl/{intermediate.json => ca_intermediate.json} (53%) create mode 100644 test/bats/testdata/cfssl/ca_root.json diff --git a/.golangci.yml b/.golangci.yml index ea871205485..d89c8e9ed01 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -462,10 +462,6 @@ issues: path: pkg/hubtest/hubtest_item.go text: "cyclomatic: .*RunWithLogFile" - - linters: - - canonicalheader - path: pkg/apiserver/middlewares/v1/tls_auth.go - # tolerate complex functions in tests for 
now - linters: - maintidx diff --git a/docker/test/tests/test_tls.py b/docker/test/tests/test_tls.py index fe899b000af..d2f512fcbc1 100644 --- a/docker/test/tests/test_tls.py +++ b/docker/test/tests/test_tls.py @@ -281,7 +281,7 @@ def test_tls_client_ou(crowdsec, flavor, certs_dir): lapi.wait_for_http(8080, '/health', want_status=None) with cs_agent as agent: lapi.wait_for_log([ - "*client certificate OU (?custom-client-ou?) doesn't match expected OU (?agent-ou?)*", + "*client certificate OU ?custom-client-ou? doesn't match expected OU ?agent-ou?*", ]) lapi_env['AGENTS_ALLOWED_OU'] = 'custom-client-ou' diff --git a/pkg/apiclient/alerts_service.go b/pkg/apiclient/alerts_service.go index ad75dd39342..a3da84d306e 100644 --- a/pkg/apiclient/alerts_service.go +++ b/pkg/apiclient/alerts_service.go @@ -10,8 +10,6 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/models" ) -// type ApiAlerts service - type AlertsService service type AlertsListOpts struct { diff --git a/pkg/apiclient/auth_service.go b/pkg/apiclient/auth_service.go index e4350385237..e7a423cfd95 100644 --- a/pkg/apiclient/auth_service.go +++ b/pkg/apiclient/auth_service.go @@ -8,8 +8,6 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/models" ) -// type ApiAlerts service - type AuthService service // Don't add it to the models, as they are used with LAPI, but the enroll endpoint is specific to CAPI diff --git a/pkg/apiserver/middlewares/v1/api_key.go b/pkg/apiserver/middlewares/v1/api_key.go index 314a4da1046..e822666db0f 100644 --- a/pkg/apiserver/middlewares/v1/api_key.go +++ b/pkg/apiserver/middlewares/v1/api_key.go @@ -60,18 +60,13 @@ func HashSHA512(str string) string { func (a *APIKey) authTLS(c *gin.Context, logger *log.Entry) *ent.Bouncer { if a.TlsAuth == nil { - logger.Error("TLS Auth is not configured but client presented a certificate") - return nil - } - - validCert, extractedCN, err := a.TlsAuth.ValidateCert(c) - if !validCert { - logger.Error(err) + logger.Warn("TLS Auth is not configured 
but client presented a certificate") return nil } + extractedCN, err := a.TlsAuth.ValidateCert(c) if err != nil { - logger.Error(err) + logger.Warn(err) return nil } @@ -148,6 +143,7 @@ func (a *APIKey) MiddlewareFunc() gin.HandlerFunc { } if bouncer == nil { + // XXX: StatusUnauthorized? c.JSON(http.StatusForbidden, gin.H{"message": "access forbidden"}) c.Abort() diff --git a/pkg/apiserver/middlewares/v1/cache.go b/pkg/apiserver/middlewares/v1/cache.go new file mode 100644 index 00000000000..a058ec40393 --- /dev/null +++ b/pkg/apiserver/middlewares/v1/cache.go @@ -0,0 +1,99 @@ +package v1 + +import ( + "crypto/x509" + "sync" + "time" + + log "github.com/sirupsen/logrus" +) + +type cacheEntry struct { + err error // if nil, the certificate is not revocated + timestamp time.Time +} + +type RevocationCache struct { + mu sync.RWMutex + cache map[string]cacheEntry + expiration time.Duration + lastPurge time.Time + logger *log.Entry +} + +func NewRevocationCache(expiration time.Duration, logger *log.Entry) *RevocationCache { + return &RevocationCache{ + cache: make(map[string]cacheEntry), + expiration: expiration, + lastPurge: time.Now(), + logger: logger, + } +} + +func (*RevocationCache) generateKey(cert *x509.Certificate) string { + return cert.SerialNumber.String() + "-" + cert.Issuer.String() +} + +// purge removes expired entries from the cache +func (rc *RevocationCache) purgeExpired() { + // we don't keep a separate interval for the full sweep, we'll just double the expiration + if time.Since(rc.lastPurge) < rc.expiration { + return + } + + rc.mu.Lock() + defer rc.mu.Unlock() + + for key, entry := range rc.cache { + if time.Since(entry.timestamp) > rc.expiration { + rc.logger.Debugf("purging expired entry for cert %s", key) + delete(rc.cache, key) + } + } +} + +func (rc *RevocationCache) Get(cert *x509.Certificate) (error, bool) { //nolint:revive + rc.purgeExpired() + key := rc.generateKey(cert) + rc.mu.RLock() + entry, exists := rc.cache[key] + rc.mu.RUnlock() 
+ + if !exists { + rc.logger.Tracef("no cached value for cert %s", key) + return nil, false + } + + // Upgrade to write lock to potentially modify the cache + rc.mu.Lock() + defer rc.mu.Unlock() + + if entry.timestamp.Add(rc.expiration).Before(time.Now()) { + rc.logger.Debugf("cached value for %s expired, removing from cache", key) + delete(rc.cache, key) + + return nil, false + } + + rc.logger.Debugf("using cached value for cert %s: %v", key, entry.err) + + return entry.err, true +} + +func (rc *RevocationCache) Set(cert *x509.Certificate, err error) { + key := rc.generateKey(cert) + + rc.mu.Lock() + defer rc.mu.Unlock() + + rc.cache[key] = cacheEntry{ + err: err, + timestamp: time.Now(), + } +} + +func (rc *RevocationCache) Empty() { + rc.mu.Lock() + defer rc.mu.Unlock() + rc.cache = make(map[string]cacheEntry) +} diff --git a/pkg/apiserver/middlewares/v1/crl.go b/pkg/apiserver/middlewares/v1/crl.go new file mode 100644 index 00000000000..f85a410998e --- /dev/null +++ b/pkg/apiserver/middlewares/v1/crl.go @@ -0,0 +1,145 @@ +package v1 + +import ( + "crypto/x509" + "encoding/pem" + "fmt" + "os" + "sync" + "time" + + log "github.com/sirupsen/logrus" +) + +type CRLChecker struct { + path string // path to the CRL file + fileInfo os.FileInfo // last stat of the CRL file + crls []*x509.RevocationList // parsed CRLs + logger *log.Entry + mu sync.RWMutex + lastLoad time.Time // time when the CRL file was last read successfully + onLoad func() // called when the CRL file changes (and is read successfully) +} + +func NewCRLChecker(crlPath string, onLoad func(), logger *log.Entry) (*CRLChecker, error) { + cc := &CRLChecker{ + path: crlPath, + logger: logger, + onLoad: onLoad, + } + + err := cc.refresh() + if err != nil { + return nil, err + } + + return cc, nil +} + +func (*CRLChecker) decodeCRLs(content []byte) ([]*x509.RevocationList, error) { + var crls []*x509.RevocationList + + for { + block, rest := pem.Decode(content) + if block == nil { + break // no more PEM 
blocks + } + + content = rest + + crl, err := x509.ParseRevocationList(block.Bytes) + if err != nil { + // invalidate the whole CRL file so we can still use the previous version + return nil, fmt.Errorf("could not parse file: %w", err) + } + + crls = append(crls, crl) + } + + return crls, nil +} + +// refresh() reads the CRL file if new or changed since the last time +func (cc *CRLChecker) refresh() error { + // noop if lastLoad is less than 5 seconds ago + if time.Since(cc.lastLoad) < 5*time.Second { + return nil + } + + cc.mu.Lock() + defer cc.mu.Unlock() + + cc.logger.Debugf("loading CRL file from %s", cc.path) + + fileInfo, err := os.Stat(cc.path) + if err != nil { + return fmt.Errorf("could not access CRL file: %w", err) + } + + // noop if the file didn't change + if cc.fileInfo != nil && fileInfo.ModTime().Equal(cc.fileInfo.ModTime()) && fileInfo.Size() == cc.fileInfo.Size() { + return nil + } + + // the encoding/pem package wants bytes, not io.Reader + crlContent, err := os.ReadFile(cc.path) + if err != nil { + return fmt.Errorf("could not read CRL file: %w", err) + } + + cc.crls, err = cc.decodeCRLs(crlContent) + if err != nil { + return err + } + + cc.fileInfo = fileInfo + cc.lastLoad = time.Now() + cc.onLoad() + + return nil +} + +// isRevoked checks if the client certificate is revoked by any of the CRL blocks +// It returns a boolean indicating if the certificate is revoked and a boolean indicating +// if the CRL check was successful and could be cached. 
+func (cc *CRLChecker) isRevokedBy(cert *x509.Certificate, issuer *x509.Certificate) (bool, bool) { + if cc == nil { + return false, true + } + + err := cc.refresh() + if err != nil { + // we can't quit obviously, so we just log the error and continue + // but we can assume we have loaded a CRL, or it would have quit the first time + cc.logger.Errorf("while refreshing CRL: %s - will keep using CRL file read at %s", err, + cc.lastLoad.Format(time.RFC3339)) + } + + now := time.Now().UTC() + + cc.mu.RLock() + defer cc.mu.RUnlock() + + for _, crl := range cc.crls { + if err := crl.CheckSignatureFrom(issuer); err != nil { + continue + } + + if now.After(crl.NextUpdate) { + cc.logger.Warn("CRL has expired, will still validate the cert against it.") + } + + if now.Before(crl.ThisUpdate) { + cc.logger.Warn("CRL is not yet valid, will still validate the cert against it.") + } + + for _, revoked := range crl.RevokedCertificateEntries { + if revoked.SerialNumber.Cmp(cert.SerialNumber) == 0 { + cc.logger.Warn("client certificate is revoked by CRL") + return true, true + } + } + } + + return false, true +} diff --git a/pkg/apiserver/middlewares/v1/jwt.go b/pkg/apiserver/middlewares/v1/jwt.go index 735c5f058cb..64406deff3e 100644 --- a/pkg/apiserver/middlewares/v1/jwt.go +++ b/pkg/apiserver/middlewares/v1/jwt.go @@ -4,7 +4,6 @@ import ( "crypto/rand" "errors" "fmt" - "net/http" "os" "strings" "time" @@ -59,27 +58,19 @@ func (j *JWT) authTLS(c *gin.Context) (*authInput, error) { ret := authInput{} if j.TlsAuth == nil { - c.JSON(http.StatusForbidden, gin.H{"message": "access forbidden"}) - c.Abort() + err := errors.New("tls authentication required") + log.Warn(err) - return nil, errors.New("TLS auth is not configured") + return nil, err } - validCert, extractedCN, err := j.TlsAuth.ValidateCert(c) + extractedCN, err := j.TlsAuth.ValidateCert(c) if err != nil { - log.Error(err) - c.JSON(http.StatusForbidden, gin.H{"message": "access forbidden"}) - c.Abort() - - return nil, 
fmt.Errorf("while trying to validate client cert: %w", err) + log.Warn(err) + return nil, err } - if !validCert { - c.JSON(http.StatusForbidden, gin.H{"message": "access forbidden"}) - c.Abort() - - return nil, errors.New("failed cert authentication") - } + logger := log.WithField("ip", c.ClientIP()) ret.machineID = fmt.Sprintf("%s@%s", extractedCN, c.ClientIP()) @@ -88,14 +79,12 @@ func (j *JWT) authTLS(c *gin.Context) (*authInput, error) { First(j.DbClient.CTX) if ent.IsNotFound(err) { // Machine was not found, let's create it - log.Infof("machine %s not found, create it", ret.machineID) + logger.Infof("machine %s not found, create it", ret.machineID) // let's use an apikey as the password, doesn't matter in this case (generatePassword is only available in cscli) pwd, err := GenerateAPIKey(dummyAPIKeySize) if err != nil { - log.WithFields(log.Fields{ - "ip": c.ClientIP(), - "cn": extractedCN, - }).Errorf("error generating password: %s", err) + logger.WithField("cn", extractedCN). + Errorf("error generating password: %s", err) return nil, errors.New("error generating password") } diff --git a/pkg/apiserver/middlewares/v1/ocsp.go b/pkg/apiserver/middlewares/v1/ocsp.go new file mode 100644 index 00000000000..24557bfda7b --- /dev/null +++ b/pkg/apiserver/middlewares/v1/ocsp.go @@ -0,0 +1,100 @@ +package v1 + +import ( + "bytes" + "crypto" + "crypto/x509" + "io" + "net/http" + "net/url" + + log "github.com/sirupsen/logrus" + "golang.org/x/crypto/ocsp" +) + +type OCSPChecker struct { + logger *log.Entry +} + +func NewOCSPChecker(logger *log.Entry) *OCSPChecker { + return &OCSPChecker{ + logger: logger, + } +} + +func (oc *OCSPChecker) query(server string, cert *x509.Certificate, issuer *x509.Certificate) (*ocsp.Response, error) { + req, err := ocsp.CreateRequest(cert, issuer, &ocsp.RequestOptions{Hash: crypto.SHA256}) + if err != nil { + oc.logger.Errorf("TLSAuth: error creating OCSP request: %s", err) + return nil, err + } + + httpRequest, err := 
http.NewRequest(http.MethodPost, server, bytes.NewBuffer(req)) + if err != nil { + oc.logger.Error("TLSAuth: cannot create HTTP request for OCSP") + return nil, err + } + + ocspURL, err := url.Parse(server) + if err != nil { + oc.logger.Error("TLSAuth: cannot parse OCSP URL") + return nil, err + } + + httpRequest.Header.Add("Content-Type", "application/ocsp-request") + httpRequest.Header.Add("Accept", "application/ocsp-response") + httpRequest.Header.Add("Host", ocspURL.Host) + + httpClient := &http.Client{} + + // XXX: timeout, context? + httpResponse, err := httpClient.Do(httpRequest) + if err != nil { + oc.logger.Error("TLSAuth: cannot send HTTP request to OCSP") + return nil, err + } + defer httpResponse.Body.Close() + + output, err := io.ReadAll(httpResponse.Body) + if err != nil { + oc.logger.Error("TLSAuth: cannot read HTTP response from OCSP") + return nil, err + } + + ocspResponse, err := ocsp.ParseResponseForCert(output, cert, issuer) + + return ocspResponse, err +} + +// isRevokedBy checks if the client certificate is revoked by the issuer via any of the OCSP servers present in the certificate. +// It returns a boolean indicating if the certificate is revoked and a boolean indicating +// if the OCSP check was successful and could be cached. 
+func (oc *OCSPChecker) isRevokedBy(cert *x509.Certificate, issuer *x509.Certificate) (bool, bool) { + if cert.OCSPServer == nil || len(cert.OCSPServer) == 0 { + oc.logger.Infof("TLSAuth: no OCSP Server present in client certificate, skipping OCSP verification") + return false, true + } + + for _, server := range cert.OCSPServer { + ocspResponse, err := oc.query(server, cert, issuer) + if err != nil { + oc.logger.Errorf("TLSAuth: error querying OCSP server %s: %s", server, err) + continue + } + + switch ocspResponse.Status { + case ocsp.Good: + return false, true + case ocsp.Revoked: + oc.logger.Errorf("TLSAuth: client certificate is revoked by server %s", server) + return true, true + case ocsp.Unknown: + log.Debugf("unknown OCSP status for server %s", server) + continue + } + } + + log.Infof("Could not get any valid OCSP response, assuming the cert is revoked") + + return true, false +} diff --git a/pkg/apiserver/middlewares/v1/tls_auth.go b/pkg/apiserver/middlewares/v1/tls_auth.go index c2fcc9c7264..673c8d0cdce 100644 --- a/pkg/apiserver/middlewares/v1/tls_auth.go +++ b/pkg/apiserver/middlewares/v1/tls_auth.go @@ -1,79 +1,24 @@ package v1 import ( - "bytes" - "crypto" "crypto/x509" - "encoding/pem" "errors" "fmt" - "io" - "net/http" - "net/url" - "os" + "slices" "time" "github.com/gin-gonic/gin" log "github.com/sirupsen/logrus" - "golang.org/x/crypto/ocsp" ) type TLSAuth struct { AllowedOUs []string - CrlPath string - revocationCache map[string]cacheEntry - cacheExpiration time.Duration + crlChecker *CRLChecker + ocspChecker *OCSPChecker + revocationCache *RevocationCache logger *log.Entry } -type cacheEntry struct { - revoked bool - timestamp time.Time -} - -func (ta *TLSAuth) ocspQuery(server string, cert *x509.Certificate, issuer *x509.Certificate) (*ocsp.Response, error) { - req, err := ocsp.CreateRequest(cert, issuer, &ocsp.RequestOptions{Hash: crypto.SHA256}) - if err != nil { - ta.logger.Errorf("TLSAuth: error creating OCSP request: %s", err) - return 
nil, err - } - - httpRequest, err := http.NewRequest(http.MethodPost, server, bytes.NewBuffer(req)) - if err != nil { - ta.logger.Error("TLSAuth: cannot create HTTP request for OCSP") - return nil, err - } - - ocspURL, err := url.Parse(server) - if err != nil { - ta.logger.Error("TLSAuth: cannot parse OCSP URL") - return nil, err - } - - httpRequest.Header.Add("Content-Type", "application/ocsp-request") - httpRequest.Header.Add("Accept", "application/ocsp-response") - httpRequest.Header.Add("host", ocspURL.Host) - - httpClient := &http.Client{} - - httpResponse, err := httpClient.Do(httpRequest) - if err != nil { - ta.logger.Error("TLSAuth: cannot send HTTP request to OCSP") - return nil, err - } - defer httpResponse.Body.Close() - - output, err := io.ReadAll(httpResponse.Body) - if err != nil { - ta.logger.Error("TLSAuth: cannot read HTTP response from OCSP") - return nil, err - } - - ocspResponse, err := ocsp.ParseResponseForCert(output, cert, issuer) - - return ocspResponse, err -} - func (ta *TLSAuth) isExpired(cert *x509.Certificate) bool { now := time.Now().UTC() @@ -90,211 +35,147 @@ func (ta *TLSAuth) isExpired(cert *x509.Certificate) bool { return false } -// isOCSPRevoked checks if the client certificate is revoked by any of the OCSP servers present in the certificate. -// It returns a boolean indicating if the certificate is revoked and a boolean indicating if the OCSP check was successful and could be cached. 
-func (ta *TLSAuth) isOCSPRevoked(cert *x509.Certificate, issuer *x509.Certificate) (bool, bool) { - if cert.OCSPServer == nil || len(cert.OCSPServer) == 0 { - ta.logger.Infof("TLSAuth: no OCSP Server present in client certificate, skipping OCSP verification") - return false, true - } +// checkRevocationPath checks a single chain against OCSP and CRL +func (ta *TLSAuth) checkRevocationPath(chain []*x509.Certificate) (error, bool) { //nolint:revive + // if we ever fail to check OCSP or CRL, we should not cache the result + couldCheck := true - for _, server := range cert.OCSPServer { - ocspResponse, err := ta.ocspQuery(server, cert, issuer) - if err != nil { - ta.logger.Errorf("TLSAuth: error querying OCSP server %s: %s", server, err) - continue - } + // starting from the root CA and moving towards the leaf certificate, + // check for revocation of intermediates too + for i := len(chain) - 1; i > 0; i-- { + cert := chain[i-1] + issuer := chain[i] - switch ocspResponse.Status { - case ocsp.Good: - return false, true - case ocsp.Revoked: - ta.logger.Errorf("TLSAuth: client certificate is revoked by server %s", server) - return true, true - case ocsp.Unknown: - log.Debugf("unknow OCSP status for server %s", server) - continue + revokedByOCSP, checkedByOCSP := ta.ocspChecker.isRevokedBy(cert, issuer) + couldCheck = couldCheck && checkedByOCSP + + if revokedByOCSP && checkedByOCSP { + return errors.New("certificate revoked by OCSP"), couldCheck } - } - log.Infof("Could not get any valid OCSP response, assuming the cert is revoked") + revokedByCRL, checkedByCRL := ta.crlChecker.isRevokedBy(cert, issuer) + couldCheck = couldCheck && checkedByCRL - return true, false -} - -// isCRLRevoked checks if the client certificate is revoked by the CRL present in the CrlPath. -// It returns a boolean indicating if the certificate is revoked and a boolean indicating if the CRL check was successful and could be cached. 
-func (ta *TLSAuth) isCRLRevoked(cert *x509.Certificate) (bool, bool) { - if ta.CrlPath == "" { - ta.logger.Info("no crl_path, skipping CRL check") - return false, true + if revokedByCRL && checkedByCRL { + return errors.New("certificate revoked by CRL"), couldCheck + } } - crlContent, err := os.ReadFile(ta.CrlPath) - if err != nil { - ta.logger.Errorf("could not read CRL file, skipping check: %s", err) - return false, false - } + return nil, couldCheck +} - var crlBlock *pem.Block +func (ta *TLSAuth) setAllowedOu(allowedOus []string) error { + uniqueOUs := make(map[string]struct{}) - for { - crlBlock, crlContent = pem.Decode(crlContent) - if crlBlock == nil { - break // no more PEM blocks + for _, ou := range allowedOus { + // disallow empty ou + if ou == "" { + return errors.New("allowed_ou configuration contains invalid empty string") } - crl, err := x509.ParseRevocationList(crlBlock.Bytes) - if err != nil { - ta.logger.Errorf("could not parse a PEM block in CRL file, skipping: %s", err) + if _, exists := uniqueOUs[ou]; exists { + ta.logger.Warningf("dropping duplicate ou %s", ou) continue } - now := time.Now().UTC() + uniqueOUs[ou] = struct{}{} - if now.After(crl.NextUpdate) { - ta.logger.Warn("CRL has expired, will still validate the cert against it.") - } + ta.AllowedOUs = append(ta.AllowedOUs, ou) + } - if now.Before(crl.ThisUpdate) { - ta.logger.Warn("CRL is not yet valid, will still validate the cert against it.") - } + return nil +} - for _, revoked := range crl.RevokedCertificateEntries { - if revoked.SerialNumber.Cmp(cert.SerialNumber) == 0 { - ta.logger.Warn("client certificate is revoked by CRL") - return true, true - } +func (ta *TLSAuth) checkAllowedOU(ous []string) error { + for _, ou := range ous { + if slices.Contains(ta.AllowedOUs, ou) { + return nil } } - return false, true + return fmt.Errorf("client certificate OU %v doesn't match expected OU %v", ous, ta.AllowedOUs) } -func (ta *TLSAuth) isRevoked(cert *x509.Certificate, issuer 
*x509.Certificate) (bool, error) { - sn := cert.SerialNumber.String() - if cacheValue, ok := ta.revocationCache[sn]; ok { - if time.Now().UTC().Sub(cacheValue.timestamp) < ta.cacheExpiration { - ta.logger.Debugf("TLSAuth: using cached value for cert %s: %t", sn, cacheValue.revoked) - return cacheValue.revoked, nil - } +func (ta *TLSAuth) ValidateCert(c *gin.Context) (string, error) { + // Checks cert validity, Returns true + CN if client cert matches requested OU + var leaf *x509.Certificate - ta.logger.Debugf("TLSAuth: cached value expired, removing from cache") - delete(ta.revocationCache, sn) - } else { - ta.logger.Tracef("TLSAuth: no cached value for cert %s", sn) + if c.Request.TLS == nil || len(c.Request.TLS.PeerCertificates) == 0 { + return "", errors.New("no certificate in request") } - revokedByOCSP, cacheOCSP := ta.isOCSPRevoked(cert, issuer) - revokedByCRL, cacheCRL := ta.isCRLRevoked(cert) - revoked := revokedByOCSP || revokedByCRL - - if cacheOCSP && cacheCRL { - ta.revocationCache[sn] = cacheEntry{ - revoked: revoked, - timestamp: time.Now().UTC(), - } + if len(c.Request.TLS.VerifiedChains) == 0 { + return "", errors.New("no verified cert in request") } - return revoked, nil -} + // although there can be multiple chains, the leaf certificate is the same + // we take the first one + leaf = c.Request.TLS.VerifiedChains[0][0] -func (ta *TLSAuth) isInvalid(cert *x509.Certificate, issuer *x509.Certificate) (bool, error) { - if ta.isExpired(cert) { - return true, nil + if err := ta.checkAllowedOU(leaf.Subject.OrganizationalUnit); err != nil { + return "", err } - revoked, err := ta.isRevoked(cert, issuer) - if err != nil { - // Fail securely, if we can't check the revocation status, let's consider the cert invalid - // We may change this in the future based on users feedback, but this seems the most sensible thing to do - return true, fmt.Errorf("could not check for client certification revocation status: %w", err) + if ta.isExpired(leaf) { + return "", 
errors.New("client certificate is expired") } - return revoked, nil -} - -func (ta *TLSAuth) SetAllowedOu(allowedOus []string) error { - for _, ou := range allowedOus { - // disallow empty ou - if ou == "" { - return errors.New("empty ou isn't allowed") + if validErr, cached := ta.revocationCache.Get(leaf); cached { + if validErr != nil { + return "", fmt.Errorf("(cache) %w", validErr) } - // drop & warn on duplicate ou - ok := true - - for _, validOu := range ta.AllowedOUs { - if validOu == ou { - ta.logger.Warningf("dropping duplicate ou %s", ou) - - ok = false - } - } - - if ok { - ta.AllowedOUs = append(ta.AllowedOUs, ou) - } + return leaf.Subject.CommonName, nil } - return nil -} + okToCache := true -func (ta *TLSAuth) ValidateCert(c *gin.Context) (bool, string, error) { - // Checks cert validity, Returns true + CN if client cert matches requested OU - var clientCert *x509.Certificate - - if c.Request.TLS == nil || len(c.Request.TLS.PeerCertificates) == 0 { - // do not error if it's not TLS or there are no peer certs - return false, "", nil - } + var validErr error - if len(c.Request.TLS.VerifiedChains) > 0 { - validOU := false - clientCert = c.Request.TLS.VerifiedChains[0][0] - - for _, ou := range clientCert.Subject.OrganizationalUnit { - for _, allowedOu := range ta.AllowedOUs { - if allowedOu == ou { - validOU = true - break - } - } - } + var couldCheck bool - if !validOU { - return false, "", fmt.Errorf("client certificate OU (%v) doesn't match expected OU (%v)", - clientCert.Subject.OrganizationalUnit, ta.AllowedOUs) - } + for _, chain := range c.Request.TLS.VerifiedChains { + validErr, couldCheck = ta.checkRevocationPath(chain) + okToCache = okToCache && couldCheck - revoked, err := ta.isInvalid(clientCert, c.Request.TLS.VerifiedChains[0][1]) - if err != nil { - ta.logger.Errorf("TLSAuth: error checking if client certificate is revoked: %s", err) - return false, "", fmt.Errorf("could not check for client certification revocation status: %w", err) - } - 
- if revoked { - return false, "", fmt.Errorf("client certificate for CN=%s OU=%s is revoked", clientCert.Subject.CommonName, clientCert.Subject.OrganizationalUnit) + if validErr != nil { + break } + } - ta.logger.Debugf("client OU %v is allowed vs required OU %v", clientCert.Subject.OrganizationalUnit, ta.AllowedOUs) + if okToCache { + ta.revocationCache.Set(leaf, validErr) + } - return true, clientCert.Subject.CommonName, nil + if validErr != nil { + return "", validErr } - return false, "", errors.New("no verified cert in request") + return leaf.Subject.CommonName, nil } func NewTLSAuth(allowedOus []string, crlPath string, cacheExpiration time.Duration, logger *log.Entry) (*TLSAuth, error) { + var err error + + cache := NewRevocationCache(cacheExpiration, logger) + ta := &TLSAuth{ - revocationCache: map[string]cacheEntry{}, - cacheExpiration: cacheExpiration, - CrlPath: crlPath, + revocationCache: cache, + ocspChecker: NewOCSPChecker(logger), logger: logger, } - err := ta.SetAllowedOu(allowedOus) - if err != nil { + switch crlPath { + case "": + logger.Info("no crl_path, skipping CRL checks") + default: + ta.crlChecker, err = NewCRLChecker(crlPath, cache.Empty, logger) + if err != nil { + return nil, err + } + } + + if err := ta.setAllowedOu(allowedOus); err != nil { return nil, err } diff --git a/test/bats/11_bouncers_tls.bats b/test/bats/11_bouncers_tls.bats index 6b4986d45d7..765e93ebee2 100644 --- a/test/bats/11_bouncers_tls.bats +++ b/test/bats/11_bouncers_tls.bats @@ -3,6 +3,19 @@ set -u +# root: root CA +# inter: intermediate CA +# inter_rev: intermediate CA revoked by root (CRL3) +# leaf: valid client cert +# leaf_rev1: client cert revoked by inter (CRL1) +# leaf_rev2: client cert revoked by inter (CRL2) +# leaf_rev3: client cert (indirectly) revoked by root +# +# CRL1: inter revokes leaf_rev1 +# CRL2: inter revokes leaf_rev2 +# CRL3: root revokes inter_rev +# CRL4: root revokes leaf, but is ignored + setup_file() { load "../lib/setup_file.sh" 
./instance-data load @@ -10,43 +23,96 @@ setup_file() { tmpdir="$BATS_FILE_TMPDIR" export tmpdir - CFDIR="${BATS_TEST_DIRNAME}/testdata/cfssl" + CFDIR="$BATS_TEST_DIRNAME/testdata/cfssl" export CFDIR - # Generate the CA - cfssl gencert --initca "${CFDIR}/ca.json" 2>/dev/null | cfssljson --bare "${tmpdir}/ca" - - # Generate an intermediate - cfssl gencert --initca "${CFDIR}/intermediate.json" 2>/dev/null | cfssljson --bare "${tmpdir}/inter" - cfssl sign -ca "${tmpdir}/ca.pem" -ca-key "${tmpdir}/ca-key.pem" -config "${CFDIR}/profiles.json" -profile intermediate_ca "${tmpdir}/inter.csr" 2>/dev/null | cfssljson --bare "${tmpdir}/inter" - - # Generate server cert for crowdsec with the intermediate - cfssl gencert -ca "${tmpdir}/inter.pem" -ca-key "${tmpdir}/inter-key.pem" -config "${CFDIR}/profiles.json" -profile=server "${CFDIR}/server.json" 2>/dev/null | cfssljson --bare "${tmpdir}/server" - - # Generate client cert for the bouncer - cfssl gencert -ca "${tmpdir}/inter.pem" -ca-key "${tmpdir}/inter-key.pem" -config "${CFDIR}/profiles.json" -profile=client "${CFDIR}/bouncer.json" 2>/dev/null | cfssljson --bare "${tmpdir}/bouncer" - - # Genearte client cert for the bouncer with an invalid OU - cfssl gencert -ca "${tmpdir}/inter.pem" -ca-key "${tmpdir}/inter-key.pem" -config "${CFDIR}/profiles.json" -profile=client "${CFDIR}/bouncer_invalid.json" 2>/dev/null | cfssljson --bare "${tmpdir}/bouncer_bad_ou" - - # Generate client cert for the bouncer directly signed by the CA, it should be refused by crowdsec as uses the intermediate - cfssl gencert -ca "${tmpdir}/ca.pem" -ca-key "${tmpdir}/ca-key.pem" -config "${CFDIR}/profiles.json" -profile=client "${CFDIR}/bouncer.json" 2>/dev/null | cfssljson --bare "${tmpdir}/bouncer_invalid" - - # Generate revoked client certs - for cert_name in "revoked_1" "revoked_2"; do - cfssl gencert -ca "${tmpdir}/inter.pem" -ca-key "${tmpdir}/inter-key.pem" -config "${CFDIR}/profiles.json" -profile=client "${CFDIR}/bouncer.json" 2>/dev/null | 
cfssljson --bare "${tmpdir}/${cert_name}" - cfssl certinfo -cert "${tmpdir}/${cert_name}.pem" | jq -r '.serial_number' > "${tmpdir}/serials_${cert_name}.txt" + # Root CA + cfssl gencert -loglevel 2 \ + --initca "$CFDIR/ca_root.json" \ + | cfssljson --bare "$tmpdir/root" + + # Intermediate CAs (valid or revoked) + for cert in "inter" "inter_rev"; do + cfssl gencert -loglevel 2 \ + --initca "$CFDIR/ca_intermediate.json" \ + | cfssljson --bare "$tmpdir/$cert" + + cfssl sign -loglevel 2 \ + -ca "$tmpdir/root.pem" -ca-key "$tmpdir/root-key.pem" \ + -config "$CFDIR/profiles.json" -profile intermediate_ca "$tmpdir/$cert.csr" \ + | cfssljson --bare "$tmpdir/$cert" done - # Generate separate CRL blocks and concatenate them - for cert_name in "revoked_1" "revoked_2"; do - echo '-----BEGIN X509 CRL-----' > "${tmpdir}/crl_${cert_name}.pem" - cfssl gencrl "${tmpdir}/serials_${cert_name}.txt" "${tmpdir}/ca.pem" "${tmpdir}/ca-key.pem" >> "${tmpdir}/crl_${cert_name}.pem" - echo '-----END X509 CRL-----' >> "${tmpdir}/crl_${cert_name}.pem" + # Server cert for crowdsec with the intermediate + cfssl gencert -loglevel 2 \ + -ca "$tmpdir/inter.pem" -ca-key "$tmpdir/inter-key.pem" \ + -config "$CFDIR/profiles.json" -profile=server "$CFDIR/server.json" \ + | cfssljson --bare "$tmpdir/server" + + # Client certs (valid or revoked) + for cert in "leaf" "leaf_rev1" "leaf_rev2"; do + cfssl gencert -loglevel 3 \ + -ca "$tmpdir/inter.pem" -ca-key "$tmpdir/inter-key.pem" \ + -config "$CFDIR/profiles.json" -profile=client \ + "$CFDIR/bouncer.json" \ + | cfssljson --bare "$tmpdir/$cert" done - cat "${tmpdir}/crl_revoked_1.pem" "${tmpdir}/crl_revoked_2.pem" >"${tmpdir}/crl.pem" - cat "${tmpdir}/ca.pem" "${tmpdir}/inter.pem" > "${tmpdir}/bundle.pem" + # Client cert (by revoked inter) + cfssl gencert -loglevel 3 \ + -ca "$tmpdir/inter_rev.pem" -ca-key "$tmpdir/inter_rev-key.pem" \ + -config "$CFDIR/profiles.json" -profile=client \ + "$CFDIR/bouncer.json" \ + | cfssljson --bare "$tmpdir/leaf_rev3" + + 
# Bad client cert (invalid OU) + cfssl gencert -loglevel 3 \ + -ca "$tmpdir/inter.pem" -ca-key "$tmpdir/inter-key.pem" \ + -config "$CFDIR/profiles.json" -profile=client \ + "$CFDIR/bouncer_invalid.json" \ + | cfssljson --bare "$tmpdir/leaf_bad_ou" + + # Bad client cert (directly signed by the CA, it should be refused by crowdsec as it uses the intermediate) + cfssl gencert -loglevel 3 \ + -ca "$tmpdir/root.pem" -ca-key "$tmpdir/root-key.pem" \ + -config "$CFDIR/profiles.json" -profile=client \ + "$CFDIR/bouncer.json" \ + | cfssljson --bare "$tmpdir/leaf_invalid" + + truncate -s 0 "$tmpdir/crl.pem" + + # Revoke certs + { + echo '-----BEGIN X509 CRL-----' + cfssl gencrl \ + <(cert_serial_number "$tmpdir/leaf_rev1.pem") \ + "$tmpdir/inter.pem" \ + "$tmpdir/inter-key.pem" + echo '-----END X509 CRL-----' + + echo '-----BEGIN X509 CRL-----' + cfssl gencrl \ + <(cert_serial_number "$tmpdir/leaf_rev2.pem") \ + "$tmpdir/inter.pem" \ + "$tmpdir/inter-key.pem" + echo '-----END X509 CRL-----' + + echo '-----BEGIN X509 CRL-----' + cfssl gencrl \ + <(cert_serial_number "$tmpdir/inter_rev.pem") \ + "$tmpdir/root.pem" \ + "$tmpdir/root-key.pem" + echo '-----END X509 CRL-----' + + echo '-----BEGIN X509 CRL-----' + cfssl gencrl \ + <(cert_serial_number "$tmpdir/leaf.pem") \ + "$tmpdir/root.pem" \ + "$tmpdir/root-key.pem" + echo '-----END X509 CRL-----' + } >> "$tmpdir/crl.pem" + + cat "$tmpdir/root.pem" "$tmpdir/inter.pem" > "$tmpdir/bundle.pem" config_set ' .api.server.tls.cert_file=strenv(tmpdir) + "/server.pem" | @@ -79,8 +145,12 @@ teardown() { assert_output "[]" } -@test "simulate one bouncer request with a valid cert" { - rune -0 curl -s --cert "${tmpdir}/bouncer.pem" --key "${tmpdir}/bouncer-key.pem" --cacert "${tmpdir}/bundle.pem" https://localhost:8080/v1/decisions\?ip=42.42.42.42 +@test "simulate a bouncer request with a valid cert" { + rune -0 curl -f -s \ + --cert "$tmpdir/leaf.pem" \ + --key "$tmpdir/leaf-key.pem" \ + --cacert "$tmpdir/bundle.pem" \ + 
https://localhost:8080/v1/decisions\?ip=42.42.42.42 assert_output "null" rune -0 cscli bouncers list -o json rune -0 jq '. | length' <(output) @@ -91,27 +161,54 @@ teardown() { rune cscli bouncers delete localhost@127.0.0.1 } -@test "simulate one bouncer request with an invalid cert" { - rune curl -s --cert "${tmpdir}/bouncer_invalid.pem" --key "${tmpdir}/bouncer_invalid-key.pem" --cacert "${tmpdir}/ca-key.pem" https://localhost:8080/v1/decisions\?ip=42.42.42.42 +@test "simulate a bouncer request with an invalid cert" { + rune -77 curl -f -s \ + --cert "$tmpdir/leaf_invalid.pem" \ + --key "$tmpdir/leaf_invalid-key.pem" \ + --cacert "$tmpdir/root-key.pem" \ + https://localhost:8080/v1/decisions\?ip=42.42.42.42 rune -0 cscli bouncers list -o json assert_output "[]" } -@test "simulate one bouncer request with an invalid OU" { - rune curl -s --cert "${tmpdir}/bouncer_bad_ou.pem" --key "${tmpdir}/bouncer_bad_ou-key.pem" --cacert "${tmpdir}/bundle.pem" https://localhost:8080/v1/decisions\?ip=42.42.42.42 +@test "simulate a bouncer request with an invalid OU" { + rune -0 curl -s \ + --cert "$tmpdir/leaf_bad_ou.pem" \ + --key "$tmpdir/leaf_bad_ou-key.pem" \ + --cacert "$tmpdir/bundle.pem" \ + https://localhost:8080/v1/decisions\?ip=42.42.42.42 + assert_json '{message:"access forbidden"}' rune -0 cscli bouncers list -o json assert_output "[]" } -@test "simulate one bouncer request with a revoked certificate" { +@test "simulate a bouncer request with a revoked certificate" { # we have two certificates revoked by different CRL blocks - for cert_name in "revoked_1" "revoked_2"; do + # we connect twice to test the cache too + for cert in "leaf_rev1" "leaf_rev2" "leaf_rev1" "leaf_rev2"; do truncate_log - rune -0 curl -i -s --cert "${tmpdir}/${cert_name}.pem" --key "${tmpdir}/${cert_name}-key.pem" --cacert "${tmpdir}/bundle.pem" https://localhost:8080/v1/decisions\?ip=42.42.42.42 - assert_log --partial "client certificate is revoked by CRL" - assert_log --partial "client 
certificate for CN=localhost OU=[bouncer-ou] is revoked" + rune -0 curl -s \ + --cert "$tmpdir/$cert.pem" \ + --key "$tmpdir/$cert-key.pem" \ + --cacert "$tmpdir/bundle.pem" \ + https://localhost:8080/v1/decisions\?ip=42.42.42.42 + assert_log --partial "certificate revoked by CRL" assert_output --partial "access forbidden" rune -0 cscli bouncers list -o json assert_output "[]" done } + +# vvv this test must be last, or it can break the ones that follow + +@test "allowed_ou can't contain an empty string" { + ./instance-crowdsec stop + config_set ' + .common.log_media="stdout" | + .api.server.tls.bouncers_allowed_ou=["bouncer-ou", ""] + ' + rune -1 wait-for "$CROWDSEC" + assert_stderr --partial "allowed_ou configuration contains invalid empty string" +} + +# ^^^ this test must be last, or it can break the ones that follow diff --git a/test/bats/30_machines_tls.bats b/test/bats/30_machines_tls.bats index 52231704558..ef2915e3880 100644 --- a/test/bats/30_machines_tls.bats +++ b/test/bats/30_machines_tls.bats @@ -3,6 +3,20 @@ set -u + +# root: root CA +# inter: intermediate CA +# inter_rev: intermediate CA revoked by root (CRL3) +# leaf: valid client cert +# leaf_rev1: client cert revoked by inter (CRL1) +# leaf_rev2: client cert revoked by inter (CRL2) +# leaf_rev3: client cert (indirectly) revoked by root +# +# CRL1: inter revokes leaf_rev1 +# CRL2: inter revokes leaf_rev2 +# CRL3: root revokes inter_rev +# CRL4: root revokes leaf, but is ignored + setup_file() { load "../lib/setup_file.sh" ./instance-data load @@ -13,43 +27,96 @@ setup_file() { tmpdir="$BATS_FILE_TMPDIR" export tmpdir - CFDIR="${BATS_TEST_DIRNAME}/testdata/cfssl" + CFDIR="$BATS_TEST_DIRNAME/testdata/cfssl" export CFDIR - # Generate the CA - cfssl gencert --initca "${CFDIR}/ca.json" 2>/dev/null | cfssljson --bare "${tmpdir}/ca" - - # Generate an intermediate - cfssl gencert --initca "${CFDIR}/intermediate.json" 2>/dev/null | cfssljson --bare "${tmpdir}/inter" - cfssl sign -ca "${tmpdir}/ca.pem" 
-ca-key "${tmpdir}/ca-key.pem" -config "${CFDIR}/profiles.json" -profile intermediate_ca "${tmpdir}/inter.csr" 2>/dev/null | cfssljson --bare "${tmpdir}/inter" - - # Generate server cert for crowdsec with the intermediate - cfssl gencert -ca "${tmpdir}/inter.pem" -ca-key "${tmpdir}/inter-key.pem" -config "${CFDIR}/profiles.json" -profile=server "${CFDIR}/server.json" 2>/dev/null | cfssljson --bare "${tmpdir}/server" - - # Generate client cert for the agent - cfssl gencert -ca "${tmpdir}/inter.pem" -ca-key "${tmpdir}/inter-key.pem" -config "${CFDIR}/profiles.json" -profile=client "${CFDIR}/agent.json" 2>/dev/null | cfssljson --bare "${tmpdir}/agent" - - # Genearte client cert for the agent with an invalid OU - cfssl gencert -ca "${tmpdir}/inter.pem" -ca-key "${tmpdir}/inter-key.pem" -config "${CFDIR}/profiles.json" -profile=client "${CFDIR}/agent_invalid.json" 2>/dev/null | cfssljson --bare "${tmpdir}/agent_bad_ou" - - # Generate client cert for the bouncer directly signed by the CA, it should be refused by crowdsec as uses the intermediate - cfssl gencert -ca "${tmpdir}/ca.pem" -ca-key "${tmpdir}/ca-key.pem" -config "${CFDIR}/profiles.json" -profile=client "${CFDIR}/agent.json" 2>/dev/null | cfssljson --bare "${tmpdir}/agent_invalid" - - # Generate revoked client cert - for cert_name in "revoked_1" "revoked_2"; do - cfssl gencert -ca "${tmpdir}/inter.pem" -ca-key "${tmpdir}/inter-key.pem" -config "${CFDIR}/profiles.json" -profile=client "${CFDIR}/agent.json" 2>/dev/null | cfssljson --bare "${tmpdir}/${cert_name}" - cfssl certinfo -cert "${tmpdir}/${cert_name}.pem" | jq -r '.serial_number' > "${tmpdir}/serials_${cert_name}.txt" + # Root CA + cfssl gencert -loglevel 2 \ + --initca "$CFDIR/ca_root.json" \ + | cfssljson --bare "$tmpdir/root" + + # Intermediate CAs (valid or revoked) + for cert in "inter" "inter_rev"; do + cfssl gencert -loglevel 2 \ + --initca "$CFDIR/ca_intermediate.json" \ + | cfssljson --bare "$tmpdir/$cert" + + cfssl sign -loglevel 2 \ + -ca 
"$tmpdir/root.pem" -ca-key "$tmpdir/root-key.pem" \ + -config "$CFDIR/profiles.json" -profile intermediate_ca "$tmpdir/$cert.csr" \ + | cfssljson --bare "$tmpdir/$cert" done - # Generate separate CRL blocks and concatenate them - for cert_name in "revoked_1" "revoked_2"; do - echo '-----BEGIN X509 CRL-----' > "${tmpdir}/crl_${cert_name}.pem" - cfssl gencrl "${tmpdir}/serials_${cert_name}.txt" "${tmpdir}/ca.pem" "${tmpdir}/ca-key.pem" >> "${tmpdir}/crl_${cert_name}.pem" - echo '-----END X509 CRL-----' >> "${tmpdir}/crl_${cert_name}.pem" + # Server cert for crowdsec with the intermediate + cfssl gencert -loglevel 2 \ + -ca "$tmpdir/inter.pem" -ca-key "$tmpdir/inter-key.pem" \ + -config "$CFDIR/profiles.json" -profile=server "$CFDIR/server.json" \ + | cfssljson --bare "$tmpdir/server" + + # Client certs (valid or revoked) + for cert in "leaf" "leaf_rev1" "leaf_rev2"; do + cfssl gencert -loglevel 3 \ + -ca "$tmpdir/inter.pem" -ca-key "$tmpdir/inter-key.pem" \ + -config "$CFDIR/profiles.json" -profile=client \ + "$CFDIR/agent.json" \ + | cfssljson --bare "$tmpdir/$cert" done - cat "${tmpdir}/crl_revoked_1.pem" "${tmpdir}/crl_revoked_2.pem" >"${tmpdir}/crl.pem" - cat "${tmpdir}/ca.pem" "${tmpdir}/inter.pem" > "${tmpdir}/bundle.pem" + # Client cert (by revoked inter) + cfssl gencert -loglevel 3 \ + -ca "$tmpdir/inter_rev.pem" -ca-key "$tmpdir/inter_rev-key.pem" \ + -config "$CFDIR/profiles.json" -profile=client \ + "$CFDIR/agent.json" \ + | cfssljson --bare "$tmpdir/leaf_rev3" + + # Bad client cert (invalid OU) + cfssl gencert -loglevel 3 \ + -ca "$tmpdir/inter.pem" -ca-key "$tmpdir/inter-key.pem" \ + -config "$CFDIR/profiles.json" -profile=client \ + "$CFDIR/agent_invalid.json" \ + | cfssljson --bare "$tmpdir/leaf_bad_ou" + + # Bad client cert (directly signed by the CA, it should be refused by crowdsec as it uses the intermediate) + cfssl gencert -loglevel 3 \ + -ca "$tmpdir/root.pem" -ca-key "$tmpdir/root-key.pem" \ + -config "$CFDIR/profiles.json" -profile=client \ + 
"$CFDIR/agent.json" \ + | cfssljson --bare "$tmpdir/leaf_invalid" + + truncate -s 0 "$tmpdir/crl.pem" + + # Revoke certs + { + echo '-----BEGIN X509 CRL-----' + cfssl gencrl \ + <(cert_serial_number "$tmpdir/leaf_rev1.pem") \ + "$tmpdir/inter.pem" \ + "$tmpdir/inter-key.pem" + echo '-----END X509 CRL-----' + + echo '-----BEGIN X509 CRL-----' + cfssl gencrl \ + <(cert_serial_number "$tmpdir/leaf_rev2.pem") \ + "$tmpdir/inter.pem" \ + "$tmpdir/inter-key.pem" + echo '-----END X509 CRL-----' + + echo '-----BEGIN X509 CRL-----' + cfssl gencrl \ + <(cert_serial_number "$tmpdir/inter_rev.pem") \ + "$tmpdir/root.pem" \ + "$tmpdir/root-key.pem" + echo '-----END X509 CRL-----' + + echo '-----BEGIN X509 CRL-----' + cfssl gencrl \ + <(cert_serial_number "$tmpdir/leaf.pem") \ + "$tmpdir/root.pem" \ + "$tmpdir/root-key.pem" + echo '-----END X509 CRL-----' + } >> "$tmpdir/crl.pem" + + cat "$tmpdir/root.pem" "$tmpdir/inter.pem" > "$tmpdir/bundle.pem" config_set ' .api.server.tls.cert_file=strenv(tmpdir) + "/server.pem" | @@ -62,7 +129,7 @@ setup_file() { # remove all machines for machine in $(cscli machines list -o json | jq -r '.[].machineId'); do - cscli machines delete "${machine}" >/dev/null 2>&1 + cscli machines delete "$machine" >/dev/null 2>&1 done config_disable_agent @@ -106,30 +173,32 @@ teardown() { } @test "invalid OU for agent" { - config_set "${CONFIG_DIR}/local_api_credentials.yaml" ' + config_set "$CONFIG_DIR/local_api_credentials.yaml" ' .ca_cert_path=strenv(tmpdir) + "/bundle.pem" | - .key_path=strenv(tmpdir) + "/agent_bad_ou-key.pem" | - .cert_path=strenv(tmpdir) + "/agent_bad_ou.pem" | + .key_path=strenv(tmpdir) + "/leaf_bad_ou-key.pem" | + .cert_path=strenv(tmpdir) + "/leaf_bad_ou.pem" | .url="https://127.0.0.1:8080" ' - config_set "${CONFIG_DIR}/local_api_credentials.yaml" 'del(.login,.password)' + config_set "$CONFIG_DIR/local_api_credentials.yaml" 'del(.login,.password)' ./instance-crowdsec start rune -0 cscli machines list -o json assert_output '[]' } 
@test "we have exactly one machine registered with TLS" { - config_set "${CONFIG_DIR}/local_api_credentials.yaml" ' + config_set "$CONFIG_DIR/local_api_credentials.yaml" ' .ca_cert_path=strenv(tmpdir) + "/bundle.pem" | - .key_path=strenv(tmpdir) + "/agent-key.pem" | - .cert_path=strenv(tmpdir) + "/agent.pem" | + .key_path=strenv(tmpdir) + "/leaf-key.pem" | + .cert_path=strenv(tmpdir) + "/leaf.pem" | .url="https://127.0.0.1:8080" ' - config_set "${CONFIG_DIR}/local_api_credentials.yaml" 'del(.login,.password)' + config_set "$CONFIG_DIR/local_api_credentials.yaml" 'del(.login,.password)' ./instance-crowdsec start rune -0 cscli lapi status + # second connection, test the tls cache + rune -0 cscli lapi status rune -0 cscli machines list -o json rune -0 jq -c '[. | length, .[0].machineId[0:32], .[0].isValidated, .[0].ipAddress, .[0].auth_type]' <(output) @@ -154,24 +223,24 @@ teardown() { # TLS cannot be used with a unix socket - config_set "${CONFIG_DIR}/local_api_credentials.yaml" ' + config_set "$CONFIG_DIR/local_api_credentials.yaml" ' .ca_cert_path=strenv(tmpdir) + "/bundle.pem" ' rune -1 cscli lapi status assert_stderr --partial "loading api client: cannot use TLS with a unix socket" - config_set "${CONFIG_DIR}/local_api_credentials.yaml" ' + config_set "$CONFIG_DIR/local_api_credentials.yaml" ' del(.ca_cert_path) | - .key_path=strenv(tmpdir) + "/agent-key.pem" + .key_path=strenv(tmpdir) + "/leaf-key.pem" ' rune -1 cscli lapi status assert_stderr --partial "loading api client: cannot use TLS with a unix socket" - config_set "${CONFIG_DIR}/local_api_credentials.yaml" ' + config_set "$CONFIG_DIR/local_api_credentials.yaml" ' del(.key_path) | - .cert_path=strenv(tmpdir) + "/agent.pem" + .cert_path=strenv(tmpdir) + "/leaf.pem" ' rune -1 cscli lapi status @@ -181,13 +250,13 @@ teardown() { } @test "invalid cert for agent" { - config_set "${CONFIG_DIR}/local_api_credentials.yaml" ' + config_set "$CONFIG_DIR/local_api_credentials.yaml" ' .ca_cert_path=strenv(tmpdir) + 
"/bundle.pem" | - .key_path=strenv(tmpdir) + "/agent_invalid-key.pem" | - .cert_path=strenv(tmpdir) + "/agent_invalid.pem" | + .key_path=strenv(tmpdir) + "/leaf_invalid-key.pem" | + .cert_path=strenv(tmpdir) + "/leaf_invalid.pem" | .url="https://127.0.0.1:8080" ' - config_set "${CONFIG_DIR}/local_api_credentials.yaml" 'del(.login,.password)' + config_set "$CONFIG_DIR/local_api_credentials.yaml" 'del(.login,.password)' ./instance-crowdsec start rune -1 cscli lapi status rune -0 cscli machines list -o json @@ -196,22 +265,35 @@ teardown() { @test "revoked cert for agent" { # we have two certificates revoked by different CRL blocks - for cert_name in "revoked_1" "revoked_2"; do + # we connect twice to test the cache too + for cert in "leaf_rev1" "leaf_rev2" "leaf_rev1" "leaf_rev2"; do truncate_log - cert_name="$cert_name" config_set "${CONFIG_DIR}/local_api_credentials.yaml" ' + cert="$cert" config_set "$CONFIG_DIR/local_api_credentials.yaml" ' .ca_cert_path=strenv(tmpdir) + "/bundle.pem" | - .key_path=strenv(tmpdir) + "/" + strenv(cert_name) + "-key.pem" | - .cert_path=strenv(tmpdir) + "/" + strenv(cert_name) + ".pem" | + .key_path=strenv(tmpdir) + "/" + strenv(cert) + "-key.pem" | + .cert_path=strenv(tmpdir) + "/" + strenv(cert) + ".pem" | .url="https://127.0.0.1:8080" ' - config_set "${CONFIG_DIR}/local_api_credentials.yaml" 'del(.login,.password)' + config_set "$CONFIG_DIR/local_api_credentials.yaml" 'del(.login,.password)' ./instance-crowdsec start rune -1 cscli lapi status - assert_log --partial "client certificate is revoked by CRL" - assert_log --partial "client certificate for CN=localhost OU=[agent-ou] is revoked" + assert_log --partial "certificate revoked by CRL" rune -0 cscli machines list -o json assert_output '[]' ./instance-crowdsec stop done } + +# vvv this test must be last, or it can break the ones that follow + +@test "allowed_ou can't contain an empty string" { + config_set ' + .common.log_media="stdout" | + 
.api.server.tls.agents_allowed_ou=["agent-ou", ""] + ' + rune -1 wait-for "$CROWDSEC" + assert_stderr --partial "allowed_ou configuration contains invalid empty string" +} + +# ^^^ this test must be last, or it can break the ones that follow diff --git a/test/bats/testdata/cfssl/agent.json b/test/bats/testdata/cfssl/agent.json index 693e3aa512b..47b342e5a40 100644 --- a/test/bats/testdata/cfssl/agent.json +++ b/test/bats/testdata/cfssl/agent.json @@ -1,10 +1,10 @@ { - "CN": "localhost", - "key": { - "algo": "rsa", - "size": 2048 - }, - "names": [ + "CN": "localhost", + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [ { "C": "FR", "L": "Paris", @@ -12,5 +12,5 @@ "OU": "agent-ou", "ST": "France" } - ] - } \ No newline at end of file + ] +} diff --git a/test/bats/testdata/cfssl/agent_invalid.json b/test/bats/testdata/cfssl/agent_invalid.json index c61d4dee677..eb7db8d96fb 100644 --- a/test/bats/testdata/cfssl/agent_invalid.json +++ b/test/bats/testdata/cfssl/agent_invalid.json @@ -1,10 +1,10 @@ { - "CN": "localhost", - "key": { - "algo": "rsa", - "size": 2048 - }, - "names": [ + "CN": "localhost", + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [ { "C": "FR", "L": "Paris", @@ -12,5 +12,5 @@ "OU": "this-is-not-the-ou-youre-looking-for", "ST": "France" } - ] - } \ No newline at end of file + ] +} diff --git a/test/bats/testdata/cfssl/bouncer.json b/test/bats/testdata/cfssl/bouncer.json index 9a07f576610..bf642c48ad8 100644 --- a/test/bats/testdata/cfssl/bouncer.json +++ b/test/bats/testdata/cfssl/bouncer.json @@ -1,10 +1,10 @@ { - "CN": "localhost", - "key": { - "algo": "rsa", - "size": 2048 - }, - "names": [ + "CN": "localhost", + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [ { "C": "FR", "L": "Paris", @@ -12,5 +12,5 @@ "OU": "bouncer-ou", "ST": "France" } - ] - } \ No newline at end of file + ] +} diff --git a/test/bats/testdata/cfssl/bouncer_invalid.json b/test/bats/testdata/cfssl/bouncer_invalid.json index c61d4dee677..eb7db8d96fb 
100644 --- a/test/bats/testdata/cfssl/bouncer_invalid.json +++ b/test/bats/testdata/cfssl/bouncer_invalid.json @@ -1,10 +1,10 @@ { - "CN": "localhost", - "key": { - "algo": "rsa", - "size": 2048 - }, - "names": [ + "CN": "localhost", + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [ { "C": "FR", "L": "Paris", @@ -12,5 +12,5 @@ "OU": "this-is-not-the-ou-youre-looking-for", "ST": "France" } - ] - } \ No newline at end of file + ] +} diff --git a/test/bats/testdata/cfssl/ca.json b/test/bats/testdata/cfssl/ca.json deleted file mode 100644 index ed907e0375b..00000000000 --- a/test/bats/testdata/cfssl/ca.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "CN": "CrowdSec Test CA", - "key": { - "algo": "rsa", - "size": 2048 - }, - "names": [ - { - "C": "FR", - "L": "Paris", - "O": "Crowdsec", - "OU": "Crowdsec", - "ST": "France" - } - ] -} \ No newline at end of file diff --git a/test/bats/testdata/cfssl/intermediate.json b/test/bats/testdata/cfssl/ca_intermediate.json similarity index 53% rename from test/bats/testdata/cfssl/intermediate.json rename to test/bats/testdata/cfssl/ca_intermediate.json index 3996ce6e189..34f1583da06 100644 --- a/test/bats/testdata/cfssl/intermediate.json +++ b/test/bats/testdata/cfssl/ca_intermediate.json @@ -1,10 +1,10 @@ { - "CN": "CrowdSec Test CA Intermediate", - "key": { - "algo": "rsa", - "size": 2048 - }, - "names": [ + "CN": "CrowdSec Test CA Intermediate", + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [ { "C": "FR", "L": "Paris", @@ -12,8 +12,8 @@ "OU": "Crowdsec Intermediate", "ST": "France" } - ], - "ca": { + ], + "ca": { "expiry": "42720h" } - } \ No newline at end of file +} diff --git a/test/bats/testdata/cfssl/ca_root.json b/test/bats/testdata/cfssl/ca_root.json new file mode 100644 index 00000000000..a0d64796637 --- /dev/null +++ b/test/bats/testdata/cfssl/ca_root.json @@ -0,0 +1,16 @@ +{ + "CN": "CrowdSec Test CA", + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [ + { + "C": "FR", + "L": "Paris", + 
"O": "Crowdsec", + "OU": "Crowdsec", + "ST": "France" + } + ] +} diff --git a/test/bats/testdata/cfssl/profiles.json b/test/bats/testdata/cfssl/profiles.json index d0dfced4a47..47611beb64c 100644 --- a/test/bats/testdata/cfssl/profiles.json +++ b/test/bats/testdata/cfssl/profiles.json @@ -1,44 +1,37 @@ { - "signing": { - "default": { + "signing": { + "default": { + "expiry": "8760h" + }, + "profiles": { + "intermediate_ca": { + "usages": [ + "signing", + "key encipherment", + "cert sign", + "crl sign", + "server auth", + "client auth" + ], + "expiry": "8760h", + "ca_constraint": { + "is_ca": true, + "max_path_len": 0, + "max_path_len_zero": true + } + }, + "server": { + "usages": [ + "server auth" + ], "expiry": "8760h" }, - "profiles": { - "intermediate_ca": { - "usages": [ - "signing", - "digital signature", - "key encipherment", - "cert sign", - "crl sign", - "server auth", - "client auth" - ], - "expiry": "8760h", - "ca_constraint": { - "is_ca": true, - "max_path_len": 0, - "max_path_len_zero": true - } - }, - "server": { - "usages": [ - "signing", - "digital signing", - "key encipherment", - "server auth" - ], - "expiry": "8760h" - }, - "client": { - "usages": [ - "signing", - "digital signature", - "key encipherment", - "client auth" - ], - "expiry": "8760h" - } + "client": { + "usages": [ + "client auth" + ], + "expiry": "8760h" } } - } \ No newline at end of file + } +} diff --git a/test/bats/testdata/cfssl/server.json b/test/bats/testdata/cfssl/server.json index 37018259e95..cce97037ca7 100644 --- a/test/bats/testdata/cfssl/server.json +++ b/test/bats/testdata/cfssl/server.json @@ -1,10 +1,10 @@ { - "CN": "localhost", - "key": { - "algo": "rsa", - "size": 2048 - }, - "names": [ + "CN": "localhost", + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [ { "C": "FR", "L": "Paris", @@ -12,9 +12,9 @@ "OU": "Crowdsec Server", "ST": "France" } - ], - "hosts": [ - "127.0.0.1", - "localhost" - ] - } \ No newline at end of file + ], + "hosts": [ + 
"127.0.0.1", + "localhost" + ] +} diff --git a/test/lib/setup_file.sh b/test/lib/setup_file.sh index 3e6db0f12ff..ac651c68c4f 100755 --- a/test/lib/setup_file.sh +++ b/test/lib/setup_file.sh @@ -155,6 +155,11 @@ assert_log() { } export -f assert_log +cert_serial_number() { + cfssl certinfo -cert "$1" | jq -r '.serial_number' +} +export -f cert_serial_number + # Compare ignoring the key order, and allow "expected" without quoted identifiers. # Preserve the output variable in case the following commands require it. assert_json() { From d4e6b75517a65513f2e1df6991d7cd30fd67d0b4 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 26 Jun 2024 11:06:44 +0200 Subject: [PATCH 200/318] CI: use go 1.22.4 (#3095) * CI: use go 1.22.4 * fix alpine build --- .github/workflows/bats-hub.yml | 2 +- .github/workflows/bats-mysql.yml | 2 +- .github/workflows/bats-postgres.yml | 2 +- .github/workflows/bats-sqlite-coverage.yml | 2 +- .github/workflows/ci-windows-build-msi.yml | 2 +- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/go-tests-windows.yml | 2 +- .github/workflows/go-tests.yml | 2 +- .github/workflows/publish-tarball-release.yml | 2 +- Dockerfile | 4 ++-- Dockerfile.debian | 2 +- azure-pipelines.yml | 2 +- 12 files changed, 13 insertions(+), 13 deletions(-) diff --git a/.github/workflows/bats-hub.yml b/.github/workflows/bats-hub.yml index 941d4d15f13..e537aee8d1b 100644 --- a/.github/workflows/bats-hub.yml +++ b/.github/workflows/bats-hub.yml @@ -33,7 +33,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.3" + go-version: "1.22.4" - name: "Install bats dependencies" env: diff --git a/.github/workflows/bats-mysql.yml b/.github/workflows/bats-mysql.yml index 8cc544523ef..659102ced56 100644 --- a/.github/workflows/bats-mysql.yml +++ b/.github/workflows/bats-mysql.yml @@ -36,7 +36,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.3" + go-version: "1.22.4" - name: 
"Install bats dependencies" env: diff --git a/.github/workflows/bats-postgres.yml b/.github/workflows/bats-postgres.yml index 87101be0159..1d12a371430 100644 --- a/.github/workflows/bats-postgres.yml +++ b/.github/workflows/bats-postgres.yml @@ -45,7 +45,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.3" + go-version: "1.22.4" - name: "Install bats dependencies" env: diff --git a/.github/workflows/bats-sqlite-coverage.yml b/.github/workflows/bats-sqlite-coverage.yml index 0d9906d11f0..e72973e1b1f 100644 --- a/.github/workflows/bats-sqlite-coverage.yml +++ b/.github/workflows/bats-sqlite-coverage.yml @@ -31,7 +31,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.3" + go-version: "1.22.4" - name: "Install bats dependencies" env: diff --git a/.github/workflows/ci-windows-build-msi.yml b/.github/workflows/ci-windows-build-msi.yml index 9bca2122513..d8011b16247 100644 --- a/.github/workflows/ci-windows-build-msi.yml +++ b/.github/workflows/ci-windows-build-msi.yml @@ -35,7 +35,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.3" + go-version: "1.22.4" - name: Build run: make windows_installer BUILD_RE2_WASM=1 diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index cee77161a8e..ed3f7b4a4c2 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -52,7 +52,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.3" + go-version: "1.22.4" cache-dependency-path: "**/go.sum" # Initializes the CodeQL tools for scanning. 
diff --git a/.github/workflows/go-tests-windows.yml b/.github/workflows/go-tests-windows.yml index daf6312447b..5c2ef0e0b0d 100644 --- a/.github/workflows/go-tests-windows.yml +++ b/.github/workflows/go-tests-windows.yml @@ -34,7 +34,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.3" + go-version: "1.22.4" - name: Build run: | diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index 41e84189383..7f192a85b8f 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -126,7 +126,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.3" + go-version: "1.22.4" - name: Create localstack streams run: | diff --git a/.github/workflows/publish-tarball-release.yml b/.github/workflows/publish-tarball-release.yml index bf8d5fe5f1a..a5ce1b37df3 100644 --- a/.github/workflows/publish-tarball-release.yml +++ b/.github/workflows/publish-tarball-release.yml @@ -25,7 +25,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.3" + go-version: "1.22.4" - name: Build the binaries run: | diff --git a/Dockerfile b/Dockerfile index faa50f3f79a..45443e971ac 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # vim: set ft=dockerfile: -FROM golang:1.22.3-alpine3.18 AS build +FROM golang:1.22.4-alpine3.20 AS build ARG BUILD_VERSION @@ -20,7 +20,7 @@ RUN apk add --no-cache git g++ gcc libc-dev make bash gettext binutils-gold core COPY . . 
-RUN make clean release DOCKER_BUILD=1 BUILD_STATIC=1 && \ +RUN make clean release DOCKER_BUILD=1 BUILD_STATIC=1 CGO_CFLAGS="-D_LARGEFILE64_SOURCE" && \ cd crowdsec-v* && \ ./wizard.sh --docker-mode && \ cd - >/dev/null && \ diff --git a/Dockerfile.debian b/Dockerfile.debian index 0ef1727f3e6..6fc5a15f766 100644 --- a/Dockerfile.debian +++ b/Dockerfile.debian @@ -1,5 +1,5 @@ # vim: set ft=dockerfile: -FROM golang:1.22.3-bookworm AS build +FROM golang:1.22.4-bookworm AS build ARG BUILD_VERSION diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 0fc53242a9f..77015d18ef3 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -21,7 +21,7 @@ stages: - task: GoTool@0 displayName: "Install Go" inputs: - version: '1.22.3' + version: '1.22.4' - pwsh: | choco install -y make From 206211ce53554eb1bb6e947836818ec3af52af54 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 26 Jun 2024 12:16:17 +0200 Subject: [PATCH 201/318] lint: import statement order (#3085) * lint: import statement order * lint --- cmd/crowdsec/lapiclient.go | 2 +- cmd/notification-file/main.go | 11 ++-- cmd/notification-sentinel/main.go | 7 ++- pkg/acquisition/acquisition.go | 6 +-- pkg/acquisition/modules/appsec/appsec.go | 54 +++++++++---------- .../modules/appsec/appsec_hooks_test.go | 8 +-- .../modules/appsec/appsec_lnx_test.go | 6 +-- .../modules/appsec/appsec_remediation_test.go | 5 +- .../modules/appsec/appsec_rules_test.go | 8 ++- .../modules/appsec/appsec_runner.go | 10 ++-- pkg/acquisition/modules/appsec/appsec_test.go | 8 +-- pkg/acquisition/modules/appsec/rx_operator.go | 5 +- pkg/acquisition/modules/appsec/utils.go | 11 ++-- .../modules/cloudwatch/cloudwatch_test.go | 11 ++-- pkg/acquisition/modules/docker/docker_test.go | 11 ++-- .../modules/journalctl/journalctl_test.go | 9 ++-- .../modules/kinesis/kinesis_test.go | 11 ++-- .../modules/kubernetesaudit/k8s_audit_test.go | 6 +-- .../loki/internal/lokiclient/loki_client.go | 9 ++-- 
pkg/acquisition/modules/s3/s3_test.go | 9 ++-- .../internal/parser/rfc5424/parse_test.go | 4 +- pkg/acquisition/modules/syslog/syslog_test.go | 8 +-- pkg/alertcontext/alertcontext_test.go | 5 +- pkg/apiserver/apic_metrics.go | 2 +- pkg/apiserver/controllers/v1/utils.go | 2 +- pkg/appsec/appsec.go | 5 +- pkg/appsec/appsec_rules_collection.go | 6 +-- pkg/appsec/loader.go | 6 +-- pkg/csconfig/config.go | 3 +- pkg/csplugin/helpers.go | 3 +- pkg/exprhelpers/crowdsec_cti.go | 5 +- pkg/exprhelpers/debugger_test.go | 3 +- pkg/exprhelpers/expr_lib.go | 3 +- pkg/exprhelpers/jsonextract.go | 8 ++- pkg/leakybucket/bayesian.go | 3 +- pkg/leakybucket/bucket.go | 9 ++-- pkg/leakybucket/conditional.go | 1 + pkg/leakybucket/manager_run_test.go | 11 ++-- pkg/leakybucket/timemachine.go | 3 +- pkg/leakybucket/trigger.go | 3 +- pkg/parser/whitelist.go | 3 +- pkg/types/event.go | 2 +- 42 files changed, 151 insertions(+), 154 deletions(-) diff --git a/cmd/crowdsec/lapiclient.go b/cmd/crowdsec/lapiclient.go index ae23850eb0a..6cc0fba9515 100644 --- a/cmd/crowdsec/lapiclient.go +++ b/cmd/crowdsec/lapiclient.go @@ -10,8 +10,8 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/apiclient" "github.com/crowdsecurity/crowdsec/pkg/csconfig" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/models" ) diff --git a/cmd/notification-file/main.go b/cmd/notification-file/main.go index 467bdd4a4ff..7fc529cff41 100644 --- a/cmd/notification-file/main.go +++ b/cmd/notification-file/main.go @@ -11,10 +11,11 @@ import ( "sync" "time" - "github.com/crowdsecurity/crowdsec/pkg/protobufs" "github.com/hashicorp/go-hclog" plugin "github.com/hashicorp/go-plugin" "gopkg.in/yaml.v3" + + "github.com/crowdsecurity/crowdsec/pkg/protobufs" ) var ( @@ -69,7 +70,7 @@ func (r *LogRotate) rotateLogs(cfg PluginConfig) { } // Reopen the FileWriter FileWriter.Close() - FileWriter, err = 
os.OpenFile(cfg.LogPath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + FileWriter, err = os.OpenFile(cfg.LogPath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644) if err != nil { logger.Error("Failed to reopen log file", "error", err) } @@ -173,7 +174,7 @@ func WriteToFileWithCtx(ctx context.Context, cfg PluginConfig, log string) error // The file has been rotated outside our control logger.Info("Log file has been rotated or missing attempting to reopen it") FileWriter.Close() - FileWriter, err = os.OpenFile(cfg.LogPath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + FileWriter, err = os.OpenFile(cfg.LogPath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644) if err != nil { return err } @@ -213,7 +214,7 @@ func (s *FilePlugin) Configure(ctx context.Context, config *protobufs.Config) (* return &protobufs.Empty{}, err } FileWriteMutex = &sync.Mutex{} - FileWriter, err = os.OpenFile(d.LogPath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + FileWriter, err = os.OpenFile(d.LogPath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644) if err != nil { logger.Error("Failed to open log file", "error", err) return &protobufs.Empty{}, err @@ -230,7 +231,7 @@ func (s *FilePlugin) Configure(ctx context.Context, config *protobufs.Config) (* } func main() { - var handshake = plugin.HandshakeConfig{ + handshake := plugin.HandshakeConfig{ ProtocolVersion: 1, MagicCookieKey: "CROWDSEC_PLUGIN_KEY", MagicCookieValue: os.Getenv("CROWDSEC_PLUGIN_KEY"), diff --git a/cmd/notification-sentinel/main.go b/cmd/notification-sentinel/main.go index 9c34e63a289..a29e941f80c 100644 --- a/cmd/notification-sentinel/main.go +++ b/cmd/notification-sentinel/main.go @@ -11,10 +11,11 @@ import ( "strings" "time" - "github.com/crowdsecurity/crowdsec/pkg/protobufs" "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-plugin" "gopkg.in/yaml.v3" + + "github.com/crowdsecurity/crowdsec/pkg/protobufs" ) type PluginConfig struct { @@ -54,7 +55,6 @@ func (s *SentinelPlugin) getAuthorizationHeader(now string, length int, pluginNa } 
func (s *SentinelPlugin) Notify(ctx context.Context, notification *protobufs.Notification) (*protobufs.Empty, error) { - if _, ok := s.PluginConfigByName[notification.Name]; !ok { return nil, fmt.Errorf("invalid plugin config name %s", notification.Name) } @@ -73,7 +73,6 @@ func (s *SentinelPlugin) Notify(ctx context.Context, notification *protobufs.Not now := time.Now().UTC().Format("Mon, 02 Jan 2006 15:04:05 GMT") authorization, err := s.getAuthorizationHeader(now, len(notification.Text), notification.Name) - if err != nil { return &protobufs.Empty{}, err } @@ -113,7 +112,7 @@ func (s *SentinelPlugin) Configure(ctx context.Context, config *protobufs.Config } func main() { - var handshake = plugin.HandshakeConfig{ + handshake := plugin.HandshakeConfig{ ProtocolVersion: 1, MagicCookieKey: "CROWDSEC_PLUGIN_KEY", MagicCookieValue: os.Getenv("CROWDSEC_PLUGIN_KEY"), diff --git a/pkg/acquisition/acquisition.go b/pkg/acquisition/acquisition.go index 6a91423c158..5e4a663eb9e 100644 --- a/pkg/acquisition/acquisition.go +++ b/pkg/acquisition/acquisition.go @@ -30,9 +30,8 @@ import ( s3acquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/s3" syslogacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/syslog" wineventlogacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/wineventlog" - "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" - "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -124,7 +123,6 @@ func DataSourceConfigure(commonConfig configuration.DataSourceCommonCfg, metrics /* configure the actual datasource */ if err := dataSrc.Configure(yamlConfig, subLogger, metricsLevel); err != nil { return nil, fmt.Errorf("failed to configure datasource %s: %w", commonConfig.Source, err) - } return &dataSrc, nil } @@ -181,7 +179,6 @@ func LoadAcquisitionFromDSN(dsn string, labels map[string]string, transformExpr func 
GetMetricsLevelFromPromCfg(prom *csconfig.PrometheusCfg) int { if prom == nil { return configuration.METRICS_FULL - } if !prom.Enabled { return configuration.METRICS_NONE @@ -194,7 +191,6 @@ func GetMetricsLevelFromPromCfg(prom *csconfig.PrometheusCfg) int { return configuration.METRICS_FULL } return configuration.METRICS_FULL - } // LoadAcquisitionFromFile unmarshals the configuration item and checks its availability diff --git a/pkg/acquisition/modules/appsec/appsec.go b/pkg/acquisition/modules/appsec/appsec.go index b141ee6d666..5b0661a21b7 100644 --- a/pkg/acquisition/modules/appsec/appsec.go +++ b/pkg/acquisition/modules/appsec/appsec.go @@ -3,6 +3,7 @@ package appsecacquisition import ( "context" "encoding/json" + "errors" "fmt" "net" "net/http" @@ -10,17 +11,18 @@ import ( "sync" "time" - "github.com/crowdsecurity/crowdsec/pkg/csconfig" - - "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" - "github.com/crowdsecurity/crowdsec/pkg/appsec" - "github.com/crowdsecurity/crowdsec/pkg/types" - "github.com/crowdsecurity/go-cs-lib/trace" "github.com/google/uuid" "github.com/prometheus/client_golang/prometheus" log "github.com/sirupsen/logrus" "gopkg.in/tomb.v2" "gopkg.in/yaml.v2" + + "github.com/crowdsecurity/go-cs-lib/trace" + + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" + "github.com/crowdsecurity/crowdsec/pkg/appsec" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/types" ) const ( @@ -28,9 +30,7 @@ const ( OutOfBand = "outofband" ) -var ( - DefaultAuthCacheDuration = (1 * time.Minute) -) +var DefaultAuthCacheDuration = (1 * time.Minute) // configuration structure of the acquis for the application security engine type AppsecSourceConfig struct { @@ -94,10 +94,9 @@ type BodyResponse struct { } func (w *AppsecSource) UnmarshalConfig(yamlConfig []byte) error { - err := yaml.UnmarshalStrict(yamlConfig, &w.config) if err != nil { - return fmt.Errorf("Cannot parse appsec configuration: 
%w", err) + return fmt.Errorf("cannot parse appsec configuration: %w", err) } if w.config.ListenAddr == "" && w.config.ListenSocket == "" { @@ -122,7 +121,7 @@ func (w *AppsecSource) UnmarshalConfig(yamlConfig []byte) error { } if w.config.AppsecConfig == "" && w.config.AppsecConfigPath == "" { - return fmt.Errorf("appsec_config or appsec_config_path must be set") + return errors.New("appsec_config or appsec_config_path must be set") } if w.config.Name == "" { @@ -177,26 +176,25 @@ func (w *AppsecSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLe if w.config.AppsecConfigPath != "" { err := appsecCfg.LoadByPath(w.config.AppsecConfigPath) if err != nil { - return fmt.Errorf("unable to load appsec_config : %s", err) + return fmt.Errorf("unable to load appsec_config: %w", err) } } else if w.config.AppsecConfig != "" { err := appsecCfg.Load(w.config.AppsecConfig) if err != nil { - return fmt.Errorf("unable to load appsec_config : %s", err) + return fmt.Errorf("unable to load appsec_config: %w", err) } } else { - return fmt.Errorf("no appsec_config provided") + return errors.New("no appsec_config provided") } w.AppsecRuntime, err = appsecCfg.Build() if err != nil { - return fmt.Errorf("unable to build appsec_config : %s", err) + return fmt.Errorf("unable to build appsec_config: %w", err) } err = w.AppsecRuntime.ProcessOnLoadRules() - if err != nil { - return fmt.Errorf("unable to process on load rules : %s", err) + return fmt.Errorf("unable to process on load rules: %w", err) } w.AppsecRunners = make([]AppsecRunner, w.config.Routines) @@ -207,15 +205,15 @@ func (w *AppsecSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLe wrt := *w.AppsecRuntime wrt.Logger = w.logger.Dup().WithField("runner_uuid", appsecRunnerUUID) runner := AppsecRunner{ - inChan: w.InChan, - UUID: appsecRunnerUUID, - logger: w.logger.WithField("runner_uuid", appsecRunnerUUID), + inChan: w.InChan, + UUID: appsecRunnerUUID, + logger: w.logger.WithField("runner_uuid", 
appsecRunnerUUID), AppsecRuntime: &wrt, Labels: w.config.Labels, } err := runner.Init(appsecCfg.GetDataDir()) if err != nil { - return fmt.Errorf("unable to initialize runner : %s", err) + return fmt.Errorf("unable to initialize runner: %w", err) } w.AppsecRunners[nbRoutine] = runner } @@ -228,7 +226,7 @@ func (w *AppsecSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLe } func (w *AppsecSource) ConfigureByDSN(dsn string, labels map[string]string, logger *log.Entry, uuid string) error { - return fmt.Errorf("AppSec datasource does not support command line acquisition") + return errors.New("AppSec datasource does not support command line acquisition") } func (w *AppsecSource) GetMode() string { @@ -240,7 +238,7 @@ func (w *AppsecSource) GetName() string { } func (w *AppsecSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error { - return fmt.Errorf("AppSec datasource does not support command line acquisition") + return errors.New("AppSec datasource does not support command line acquisition") } func (w *AppsecSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { @@ -262,7 +260,7 @@ func (w *AppsecSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) _ = os.RemoveAll(w.config.ListenSocket) listener, err := net.Listen("unix", w.config.ListenSocket) if err != nil { - return fmt.Errorf("Appsec server failed: %w", err) + return fmt.Errorf("appsec server failed: %w", err) } defer listener.Close() if w.config.CertFilePath != "" && w.config.KeyFilePath != "" { @@ -271,7 +269,7 @@ func (w *AppsecSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) err = w.server.Serve(listener) } if err != nil && err != http.ErrServerClosed { - return fmt.Errorf("Appsec server failed: %w", err) + return fmt.Errorf("appsec server failed: %w", err) } } return nil @@ -287,7 +285,7 @@ func (w *AppsecSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) } if err != nil && err != http.ErrServerClosed { - return 
fmt.Errorf("Appsec server failed: %w", err) + return fmt.Errorf("appsec server failed: %w", err) } } return nil @@ -334,7 +332,6 @@ func (w *AppsecSource) IsAuth(apiKey string) bool { defer resp.Body.Close() return resp.StatusCode == http.StatusOK - } // should this be in the runner ? @@ -401,5 +398,4 @@ func (w *AppsecSource) appsecHandler(rw http.ResponseWriter, r *http.Request) { } else { rw.Write(body) } - } diff --git a/pkg/acquisition/modules/appsec/appsec_hooks_test.go b/pkg/acquisition/modules/appsec/appsec_hooks_test.go index 65fba33ae81..c549d2ef1d1 100644 --- a/pkg/acquisition/modules/appsec/appsec_hooks_test.go +++ b/pkg/acquisition/modules/appsec/appsec_hooks_test.go @@ -5,12 +5,13 @@ import ( "net/url" "testing" - "github.com/crowdsecurity/crowdsec/pkg/appsec" - "github.com/crowdsecurity/crowdsec/pkg/appsec/appsec_rule" - "github.com/crowdsecurity/crowdsec/pkg/types" "github.com/davecgh/go-spew/spew" log "github.com/sirupsen/logrus" "github.com/stretchr/testify/require" + + "github.com/crowdsecurity/crowdsec/pkg/appsec" + "github.com/crowdsecurity/crowdsec/pkg/appsec/appsec_rule" + "github.com/crowdsecurity/crowdsec/pkg/types" ) func TestAppsecOnMatchHooks(t *testing.T) { @@ -41,7 +42,6 @@ func TestAppsecOnMatchHooks(t *testing.T) { require.Equal(t, 403, responses[0].BouncerHTTPResponseCode) require.Equal(t, 403, responses[0].UserHTTPResponseCode) require.Equal(t, appsec.BanRemediation, responses[0].Action) - }, }, { diff --git a/pkg/acquisition/modules/appsec/appsec_lnx_test.go b/pkg/acquisition/modules/appsec/appsec_lnx_test.go index 93edc9d9ec3..3e40a1f970c 100644 --- a/pkg/acquisition/modules/appsec/appsec_lnx_test.go +++ b/pkg/acquisition/modules/appsec/appsec_lnx_test.go @@ -6,15 +6,15 @@ package appsecacquisition import ( "testing" + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/require" + "github.com/crowdsecurity/crowdsec/pkg/appsec" "github.com/crowdsecurity/crowdsec/pkg/appsec/appsec_rule" 
"github.com/crowdsecurity/crowdsec/pkg/types" - log "github.com/sirupsen/logrus" - "github.com/stretchr/testify/require" ) func TestAppsecRuleTransformsOthers(t *testing.T) { - log.SetLevel(log.TraceLevel) tests := []appsecRuleTest{ { diff --git a/pkg/acquisition/modules/appsec/appsec_remediation_test.go b/pkg/acquisition/modules/appsec/appsec_remediation_test.go index a7f117389b3..06016b6251f 100644 --- a/pkg/acquisition/modules/appsec/appsec_remediation_test.go +++ b/pkg/acquisition/modules/appsec/appsec_remediation_test.go @@ -5,14 +5,14 @@ import ( "net/url" "testing" + "github.com/stretchr/testify/require" + "github.com/crowdsecurity/crowdsec/pkg/appsec" "github.com/crowdsecurity/crowdsec/pkg/appsec/appsec_rule" "github.com/crowdsecurity/crowdsec/pkg/types" - "github.com/stretchr/testify/require" ) func TestAppsecDefaultPassRemediation(t *testing.T) { - tests := []appsecRuleTest{ { name: "Basic non-matching rule", @@ -152,7 +152,6 @@ func TestAppsecDefaultPassRemediation(t *testing.T) { } func TestAppsecDefaultRemediation(t *testing.T) { - tests := []appsecRuleTest{ { name: "Basic matching rule", diff --git a/pkg/acquisition/modules/appsec/appsec_rules_test.go b/pkg/acquisition/modules/appsec/appsec_rules_test.go index b25e4465f0f..909f16357ed 100644 --- a/pkg/acquisition/modules/appsec/appsec_rules_test.go +++ b/pkg/acquisition/modules/appsec/appsec_rules_test.go @@ -5,15 +5,15 @@ import ( "net/url" "testing" + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/require" + "github.com/crowdsecurity/crowdsec/pkg/appsec" "github.com/crowdsecurity/crowdsec/pkg/appsec/appsec_rule" "github.com/crowdsecurity/crowdsec/pkg/types" - log "github.com/sirupsen/logrus" - "github.com/stretchr/testify/require" ) func TestAppsecRuleMatches(t *testing.T) { - tests := []appsecRuleTest{ { name: "Basic matching rule", @@ -368,7 +368,6 @@ toto } func TestAppsecRuleTransforms(t *testing.T) { - log.SetLevel(log.TraceLevel) tests := []appsecRuleTest{ { @@ -568,7 +567,6 
@@ func TestAppsecRuleTransforms(t *testing.T) { } func TestAppsecRuleZones(t *testing.T) { - log.SetLevel(log.TraceLevel) tests := []appsecRuleTest{ { diff --git a/pkg/acquisition/modules/appsec/appsec_runner.go b/pkg/acquisition/modules/appsec/appsec_runner.go index c015db74d82..ed49d6a7b41 100644 --- a/pkg/acquisition/modules/appsec/appsec_runner.go +++ b/pkg/acquisition/modules/appsec/appsec_runner.go @@ -6,16 +6,17 @@ import ( "slices" "time" - "github.com/crowdsecurity/coraza/v3" - corazatypes "github.com/crowdsecurity/coraza/v3/types" - "github.com/crowdsecurity/crowdsec/pkg/appsec" - "github.com/crowdsecurity/crowdsec/pkg/types" "github.com/prometheus/client_golang/prometheus" log "github.com/sirupsen/logrus" "gopkg.in/tomb.v2" + "github.com/crowdsecurity/coraza/v3" + corazatypes "github.com/crowdsecurity/coraza/v3/types" + // load body processors via init() _ "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/appsec/bodyprocessors" + "github.com/crowdsecurity/crowdsec/pkg/appsec" + "github.com/crowdsecurity/crowdsec/pkg/types" ) // that's the runtime structure of the Application security engine as seen from the acquis @@ -178,7 +179,6 @@ func (r *AppsecRunner) processRequest(tx appsec.ExtendedTransaction, request *ap } in, err = request.Tx.ProcessRequestBody() - if err != nil { r.logger.Errorf("unable to process request body : %s", err) return err diff --git a/pkg/acquisition/modules/appsec/appsec_test.go b/pkg/acquisition/modules/appsec/appsec_test.go index c769ea3d0fd..d2079b43726 100644 --- a/pkg/acquisition/modules/appsec/appsec_test.go +++ b/pkg/acquisition/modules/appsec/appsec_test.go @@ -4,12 +4,13 @@ import ( "testing" "time" - "github.com/crowdsecurity/crowdsec/pkg/appsec" - "github.com/crowdsecurity/crowdsec/pkg/appsec/appsec_rule" - "github.com/crowdsecurity/crowdsec/pkg/types" "github.com/davecgh/go-spew/spew" "github.com/google/uuid" log "github.com/sirupsen/logrus" + + "github.com/crowdsecurity/crowdsec/pkg/appsec" + 
"github.com/crowdsecurity/crowdsec/pkg/appsec/appsec_rule" + "github.com/crowdsecurity/crowdsec/pkg/types" ) type appsecRuleTest struct { @@ -120,5 +121,4 @@ func loadAppSecEngine(test appsecRuleTest, t *testing.T) { log.Infof("events : %s", spew.Sdump(OutputEvents)) log.Infof("responses : %s", spew.Sdump(OutputResponses)) test.output_asserts(OutputEvents, OutputResponses, appsecResponse, http_status) - } diff --git a/pkg/acquisition/modules/appsec/rx_operator.go b/pkg/acquisition/modules/appsec/rx_operator.go index 73060037657..4b16296fd40 100644 --- a/pkg/acquisition/modules/appsec/rx_operator.go +++ b/pkg/acquisition/modules/appsec/rx_operator.go @@ -5,10 +5,11 @@ import ( "strconv" "unicode/utf8" - "github.com/crowdsecurity/coraza/v3/experimental/plugins" - "github.com/crowdsecurity/coraza/v3/experimental/plugins/plugintypes" "github.com/wasilibs/go-re2" "github.com/wasilibs/go-re2/experimental" + + "github.com/crowdsecurity/coraza/v3/experimental/plugins" + "github.com/crowdsecurity/coraza/v3/experimental/plugins/plugintypes" ) type rx struct { diff --git a/pkg/acquisition/modules/appsec/utils.go b/pkg/acquisition/modules/appsec/utils.go index 02ded2a2437..15de8046716 100644 --- a/pkg/acquisition/modules/appsec/utils.go +++ b/pkg/acquisition/modules/appsec/utils.go @@ -7,17 +7,19 @@ import ( "strconv" "time" + "github.com/oschwald/geoip2-golang" + "github.com/prometheus/client_golang/prometheus" + log "github.com/sirupsen/logrus" + "github.com/crowdsecurity/coraza/v3/collection" "github.com/crowdsecurity/coraza/v3/types/variables" + "github.com/crowdsecurity/go-cs-lib/ptr" + "github.com/crowdsecurity/crowdsec/pkg/alertcontext" "github.com/crowdsecurity/crowdsec/pkg/appsec" "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/types" - "github.com/crowdsecurity/go-cs-lib/ptr" - "github.com/oschwald/geoip2-golang" - "github.com/prometheus/client_golang/prometheus" - log 
"github.com/sirupsen/logrus" ) var appsecMetaKeys = []string{ @@ -368,5 +370,4 @@ func (r *AppsecRunner) AccumulateTxToEvent(evt *types.Event, req *appsec.ParsedR } return nil - } diff --git a/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go b/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go index 89a2b56bc00..bab7593f26f 100644 --- a/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go +++ b/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go @@ -10,15 +10,16 @@ import ( "testing" "time" - "github.com/crowdsecurity/go-cs-lib/cstest" - "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/cloudwatchlogs" - "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" - "github.com/crowdsecurity/crowdsec/pkg/types" log "github.com/sirupsen/logrus" "github.com/stretchr/testify/require" "gopkg.in/tomb.v2" + + "github.com/crowdsecurity/go-cs-lib/cstest" + + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" + "github.com/crowdsecurity/crowdsec/pkg/types" ) /* @@ -51,7 +52,7 @@ func checkForLocalStackAvailability() error { _, err := net.Dial("tcp", v) if err != nil { - return fmt.Errorf("while dialing %s : %s : aws endpoint isn't available", v, err) + return fmt.Errorf("while dialing %s: %w: aws endpoint isn't available", v, err) } return nil diff --git a/pkg/acquisition/modules/docker/docker_test.go b/pkg/acquisition/modules/docker/docker_test.go index e1cc4db96ad..e332569fb3a 100644 --- a/pkg/acquisition/modules/docker/docker_test.go +++ b/pkg/acquisition/modules/docker/docker_test.go @@ -11,17 +11,17 @@ import ( "testing" "time" - "github.com/crowdsecurity/go-cs-lib/cstest" - - "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" - "github.com/crowdsecurity/crowdsec/pkg/types" dockerTypes "github.com/docker/docker/api/types" dockerContainer "github.com/docker/docker/api/types/container" "github.com/docker/docker/client" log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" "gopkg.in/tomb.v2" 
- "github.com/stretchr/testify/assert" + "github.com/crowdsecurity/go-cs-lib/cstest" + + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" + "github.com/crowdsecurity/crowdsec/pkg/types" ) const testContainerName = "docker_test" @@ -379,5 +379,4 @@ func TestParseLabels(t *testing.T) { assert.Equal(t, test.expected, labels) }) } - } diff --git a/pkg/acquisition/modules/journalctl/journalctl_test.go b/pkg/acquisition/modules/journalctl/journalctl_test.go index f381a227534..53e2d0802ad 100644 --- a/pkg/acquisition/modules/journalctl/journalctl_test.go +++ b/pkg/acquisition/modules/journalctl/journalctl_test.go @@ -8,14 +8,15 @@ import ( "testing" "time" - "github.com/crowdsecurity/go-cs-lib/cstest" - - "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" - "github.com/crowdsecurity/crowdsec/pkg/types" log "github.com/sirupsen/logrus" "github.com/sirupsen/logrus/hooks/test" "github.com/stretchr/testify/assert" "gopkg.in/tomb.v2" + + "github.com/crowdsecurity/go-cs-lib/cstest" + + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" + "github.com/crowdsecurity/crowdsec/pkg/types" ) func TestBadConfiguration(t *testing.T) { diff --git a/pkg/acquisition/modules/kinesis/kinesis_test.go b/pkg/acquisition/modules/kinesis/kinesis_test.go index ae601020621..46e404aa49b 100644 --- a/pkg/acquisition/modules/kinesis/kinesis_test.go +++ b/pkg/acquisition/modules/kinesis/kinesis_test.go @@ -12,16 +12,17 @@ import ( "testing" "time" - "github.com/crowdsecurity/go-cs-lib/cstest" - "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/kinesis" - "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" - "github.com/crowdsecurity/crowdsec/pkg/types" log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "gopkg.in/tomb.v2" + + "github.com/crowdsecurity/go-cs-lib/cstest" + + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" + 
"github.com/crowdsecurity/crowdsec/pkg/types" ) func getLocalStackEndpoint() (string, error) { @@ -30,7 +31,7 @@ func getLocalStackEndpoint() (string, error) { v = strings.TrimPrefix(v, "http://") _, err := net.Dial("tcp", v) if err != nil { - return "", fmt.Errorf("while dialing %s : %s : aws endpoint isn't available", v, err) + return "", fmt.Errorf("while dialing %s: %w: aws endpoint isn't available", v, err) } } return endpoint, nil diff --git a/pkg/acquisition/modules/kubernetesaudit/k8s_audit_test.go b/pkg/acquisition/modules/kubernetesaudit/k8s_audit_test.go index b6e6f6b03e9..020bd4c91a0 100644 --- a/pkg/acquisition/modules/kubernetesaudit/k8s_audit_test.go +++ b/pkg/acquisition/modules/kubernetesaudit/k8s_audit_test.go @@ -6,12 +6,13 @@ import ( "testing" "time" - "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" - "github.com/crowdsecurity/crowdsec/pkg/types" log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "gopkg.in/tomb.v2" + + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" + "github.com/crowdsecurity/crowdsec/pkg/types" ) func TestBadConfiguration(t *testing.T) { @@ -46,7 +47,6 @@ listen_addr: 0.0.0.0`, err := f.UnmarshalConfig([]byte(test.config)) assert.Contains(t, err.Error(), test.expectedErr) - }) } } diff --git a/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go b/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go index 359fef5bb96..420da6e391c 100644 --- a/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go +++ b/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go @@ -12,10 +12,11 @@ import ( "strconv" "time" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/gorilla/websocket" log "github.com/sirupsen/logrus" "gopkg.in/tomb.v2" + + "github.com/crowdsecurity/crowdsec/pkg/cwversion" ) type LokiClient struct { @@ -74,6 +75,7 @@ func (lc *LokiClient) resetFailStart() { } lc.fail_start = 
time.Time{} } + func (lc *LokiClient) shouldRetry() bool { if lc.fail_start.IsZero() { lc.Logger.Warningf("loki is not available, will retry for %s", lc.config.FailMaxDuration) @@ -185,7 +187,6 @@ func (lc *LokiClient) getURLFor(endpoint string, params map[string]string) strin u.RawQuery = queryParams.Encode() u.Path, err = url.JoinPath(lc.config.LokiPrefix, u.Path, endpoint) - if err != nil { return "" } @@ -254,8 +255,8 @@ func (lc *LokiClient) Tail(ctx context.Context) (chan *LokiResponse, error) { requestHeader.Add(k, v) } lc.Logger.Infof("Connecting to %s", u) - conn, _, err := dialer.Dial(u, requestHeader) + conn, _, err := dialer.Dial(u, requestHeader) if err != nil { lc.Logger.Errorf("Error connecting to websocket, err: %s", err) return responseChan, errors.New("error connecting to websocket") @@ -264,8 +265,8 @@ func (lc *LokiClient) Tail(ctx context.Context) (chan *LokiResponse, error) { lc.t.Go(func() error { for { jsonResponse := &LokiResponse{} - err = conn.ReadJSON(jsonResponse) + err = conn.ReadJSON(jsonResponse) if err != nil { lc.Logger.Errorf("Error reading from websocket: %s", err) return fmt.Errorf("websocket error: %w", err) diff --git a/pkg/acquisition/modules/s3/s3_test.go b/pkg/acquisition/modules/s3/s3_test.go index e94521d187f..93e166dfec5 100644 --- a/pkg/acquisition/modules/s3/s3_test.go +++ b/pkg/acquisition/modules/s3/s3_test.go @@ -14,11 +14,12 @@ import ( "github.com/aws/aws-sdk-go/service/s3/s3iface" "github.com/aws/aws-sdk-go/service/sqs" "github.com/aws/aws-sdk-go/service/sqs/sqsiface" - "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" - "github.com/crowdsecurity/crowdsec/pkg/types" log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "gopkg.in/tomb.v2" + + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" + "github.com/crowdsecurity/crowdsec/pkg/types" ) func TestBadConfiguration(t *testing.T) { @@ -266,10 +267,8 @@ func TestDSNAcquis(t *testing.T) { time.Sleep(2 * time.Second) 
done <- true assert.Equal(t, test.expectedCount, linesRead) - }) } - } func TestListPolling(t *testing.T) { @@ -333,7 +332,6 @@ prefix: foo/ }() err = f.StreamingAcquisition(out, &tb) - if err != nil { t.Fatalf("unexpected error: %s", err.Error()) } @@ -414,7 +412,6 @@ sqs_name: test }() err = f.StreamingAcquisition(out, &tb) - if err != nil { t.Fatalf("unexpected error: %s", err.Error()) } diff --git a/pkg/acquisition/modules/syslog/internal/parser/rfc5424/parse_test.go b/pkg/acquisition/modules/syslog/internal/parser/rfc5424/parse_test.go index eed72244867..0938e947fe7 100644 --- a/pkg/acquisition/modules/syslog/internal/parser/rfc5424/parse_test.go +++ b/pkg/acquisition/modules/syslog/internal/parser/rfc5424/parse_test.go @@ -4,9 +4,9 @@ import ( "testing" "time" - "github.com/crowdsecurity/go-cs-lib/cstest" - "github.com/stretchr/testify/require" + + "github.com/crowdsecurity/go-cs-lib/cstest" ) func TestPri(t *testing.T) { diff --git a/pkg/acquisition/modules/syslog/syslog_test.go b/pkg/acquisition/modules/syslog/syslog_test.go index 0e823ecd32a..1750f375138 100644 --- a/pkg/acquisition/modules/syslog/syslog_test.go +++ b/pkg/acquisition/modules/syslog/syslog_test.go @@ -7,14 +7,14 @@ import ( "testing" "time" + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "gopkg.in/tomb.v2" + "github.com/crowdsecurity/go-cs-lib/cstest" "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" "github.com/crowdsecurity/crowdsec/pkg/types" - log "github.com/sirupsen/logrus" - "gopkg.in/tomb.v2" - - "github.com/stretchr/testify/assert" ) func TestConfigure(t *testing.T) { diff --git a/pkg/alertcontext/alertcontext_test.go b/pkg/alertcontext/alertcontext_test.go index 8b598eab86c..c111d1bbcfb 100644 --- a/pkg/alertcontext/alertcontext_test.go +++ b/pkg/alertcontext/alertcontext_test.go @@ -4,10 +4,11 @@ import ( "fmt" "testing" - "github.com/crowdsecurity/crowdsec/pkg/models" - "github.com/crowdsecurity/crowdsec/pkg/types" 
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/crowdsecurity/crowdsec/pkg/models" + "github.com/crowdsecurity/crowdsec/pkg/types" ) func TestNewAlertContext(t *testing.T) { diff --git a/pkg/apiserver/apic_metrics.go b/pkg/apiserver/apic_metrics.go index 128ce5a9639..b8e23629e1e 100644 --- a/pkg/apiserver/apic_metrics.go +++ b/pkg/apiserver/apic_metrics.go @@ -2,10 +2,10 @@ package apiserver import ( "context" + "slices" "time" log "github.com/sirupsen/logrus" - "slices" "github.com/crowdsecurity/go-cs-lib/ptr" "github.com/crowdsecurity/go-cs-lib/trace" diff --git a/pkg/apiserver/controllers/v1/utils.go b/pkg/apiserver/controllers/v1/utils.go index 2fcf8099e67..3cd53d217cc 100644 --- a/pkg/apiserver/controllers/v1/utils.go +++ b/pkg/apiserver/controllers/v1/utils.go @@ -9,7 +9,7 @@ import ( jwt "github.com/appleboy/gin-jwt/v2" "github.com/gin-gonic/gin" - middlewares "github.com/crowdsecurity/crowdsec/pkg/apiserver/middlewares/v1" + middlewares "github.com/crowdsecurity/crowdsec/pkg/apiserver/middlewares/v1" "github.com/crowdsecurity/crowdsec/pkg/database/ent" ) diff --git a/pkg/appsec/appsec.go b/pkg/appsec/appsec.go index 2c971fb36c5..e43a2ad6710 100644 --- a/pkg/appsec/appsec.go +++ b/pkg/appsec/appsec.go @@ -8,11 +8,12 @@ import ( "github.com/antonmedv/expr" "github.com/antonmedv/expr/vm" + log "github.com/sirupsen/logrus" + "gopkg.in/yaml.v2" + "github.com/crowdsecurity/crowdsec/pkg/cwhub" "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" "github.com/crowdsecurity/crowdsec/pkg/types" - log "github.com/sirupsen/logrus" - "gopkg.in/yaml.v2" ) type Hook struct { diff --git a/pkg/appsec/appsec_rules_collection.go b/pkg/appsec/appsec_rules_collection.go index 2024673c330..09c1670de70 100644 --- a/pkg/appsec/appsec_rules_collection.go +++ b/pkg/appsec/appsec_rules_collection.go @@ -6,10 +6,10 @@ import ( "path/filepath" "strings" + log "github.com/sirupsen/logrus" + 
"github.com/crowdsecurity/crowdsec/pkg/appsec/appsec_rule" "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" - - log "github.com/sirupsen/logrus" ) type AppsecCollection struct { @@ -51,9 +51,7 @@ func LoadCollection(pattern string, logger *log.Entry) ([]AppsecCollection, erro ret := make([]AppsecCollection, 0) for _, appsecRule := range appsecRules { - tmpMatch, err := exprhelpers.Match(pattern, appsecRule.Name) - if err != nil { logger.Errorf("unable to match %s with %s : %s", appsecRule.Name, pattern, err) continue diff --git a/pkg/appsec/loader.go b/pkg/appsec/loader.go index 82b8d440771..56ec23e3671 100644 --- a/pkg/appsec/loader.go +++ b/pkg/appsec/loader.go @@ -3,9 +3,10 @@ package appsec import ( "os" - "github.com/crowdsecurity/crowdsec/pkg/cwhub" log "github.com/sirupsen/logrus" "gopkg.in/yaml.v2" + + "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) var appsecRules = make(map[string]AppsecCollectionConfig) //FIXME: would probably be better to have a struct for this @@ -13,7 +14,6 @@ var appsecRules = make(map[string]AppsecCollectionConfig) //FIXME: would probabl var hub *cwhub.Hub //FIXME: this is a temporary hack to make the hub available in the package func LoadAppsecRules(hubInstance *cwhub.Hub) error { - hub = hubInstance appsecRules = make(map[string]AppsecCollectionConfig) @@ -23,7 +23,6 @@ func LoadAppsecRules(hubInstance *cwhub.Hub) error { } content, err := os.ReadFile(hubAppsecRuleItem.State.LocalPath) - if err != nil { log.Warnf("unable to read file %s : %s", hubAppsecRuleItem.State.LocalPath, err) continue @@ -32,7 +31,6 @@ func LoadAppsecRules(hubInstance *cwhub.Hub) error { var rule AppsecCollectionConfig err = yaml.UnmarshalStrict(content, &rule) - if err != nil { log.Warnf("unable to unmarshal file %s : %s", hubAppsecRuleItem.State.LocalPath, err) continue diff --git a/pkg/csconfig/config.go b/pkg/csconfig/config.go index e007e042bd5..ed71af4029a 100644 --- a/pkg/csconfig/config.go +++ b/pkg/csconfig/config.go @@ -12,10 +12,11 @@ import ( 
log "github.com/sirupsen/logrus" "gopkg.in/yaml.v3" - "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" "github.com/crowdsecurity/go-cs-lib/csstring" "github.com/crowdsecurity/go-cs-lib/ptr" "github.com/crowdsecurity/go-cs-lib/yamlpatch" + + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" ) // defaultConfigDir is the base path to all configuration files, to be overridden in the Makefile */ diff --git a/pkg/csplugin/helpers.go b/pkg/csplugin/helpers.go index 75ee773b808..915f17e5dd3 100644 --- a/pkg/csplugin/helpers.go +++ b/pkg/csplugin/helpers.go @@ -5,9 +5,10 @@ import ( "os" "text/template" + log "github.com/sirupsen/logrus" + "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" "github.com/crowdsecurity/crowdsec/pkg/models" - log "github.com/sirupsen/logrus" ) var helpers = template.FuncMap{ diff --git a/pkg/exprhelpers/crowdsec_cti.go b/pkg/exprhelpers/crowdsec_cti.go index 4103bad1d43..ccd67b27a49 100644 --- a/pkg/exprhelpers/crowdsec_cti.go +++ b/pkg/exprhelpers/crowdsec_cti.go @@ -6,9 +6,10 @@ import ( "time" "github.com/bluele/gcache" + log "github.com/sirupsen/logrus" + "github.com/crowdsecurity/crowdsec/pkg/cticlient" "github.com/crowdsecurity/crowdsec/pkg/types" - log "github.com/sirupsen/logrus" ) var CTIUrl = "https://cti.api.crowdsec.net" @@ -111,7 +112,7 @@ func CrowdsecCTI(params ...any) (any, error) { return &cticlient.SmokeItem{}, cticlient.ErrLimit default: ctiClient.Logger.Warnf("CTI API error : %s", err) - return &cticlient.SmokeItem{}, fmt.Errorf("unexpected error : %v", err) + return &cticlient.SmokeItem{}, fmt.Errorf("unexpected error: %w", err) } } diff --git a/pkg/exprhelpers/debugger_test.go b/pkg/exprhelpers/debugger_test.go index 6832b4efaa8..5f239885539 100644 --- a/pkg/exprhelpers/debugger_test.go +++ b/pkg/exprhelpers/debugger_test.go @@ -6,9 +6,10 @@ import ( "testing" "github.com/antonmedv/expr" - "github.com/crowdsecurity/crowdsec/pkg/types" "github.com/davecgh/go-spew/spew" log 
"github.com/sirupsen/logrus" + + "github.com/crowdsecurity/crowdsec/pkg/types" ) type ExprDbgTest struct { diff --git a/pkg/exprhelpers/expr_lib.go b/pkg/exprhelpers/expr_lib.go index 19b25e25895..b90c1986153 100644 --- a/pkg/exprhelpers/expr_lib.go +++ b/pkg/exprhelpers/expr_lib.go @@ -4,8 +4,9 @@ import ( "net" "time" - "github.com/crowdsecurity/crowdsec/pkg/cticlient" "github.com/oschwald/geoip2-golang" + + "github.com/crowdsecurity/crowdsec/pkg/cticlient" ) type exprCustomFunc struct { diff --git a/pkg/exprhelpers/jsonextract.go b/pkg/exprhelpers/jsonextract.go index 6edb34e36e6..64ed97873d6 100644 --- a/pkg/exprhelpers/jsonextract.go +++ b/pkg/exprhelpers/jsonextract.go @@ -7,7 +7,6 @@ import ( "strings" "github.com/buger/jsonparser" - log "github.com/sirupsen/logrus" ) @@ -15,11 +14,11 @@ import ( func JsonExtractLib(params ...any) (any, error) { jsblob := params[0].(string) target := params[1].([]string) + value, dataType, _, err := jsonparser.Get( jsonparser.StringToBytes(jsblob), target..., ) - if err != nil { if errors.Is(err, jsonparser.KeyPathNotFoundError) { log.Debugf("%+v doesn't exist", target) @@ -93,7 +92,6 @@ func jsonExtractType(jsblob string, target string, t jsonparser.ValueType) ([]by jsonparser.StringToBytes(jsblob), fullpath..., ) - if err != nil { if errors.Is(err, jsonparser.KeyPathNotFoundError) { log.Debugf("Key %+v doesn't exist", target) @@ -115,8 +113,8 @@ func jsonExtractType(jsblob string, target string, t jsonparser.ValueType) ([]by func JsonExtractSlice(params ...any) (any, error) { jsblob := params[0].(string) target := params[1].(string) - value, err := jsonExtractType(jsblob, target, jsonparser.Array) + value, err := jsonExtractType(jsblob, target, jsonparser.Array) if err != nil { log.Errorf("JsonExtractSlice : %s", err) return []interface{}(nil), nil @@ -136,8 +134,8 @@ func JsonExtractSlice(params ...any) (any, error) { func JsonExtractObject(params ...any) (any, error) { jsblob := params[0].(string) target := 
params[1].(string) - value, err := jsonExtractType(jsblob, target, jsonparser.Object) + value, err := jsonExtractType(jsblob, target, jsonparser.Object) if err != nil { log.Errorf("JsonExtractObject: %s", err) return map[string]interface{}(nil), nil diff --git a/pkg/leakybucket/bayesian.go b/pkg/leakybucket/bayesian.go index b8d20a488f9..e56eb097ba4 100644 --- a/pkg/leakybucket/bayesian.go +++ b/pkg/leakybucket/bayesian.go @@ -5,6 +5,7 @@ import ( "github.com/antonmedv/expr" "github.com/antonmedv/expr/vm" + "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -109,7 +110,7 @@ func (b *BayesianEvent) bayesianUpdate(c *BayesianBucket, msg types.Event, l *Le l.logger.Debugf("running condition expression: %s", b.rawCondition.ConditionalFilterName) ret, err := exprhelpers.Run(b.conditionalFilterRuntime, map[string]interface{}{"evt": &msg, "queue": l.Queue, "leaky": l}, l.logger, l.BucketConfig.Debug) if err != nil { - return fmt.Errorf("unable to run conditional filter: %s", err) + return fmt.Errorf("unable to run conditional filter: %w", err) } l.logger.Tracef("bayesian bucket expression %s returned : %v", b.rawCondition.ConditionalFilterName, ret) diff --git a/pkg/leakybucket/bucket.go b/pkg/leakybucket/bucket.go index afb5377aa4f..e981551af8f 100644 --- a/pkg/leakybucket/bucket.go +++ b/pkg/leakybucket/bucket.go @@ -6,15 +6,16 @@ import ( "sync/atomic" "time" - "github.com/crowdsecurity/go-cs-lib/trace" - - "github.com/crowdsecurity/crowdsec/pkg/time/rate" - "github.com/crowdsecurity/crowdsec/pkg/types" "github.com/davecgh/go-spew/spew" "github.com/mohae/deepcopy" "github.com/prometheus/client_golang/prometheus" log "github.com/sirupsen/logrus" "gopkg.in/tomb.v2" + + "github.com/crowdsecurity/go-cs-lib/trace" + + "github.com/crowdsecurity/crowdsec/pkg/time/rate" + "github.com/crowdsecurity/crowdsec/pkg/types" ) // those constants are now defined in types/constants diff --git a/pkg/leakybucket/conditional.go 
b/pkg/leakybucket/conditional.go index 5ff69e60a26..0e4076c305c 100644 --- a/pkg/leakybucket/conditional.go +++ b/pkg/leakybucket/conditional.go @@ -6,6 +6,7 @@ import ( "github.com/antonmedv/expr" "github.com/antonmedv/expr/vm" + "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" "github.com/crowdsecurity/crowdsec/pkg/types" ) diff --git a/pkg/leakybucket/manager_run_test.go b/pkg/leakybucket/manager_run_test.go index 27b665f750c..f3fe08b697a 100644 --- a/pkg/leakybucket/manager_run_test.go +++ b/pkg/leakybucket/manager_run_test.go @@ -5,9 +5,10 @@ import ( "testing" "time" - "github.com/crowdsecurity/crowdsec/pkg/types" log "github.com/sirupsen/logrus" "gopkg.in/tomb.v2" + + "github.com/crowdsecurity/crowdsec/pkg/types" ) func expectBucketCount(buckets *Buckets, expected int) error { @@ -20,7 +21,6 @@ func expectBucketCount(buckets *Buckets, expected int) error { return fmt.Errorf("expected %d live buckets, got %d", expected, count) } return nil - } func TestGCandDump(t *testing.T) { @@ -29,7 +29,7 @@ func TestGCandDump(t *testing.T) { tomb = &tomb.Tomb{} ) - var Holders = []BucketFactory{ + Holders := []BucketFactory{ //one overflowing soon + bh { Name: "test_counter_fast", @@ -80,7 +80,7 @@ func TestGCandDump(t *testing.T) { log.Printf("Pouring to bucket") - var in = types.Event{Parsed: map[string]string{"something": "something"}} + in := types.Event{Parsed: map[string]string{"something": "something"}} //pour an item that will go to leaky + counter ok, err := PourItemToHolders(in, Holders, buckets) if err != nil { @@ -156,7 +156,7 @@ func TestShutdownBuckets(t *testing.T) { log.Printf("Pouring to bucket") - var in = types.Event{Parsed: map[string]string{"something": "something"}} + in := types.Event{Parsed: map[string]string{"something": "something"}} //pour an item that will go to leaky + counter ok, err := PourItemToHolders(in, Holders, buckets) if err != nil { @@ -178,5 +178,4 @@ func TestShutdownBuckets(t *testing.T) { if err := expectBucketCount(buckets, 
2); err != nil { t.Fatal(err) } - } diff --git a/pkg/leakybucket/timemachine.go b/pkg/leakybucket/timemachine.go index 266a8be7c69..e72bb1a464c 100644 --- a/pkg/leakybucket/timemachine.go +++ b/pkg/leakybucket/timemachine.go @@ -3,8 +3,9 @@ package leakybucket import ( "time" - "github.com/crowdsecurity/crowdsec/pkg/types" log "github.com/sirupsen/logrus" + + "github.com/crowdsecurity/crowdsec/pkg/types" ) func TimeMachinePour(l *Leaky, msg types.Event) { diff --git a/pkg/leakybucket/trigger.go b/pkg/leakybucket/trigger.go index d50d7ecc732..b6af1431888 100644 --- a/pkg/leakybucket/trigger.go +++ b/pkg/leakybucket/trigger.go @@ -3,8 +3,9 @@ package leakybucket import ( "time" - "github.com/crowdsecurity/crowdsec/pkg/types" log "github.com/sirupsen/logrus" + + "github.com/crowdsecurity/crowdsec/pkg/types" ) type Trigger struct { diff --git a/pkg/parser/whitelist.go b/pkg/parser/whitelist.go index f3739a49438..fd1c2a0546a 100644 --- a/pkg/parser/whitelist.go +++ b/pkg/parser/whitelist.go @@ -6,9 +6,10 @@ import ( "github.com/antonmedv/expr" "github.com/antonmedv/expr/vm" + "github.com/prometheus/client_golang/prometheus" + "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" "github.com/crowdsecurity/crowdsec/pkg/types" - "github.com/prometheus/client_golang/prometheus" ) type Whitelist struct { diff --git a/pkg/types/event.go b/pkg/types/event.go index 90464aadf2c..7d8ef5825a2 100644 --- a/pkg/types/event.go +++ b/pkg/types/event.go @@ -4,9 +4,9 @@ import ( "net" "time" + "github.com/antonmedv/expr/vm" log "github.com/sirupsen/logrus" - "github.com/antonmedv/expr/vm" "github.com/crowdsecurity/crowdsec/pkg/models" ) From b081065c8ecb7560587fd76e48e253b0f787f43c Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 1 Jul 2024 11:54:49 +0200 Subject: [PATCH 202/318] lint: replace "github.com/pkg/errors" in apiserver (#3097) --- .golangci.yml | 1 - pkg/apiserver/controllers/v1/errors.go | 18 +++++++++--------- 2 files changed, 9 
insertions(+), 10 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index d89c8e9ed01..66c720381de 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -48,7 +48,6 @@ linters-settings: desc: "errors.Wrap() is deprecated in favor of fmt.Errorf()" files: - "!**/pkg/database/*.go" - - "!**/pkg/apiserver/controllers/v1/errors.go" yaml: files: - "!**/pkg/acquisition/acquisition.go" diff --git a/pkg/apiserver/controllers/v1/errors.go b/pkg/apiserver/controllers/v1/errors.go index b85b811f8a7..9004528e1b1 100644 --- a/pkg/apiserver/controllers/v1/errors.go +++ b/pkg/apiserver/controllers/v1/errors.go @@ -1,35 +1,35 @@ package v1 import ( + "errors" "net/http" "github.com/gin-gonic/gin" - "github.com/pkg/errors" "github.com/crowdsecurity/crowdsec/pkg/database" ) func (c *Controller) HandleDBErrors(gctx *gin.Context, err error) { - switch errors.Cause(err) { - case database.ItemNotFound: + switch { + case errors.Is(err, database.ItemNotFound): gctx.JSON(http.StatusNotFound, gin.H{"message": err.Error()}) return - case database.UserExists: + case errors.Is(err, database.UserExists): gctx.JSON(http.StatusForbidden, gin.H{"message": err.Error()}) return - case database.HashError: + case errors.Is(err, database.HashError): gctx.JSON(http.StatusBadRequest, gin.H{"message": err.Error()}) return - case database.InsertFail: + case errors.Is(err, database.InsertFail): gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()}) return - case database.QueryFail: + case errors.Is(err, database.QueryFail): gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()}) return - case database.ParseTimeFail: + case errors.Is(err, database.ParseTimeFail): gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()}) return - case database.ParseDurationFail: + case errors.Is(err, database.ParseDurationFail): gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()}) return default: From 1acc35442c8410e55a38918024eb4c48e3ccdd79 Mon Sep 17 
00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 1 Jul 2024 12:14:55 +0200 Subject: [PATCH 203/318] lapi detailed metrics: db schema (#3099) --- pkg/database/ent/bouncer.go | 37 +- pkg/database/ent/bouncer/bouncer.go | 24 + pkg/database/ent/bouncer/where.go | 240 +++++ pkg/database/ent/bouncer_create.go | 54 ++ pkg/database/ent/bouncer_update.go | 156 +++ pkg/database/ent/client.go | 152 ++- pkg/database/ent/ent.go | 2 + pkg/database/ent/hook/hook.go | 12 + pkg/database/ent/machine.go | 65 +- pkg/database/ent/machine/machine.go | 30 + pkg/database/ent/machine/where.go | 260 +++++ pkg/database/ent/machine_create.go | 75 ++ pkg/database/ent/machine_update.go | 229 +++++ pkg/database/ent/metric.go | 154 +++ pkg/database/ent/metric/metric.go | 104 ++ pkg/database/ent/metric/where.go | 330 +++++++ pkg/database/ent/metric_create.go | 246 +++++ pkg/database/ent/metric_delete.go | 88 ++ pkg/database/ent/metric_query.go | 526 ++++++++++ pkg/database/ent/metric_update.go | 228 +++++ pkg/database/ent/migrate/schema.go | 31 + pkg/database/ent/mutation.go | 1187 ++++++++++++++++++++++- pkg/database/ent/predicate/predicate.go | 3 + pkg/database/ent/schema/bouncer.go | 3 + pkg/database/ent/schema/machine.go | 11 + pkg/database/ent/schema/metric.go | 43 + pkg/database/ent/tx.go | 3 + 27 files changed, 4267 insertions(+), 26 deletions(-) create mode 100644 pkg/database/ent/metric.go create mode 100644 pkg/database/ent/metric/metric.go create mode 100644 pkg/database/ent/metric/where.go create mode 100644 pkg/database/ent/metric_create.go create mode 100644 pkg/database/ent/metric_delete.go create mode 100644 pkg/database/ent/metric_query.go create mode 100644 pkg/database/ent/metric_update.go create mode 100644 pkg/database/ent/schema/metric.go diff --git a/pkg/database/ent/bouncer.go b/pkg/database/ent/bouncer.go index d7597d2a449..3b4d619e384 100644 --- a/pkg/database/ent/bouncer.go +++ b/pkg/database/ent/bouncer.go @@ -36,7 +36,13 @@ type Bouncer 
struct { // LastPull holds the value of the "last_pull" field. LastPull *time.Time `json:"last_pull"` // AuthType holds the value of the "auth_type" field. - AuthType string `json:"auth_type"` + AuthType string `json:"auth_type"` + // Osname holds the value of the "osname" field. + Osname string `json:"osname,omitempty"` + // Osversion holds the value of the "osversion" field. + Osversion string `json:"osversion,omitempty"` + // Featureflags holds the value of the "featureflags" field. + Featureflags string `json:"featureflags,omitempty"` selectValues sql.SelectValues } @@ -49,7 +55,7 @@ func (*Bouncer) scanValues(columns []string) ([]any, error) { values[i] = new(sql.NullBool) case bouncer.FieldID: values[i] = new(sql.NullInt64) - case bouncer.FieldName, bouncer.FieldAPIKey, bouncer.FieldIPAddress, bouncer.FieldType, bouncer.FieldVersion, bouncer.FieldAuthType: + case bouncer.FieldName, bouncer.FieldAPIKey, bouncer.FieldIPAddress, bouncer.FieldType, bouncer.FieldVersion, bouncer.FieldAuthType, bouncer.FieldOsname, bouncer.FieldOsversion, bouncer.FieldFeatureflags: values[i] = new(sql.NullString) case bouncer.FieldCreatedAt, bouncer.FieldUpdatedAt, bouncer.FieldLastPull: values[i] = new(sql.NullTime) @@ -135,6 +141,24 @@ func (b *Bouncer) assignValues(columns []string, values []any) error { } else if value.Valid { b.AuthType = value.String } + case bouncer.FieldOsname: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field osname", values[i]) + } else if value.Valid { + b.Osname = value.String + } + case bouncer.FieldOsversion: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field osversion", values[i]) + } else if value.Valid { + b.Osversion = value.String + } + case bouncer.FieldFeatureflags: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field featureflags", values[i]) + } else if value.Valid { + b.Featureflags = 
value.String + } default: b.selectValues.Set(columns[i], values[i]) } @@ -201,6 +225,15 @@ func (b *Bouncer) String() string { builder.WriteString(", ") builder.WriteString("auth_type=") builder.WriteString(b.AuthType) + builder.WriteString(", ") + builder.WriteString("osname=") + builder.WriteString(b.Osname) + builder.WriteString(", ") + builder.WriteString("osversion=") + builder.WriteString(b.Osversion) + builder.WriteString(", ") + builder.WriteString("featureflags=") + builder.WriteString(b.Featureflags) builder.WriteByte(')') return builder.String() } diff --git a/pkg/database/ent/bouncer/bouncer.go b/pkg/database/ent/bouncer/bouncer.go index 59afb199cb5..a6f62aeadd5 100644 --- a/pkg/database/ent/bouncer/bouncer.go +++ b/pkg/database/ent/bouncer/bouncer.go @@ -33,6 +33,12 @@ const ( FieldLastPull = "last_pull" // FieldAuthType holds the string denoting the auth_type field in the database. FieldAuthType = "auth_type" + // FieldOsname holds the string denoting the osname field in the database. + FieldOsname = "osname" + // FieldOsversion holds the string denoting the osversion field in the database. + FieldOsversion = "osversion" + // FieldFeatureflags holds the string denoting the featureflags field in the database. + FieldFeatureflags = "featureflags" // Table holds the table name of the bouncer in the database. Table = "bouncers" ) @@ -50,6 +56,9 @@ var Columns = []string{ FieldVersion, FieldLastPull, FieldAuthType, + FieldOsname, + FieldOsversion, + FieldFeatureflags, } // ValidColumn reports if the column name is valid (part of the table columns). @@ -132,3 +141,18 @@ func ByLastPull(opts ...sql.OrderTermOption) OrderOption { func ByAuthType(opts ...sql.OrderTermOption) OrderOption { return sql.OrderByField(FieldAuthType, opts...).ToFunc() } + +// ByOsname orders the results by the osname field. 
+func ByOsname(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldOsname, opts...).ToFunc() +} + +// ByOsversion orders the results by the osversion field. +func ByOsversion(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldOsversion, opts...).ToFunc() +} + +// ByFeatureflags orders the results by the featureflags field. +func ByFeatureflags(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldFeatureflags, opts...).ToFunc() +} diff --git a/pkg/database/ent/bouncer/where.go b/pkg/database/ent/bouncer/where.go index e3c5752331e..e02199bc0a9 100644 --- a/pkg/database/ent/bouncer/where.go +++ b/pkg/database/ent/bouncer/where.go @@ -104,6 +104,21 @@ func AuthType(v string) predicate.Bouncer { return predicate.Bouncer(sql.FieldEQ(FieldAuthType, v)) } +// Osname applies equality check predicate on the "osname" field. It's identical to OsnameEQ. +func Osname(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldEQ(FieldOsname, v)) +} + +// Osversion applies equality check predicate on the "osversion" field. It's identical to OsversionEQ. +func Osversion(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldEQ(FieldOsversion, v)) +} + +// Featureflags applies equality check predicate on the "featureflags" field. It's identical to FeatureflagsEQ. +func Featureflags(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldEQ(FieldFeatureflags, v)) +} + // CreatedAtEQ applies the EQ predicate on the "created_at" field. func CreatedAtEQ(v time.Time) predicate.Bouncer { return predicate.Bouncer(sql.FieldEQ(FieldCreatedAt, v)) @@ -664,6 +679,231 @@ func AuthTypeContainsFold(v string) predicate.Bouncer { return predicate.Bouncer(sql.FieldContainsFold(FieldAuthType, v)) } +// OsnameEQ applies the EQ predicate on the "osname" field. 
+func OsnameEQ(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldEQ(FieldOsname, v)) +} + +// OsnameNEQ applies the NEQ predicate on the "osname" field. +func OsnameNEQ(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldNEQ(FieldOsname, v)) +} + +// OsnameIn applies the In predicate on the "osname" field. +func OsnameIn(vs ...string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldIn(FieldOsname, vs...)) +} + +// OsnameNotIn applies the NotIn predicate on the "osname" field. +func OsnameNotIn(vs ...string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldNotIn(FieldOsname, vs...)) +} + +// OsnameGT applies the GT predicate on the "osname" field. +func OsnameGT(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldGT(FieldOsname, v)) +} + +// OsnameGTE applies the GTE predicate on the "osname" field. +func OsnameGTE(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldGTE(FieldOsname, v)) +} + +// OsnameLT applies the LT predicate on the "osname" field. +func OsnameLT(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldLT(FieldOsname, v)) +} + +// OsnameLTE applies the LTE predicate on the "osname" field. +func OsnameLTE(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldLTE(FieldOsname, v)) +} + +// OsnameContains applies the Contains predicate on the "osname" field. +func OsnameContains(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldContains(FieldOsname, v)) +} + +// OsnameHasPrefix applies the HasPrefix predicate on the "osname" field. +func OsnameHasPrefix(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldHasPrefix(FieldOsname, v)) +} + +// OsnameHasSuffix applies the HasSuffix predicate on the "osname" field. +func OsnameHasSuffix(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldHasSuffix(FieldOsname, v)) +} + +// OsnameIsNil applies the IsNil predicate on the "osname" field. 
+func OsnameIsNil() predicate.Bouncer { + return predicate.Bouncer(sql.FieldIsNull(FieldOsname)) +} + +// OsnameNotNil applies the NotNil predicate on the "osname" field. +func OsnameNotNil() predicate.Bouncer { + return predicate.Bouncer(sql.FieldNotNull(FieldOsname)) +} + +// OsnameEqualFold applies the EqualFold predicate on the "osname" field. +func OsnameEqualFold(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldEqualFold(FieldOsname, v)) +} + +// OsnameContainsFold applies the ContainsFold predicate on the "osname" field. +func OsnameContainsFold(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldContainsFold(FieldOsname, v)) +} + +// OsversionEQ applies the EQ predicate on the "osversion" field. +func OsversionEQ(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldEQ(FieldOsversion, v)) +} + +// OsversionNEQ applies the NEQ predicate on the "osversion" field. +func OsversionNEQ(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldNEQ(FieldOsversion, v)) +} + +// OsversionIn applies the In predicate on the "osversion" field. +func OsversionIn(vs ...string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldIn(FieldOsversion, vs...)) +} + +// OsversionNotIn applies the NotIn predicate on the "osversion" field. +func OsversionNotIn(vs ...string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldNotIn(FieldOsversion, vs...)) +} + +// OsversionGT applies the GT predicate on the "osversion" field. +func OsversionGT(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldGT(FieldOsversion, v)) +} + +// OsversionGTE applies the GTE predicate on the "osversion" field. +func OsversionGTE(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldGTE(FieldOsversion, v)) +} + +// OsversionLT applies the LT predicate on the "osversion" field. 
+func OsversionLT(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldLT(FieldOsversion, v)) +} + +// OsversionLTE applies the LTE predicate on the "osversion" field. +func OsversionLTE(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldLTE(FieldOsversion, v)) +} + +// OsversionContains applies the Contains predicate on the "osversion" field. +func OsversionContains(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldContains(FieldOsversion, v)) +} + +// OsversionHasPrefix applies the HasPrefix predicate on the "osversion" field. +func OsversionHasPrefix(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldHasPrefix(FieldOsversion, v)) +} + +// OsversionHasSuffix applies the HasSuffix predicate on the "osversion" field. +func OsversionHasSuffix(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldHasSuffix(FieldOsversion, v)) +} + +// OsversionIsNil applies the IsNil predicate on the "osversion" field. +func OsversionIsNil() predicate.Bouncer { + return predicate.Bouncer(sql.FieldIsNull(FieldOsversion)) +} + +// OsversionNotNil applies the NotNil predicate on the "osversion" field. +func OsversionNotNil() predicate.Bouncer { + return predicate.Bouncer(sql.FieldNotNull(FieldOsversion)) +} + +// OsversionEqualFold applies the EqualFold predicate on the "osversion" field. +func OsversionEqualFold(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldEqualFold(FieldOsversion, v)) +} + +// OsversionContainsFold applies the ContainsFold predicate on the "osversion" field. +func OsversionContainsFold(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldContainsFold(FieldOsversion, v)) +} + +// FeatureflagsEQ applies the EQ predicate on the "featureflags" field. +func FeatureflagsEQ(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldEQ(FieldFeatureflags, v)) +} + +// FeatureflagsNEQ applies the NEQ predicate on the "featureflags" field. 
+func FeatureflagsNEQ(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldNEQ(FieldFeatureflags, v)) +} + +// FeatureflagsIn applies the In predicate on the "featureflags" field. +func FeatureflagsIn(vs ...string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldIn(FieldFeatureflags, vs...)) +} + +// FeatureflagsNotIn applies the NotIn predicate on the "featureflags" field. +func FeatureflagsNotIn(vs ...string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldNotIn(FieldFeatureflags, vs...)) +} + +// FeatureflagsGT applies the GT predicate on the "featureflags" field. +func FeatureflagsGT(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldGT(FieldFeatureflags, v)) +} + +// FeatureflagsGTE applies the GTE predicate on the "featureflags" field. +func FeatureflagsGTE(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldGTE(FieldFeatureflags, v)) +} + +// FeatureflagsLT applies the LT predicate on the "featureflags" field. +func FeatureflagsLT(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldLT(FieldFeatureflags, v)) +} + +// FeatureflagsLTE applies the LTE predicate on the "featureflags" field. +func FeatureflagsLTE(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldLTE(FieldFeatureflags, v)) +} + +// FeatureflagsContains applies the Contains predicate on the "featureflags" field. +func FeatureflagsContains(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldContains(FieldFeatureflags, v)) +} + +// FeatureflagsHasPrefix applies the HasPrefix predicate on the "featureflags" field. +func FeatureflagsHasPrefix(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldHasPrefix(FieldFeatureflags, v)) +} + +// FeatureflagsHasSuffix applies the HasSuffix predicate on the "featureflags" field. 
+func FeatureflagsHasSuffix(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldHasSuffix(FieldFeatureflags, v)) +} + +// FeatureflagsIsNil applies the IsNil predicate on the "featureflags" field. +func FeatureflagsIsNil() predicate.Bouncer { + return predicate.Bouncer(sql.FieldIsNull(FieldFeatureflags)) +} + +// FeatureflagsNotNil applies the NotNil predicate on the "featureflags" field. +func FeatureflagsNotNil() predicate.Bouncer { + return predicate.Bouncer(sql.FieldNotNull(FieldFeatureflags)) +} + +// FeatureflagsEqualFold applies the EqualFold predicate on the "featureflags" field. +func FeatureflagsEqualFold(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldEqualFold(FieldFeatureflags, v)) +} + +// FeatureflagsContainsFold applies the ContainsFold predicate on the "featureflags" field. +func FeatureflagsContainsFold(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldContainsFold(FieldFeatureflags, v)) +} + // And groups predicates with the AND operator between them. func And(predicates ...predicate.Bouncer) predicate.Bouncer { return predicate.Bouncer(sql.AndPredicates(predicates...)) diff --git a/pkg/database/ent/bouncer_create.go b/pkg/database/ent/bouncer_create.go index f2dfc767872..29b23f87cf1 100644 --- a/pkg/database/ent/bouncer_create.go +++ b/pkg/database/ent/bouncer_create.go @@ -136,6 +136,48 @@ func (bc *BouncerCreate) SetNillableAuthType(s *string) *BouncerCreate { return bc } +// SetOsname sets the "osname" field. +func (bc *BouncerCreate) SetOsname(s string) *BouncerCreate { + bc.mutation.SetOsname(s) + return bc +} + +// SetNillableOsname sets the "osname" field if the given value is not nil. +func (bc *BouncerCreate) SetNillableOsname(s *string) *BouncerCreate { + if s != nil { + bc.SetOsname(*s) + } + return bc +} + +// SetOsversion sets the "osversion" field. 
+func (bc *BouncerCreate) SetOsversion(s string) *BouncerCreate { + bc.mutation.SetOsversion(s) + return bc +} + +// SetNillableOsversion sets the "osversion" field if the given value is not nil. +func (bc *BouncerCreate) SetNillableOsversion(s *string) *BouncerCreate { + if s != nil { + bc.SetOsversion(*s) + } + return bc +} + +// SetFeatureflags sets the "featureflags" field. +func (bc *BouncerCreate) SetFeatureflags(s string) *BouncerCreate { + bc.mutation.SetFeatureflags(s) + return bc +} + +// SetNillableFeatureflags sets the "featureflags" field if the given value is not nil. +func (bc *BouncerCreate) SetNillableFeatureflags(s *string) *BouncerCreate { + if s != nil { + bc.SetFeatureflags(*s) + } + return bc +} + // Mutation returns the BouncerMutation object of the builder. func (bc *BouncerCreate) Mutation() *BouncerMutation { return bc.mutation @@ -275,6 +317,18 @@ func (bc *BouncerCreate) createSpec() (*Bouncer, *sqlgraph.CreateSpec) { _spec.SetField(bouncer.FieldAuthType, field.TypeString, value) _node.AuthType = value } + if value, ok := bc.mutation.Osname(); ok { + _spec.SetField(bouncer.FieldOsname, field.TypeString, value) + _node.Osname = value + } + if value, ok := bc.mutation.Osversion(); ok { + _spec.SetField(bouncer.FieldOsversion, field.TypeString, value) + _node.Osversion = value + } + if value, ok := bc.mutation.Featureflags(); ok { + _spec.SetField(bouncer.FieldFeatureflags, field.TypeString, value) + _node.Featureflags = value + } return _node, _spec } diff --git a/pkg/database/ent/bouncer_update.go b/pkg/database/ent/bouncer_update.go index 31dd0bd708e..620b006a49a 100644 --- a/pkg/database/ent/bouncer_update.go +++ b/pkg/database/ent/bouncer_update.go @@ -156,6 +156,66 @@ func (bu *BouncerUpdate) SetNillableAuthType(s *string) *BouncerUpdate { return bu } +// SetOsname sets the "osname" field. 
+func (bu *BouncerUpdate) SetOsname(s string) *BouncerUpdate { + bu.mutation.SetOsname(s) + return bu +} + +// SetNillableOsname sets the "osname" field if the given value is not nil. +func (bu *BouncerUpdate) SetNillableOsname(s *string) *BouncerUpdate { + if s != nil { + bu.SetOsname(*s) + } + return bu +} + +// ClearOsname clears the value of the "osname" field. +func (bu *BouncerUpdate) ClearOsname() *BouncerUpdate { + bu.mutation.ClearOsname() + return bu +} + +// SetOsversion sets the "osversion" field. +func (bu *BouncerUpdate) SetOsversion(s string) *BouncerUpdate { + bu.mutation.SetOsversion(s) + return bu +} + +// SetNillableOsversion sets the "osversion" field if the given value is not nil. +func (bu *BouncerUpdate) SetNillableOsversion(s *string) *BouncerUpdate { + if s != nil { + bu.SetOsversion(*s) + } + return bu +} + +// ClearOsversion clears the value of the "osversion" field. +func (bu *BouncerUpdate) ClearOsversion() *BouncerUpdate { + bu.mutation.ClearOsversion() + return bu +} + +// SetFeatureflags sets the "featureflags" field. +func (bu *BouncerUpdate) SetFeatureflags(s string) *BouncerUpdate { + bu.mutation.SetFeatureflags(s) + return bu +} + +// SetNillableFeatureflags sets the "featureflags" field if the given value is not nil. +func (bu *BouncerUpdate) SetNillableFeatureflags(s *string) *BouncerUpdate { + if s != nil { + bu.SetFeatureflags(*s) + } + return bu +} + +// ClearFeatureflags clears the value of the "featureflags" field. +func (bu *BouncerUpdate) ClearFeatureflags() *BouncerUpdate { + bu.mutation.ClearFeatureflags() + return bu +} + // Mutation returns the BouncerMutation object of the builder. 
func (bu *BouncerUpdate) Mutation() *BouncerMutation { return bu.mutation @@ -242,6 +302,24 @@ func (bu *BouncerUpdate) sqlSave(ctx context.Context) (n int, err error) { if value, ok := bu.mutation.AuthType(); ok { _spec.SetField(bouncer.FieldAuthType, field.TypeString, value) } + if value, ok := bu.mutation.Osname(); ok { + _spec.SetField(bouncer.FieldOsname, field.TypeString, value) + } + if bu.mutation.OsnameCleared() { + _spec.ClearField(bouncer.FieldOsname, field.TypeString) + } + if value, ok := bu.mutation.Osversion(); ok { + _spec.SetField(bouncer.FieldOsversion, field.TypeString, value) + } + if bu.mutation.OsversionCleared() { + _spec.ClearField(bouncer.FieldOsversion, field.TypeString) + } + if value, ok := bu.mutation.Featureflags(); ok { + _spec.SetField(bouncer.FieldFeatureflags, field.TypeString, value) + } + if bu.mutation.FeatureflagsCleared() { + _spec.ClearField(bouncer.FieldFeatureflags, field.TypeString) + } if n, err = sqlgraph.UpdateNodes(ctx, bu.driver, _spec); err != nil { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{bouncer.Label} @@ -390,6 +468,66 @@ func (buo *BouncerUpdateOne) SetNillableAuthType(s *string) *BouncerUpdateOne { return buo } +// SetOsname sets the "osname" field. +func (buo *BouncerUpdateOne) SetOsname(s string) *BouncerUpdateOne { + buo.mutation.SetOsname(s) + return buo +} + +// SetNillableOsname sets the "osname" field if the given value is not nil. +func (buo *BouncerUpdateOne) SetNillableOsname(s *string) *BouncerUpdateOne { + if s != nil { + buo.SetOsname(*s) + } + return buo +} + +// ClearOsname clears the value of the "osname" field. +func (buo *BouncerUpdateOne) ClearOsname() *BouncerUpdateOne { + buo.mutation.ClearOsname() + return buo +} + +// SetOsversion sets the "osversion" field. +func (buo *BouncerUpdateOne) SetOsversion(s string) *BouncerUpdateOne { + buo.mutation.SetOsversion(s) + return buo +} + +// SetNillableOsversion sets the "osversion" field if the given value is not nil. 
+func (buo *BouncerUpdateOne) SetNillableOsversion(s *string) *BouncerUpdateOne { + if s != nil { + buo.SetOsversion(*s) + } + return buo +} + +// ClearOsversion clears the value of the "osversion" field. +func (buo *BouncerUpdateOne) ClearOsversion() *BouncerUpdateOne { + buo.mutation.ClearOsversion() + return buo +} + +// SetFeatureflags sets the "featureflags" field. +func (buo *BouncerUpdateOne) SetFeatureflags(s string) *BouncerUpdateOne { + buo.mutation.SetFeatureflags(s) + return buo +} + +// SetNillableFeatureflags sets the "featureflags" field if the given value is not nil. +func (buo *BouncerUpdateOne) SetNillableFeatureflags(s *string) *BouncerUpdateOne { + if s != nil { + buo.SetFeatureflags(*s) + } + return buo +} + +// ClearFeatureflags clears the value of the "featureflags" field. +func (buo *BouncerUpdateOne) ClearFeatureflags() *BouncerUpdateOne { + buo.mutation.ClearFeatureflags() + return buo +} + // Mutation returns the BouncerMutation object of the builder. func (buo *BouncerUpdateOne) Mutation() *BouncerMutation { return buo.mutation @@ -506,6 +644,24 @@ func (buo *BouncerUpdateOne) sqlSave(ctx context.Context) (_node *Bouncer, err e if value, ok := buo.mutation.AuthType(); ok { _spec.SetField(bouncer.FieldAuthType, field.TypeString, value) } + if value, ok := buo.mutation.Osname(); ok { + _spec.SetField(bouncer.FieldOsname, field.TypeString, value) + } + if buo.mutation.OsnameCleared() { + _spec.ClearField(bouncer.FieldOsname, field.TypeString) + } + if value, ok := buo.mutation.Osversion(); ok { + _spec.SetField(bouncer.FieldOsversion, field.TypeString, value) + } + if buo.mutation.OsversionCleared() { + _spec.ClearField(bouncer.FieldOsversion, field.TypeString) + } + if value, ok := buo.mutation.Featureflags(); ok { + _spec.SetField(bouncer.FieldFeatureflags, field.TypeString, value) + } + if buo.mutation.FeatureflagsCleared() { + _spec.ClearField(bouncer.FieldFeatureflags, field.TypeString) + } _node = &Bouncer{config: buo.config} 
_spec.Assign = _node.assignValues _spec.ScanValues = _node.scanValues diff --git a/pkg/database/ent/client.go b/pkg/database/ent/client.go index 5318109ed42..59686102ebe 100644 --- a/pkg/database/ent/client.go +++ b/pkg/database/ent/client.go @@ -23,6 +23,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/database/ent/lock" "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" "github.com/crowdsecurity/crowdsec/pkg/database/ent/meta" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/metric" ) // Client is the client that holds all ent builders. @@ -46,6 +47,8 @@ type Client struct { Machine *MachineClient // Meta is the client for interacting with the Meta builders. Meta *MetaClient + // Metric is the client for interacting with the Metric builders. + Metric *MetricClient } // NewClient creates a new client configured with the given options. @@ -65,6 +68,7 @@ func (c *Client) init() { c.Lock = NewLockClient(c.config) c.Machine = NewMachineClient(c.config) c.Meta = NewMetaClient(c.config) + c.Metric = NewMetricClient(c.config) } type ( @@ -165,6 +169,7 @@ func (c *Client) Tx(ctx context.Context) (*Tx, error) { Lock: NewLockClient(cfg), Machine: NewMachineClient(cfg), Meta: NewMetaClient(cfg), + Metric: NewMetricClient(cfg), }, nil } @@ -192,6 +197,7 @@ func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) Lock: NewLockClient(cfg), Machine: NewMachineClient(cfg), Meta: NewMetaClient(cfg), + Metric: NewMetricClient(cfg), }, nil } @@ -222,7 +228,7 @@ func (c *Client) Close() error { func (c *Client) Use(hooks ...Hook) { for _, n := range []interface{ Use(...Hook) }{ c.Alert, c.Bouncer, c.ConfigItem, c.Decision, c.Event, c.Lock, c.Machine, - c.Meta, + c.Meta, c.Metric, } { n.Use(hooks...) 
} @@ -233,7 +239,7 @@ func (c *Client) Use(hooks ...Hook) { func (c *Client) Intercept(interceptors ...Interceptor) { for _, n := range []interface{ Intercept(...Interceptor) }{ c.Alert, c.Bouncer, c.ConfigItem, c.Decision, c.Event, c.Lock, c.Machine, - c.Meta, + c.Meta, c.Metric, } { n.Intercept(interceptors...) } @@ -258,6 +264,8 @@ func (c *Client) Mutate(ctx context.Context, m Mutation) (Value, error) { return c.Machine.mutate(ctx, m) case *MetaMutation: return c.Meta.mutate(ctx, m) + case *MetricMutation: + return c.Metric.mutate(ctx, m) default: return nil, fmt.Errorf("ent: unknown mutation type %T", m) } @@ -1455,13 +1463,147 @@ func (c *MetaClient) mutate(ctx context.Context, m *MetaMutation) (Value, error) } } +// MetricClient is a client for the Metric schema. +type MetricClient struct { + config +} + +// NewMetricClient returns a client for the Metric from the given config. +func NewMetricClient(c config) *MetricClient { + return &MetricClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `metric.Hooks(f(g(h())))`. +func (c *MetricClient) Use(hooks ...Hook) { + c.hooks.Metric = append(c.hooks.Metric, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `metric.Intercept(f(g(h())))`. +func (c *MetricClient) Intercept(interceptors ...Interceptor) { + c.inters.Metric = append(c.inters.Metric, interceptors...) +} + +// Create returns a builder for creating a Metric entity. +func (c *MetricClient) Create() *MetricCreate { + mutation := newMetricMutation(c.config, OpCreate) + return &MetricCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of Metric entities. 
+func (c *MetricClient) CreateBulk(builders ...*MetricCreate) *MetricCreateBulk { + return &MetricCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *MetricClient) MapCreateBulk(slice any, setFunc func(*MetricCreate, int)) *MetricCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &MetricCreateBulk{err: fmt.Errorf("calling to MetricClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*MetricCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &MetricCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for Metric. +func (c *MetricClient) Update() *MetricUpdate { + mutation := newMetricMutation(c.config, OpUpdate) + return &MetricUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *MetricClient) UpdateOne(m *Metric) *MetricUpdateOne { + mutation := newMetricMutation(c.config, OpUpdateOne, withMetric(m)) + return &MetricUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *MetricClient) UpdateOneID(id int) *MetricUpdateOne { + mutation := newMetricMutation(c.config, OpUpdateOne, withMetricID(id)) + return &MetricUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for Metric. +func (c *MetricClient) Delete() *MetricDelete { + mutation := newMetricMutation(c.config, OpDelete) + return &MetricDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. 
+func (c *MetricClient) DeleteOne(m *Metric) *MetricDeleteOne { + return c.DeleteOneID(m.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *MetricClient) DeleteOneID(id int) *MetricDeleteOne { + builder := c.Delete().Where(metric.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &MetricDeleteOne{builder} +} + +// Query returns a query builder for Metric. +func (c *MetricClient) Query() *MetricQuery { + return &MetricQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeMetric}, + inters: c.Interceptors(), + } +} + +// Get returns a Metric entity by its id. +func (c *MetricClient) Get(ctx context.Context, id int) (*Metric, error) { + return c.Query().Where(metric.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *MetricClient) GetX(ctx context.Context, id int) *Metric { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// Hooks returns the client hooks. +func (c *MetricClient) Hooks() []Hook { + return c.hooks.Metric +} + +// Interceptors returns the client interceptors. +func (c *MetricClient) Interceptors() []Interceptor { + return c.inters.Metric +} + +func (c *MetricClient) mutate(ctx context.Context, m *MetricMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&MetricCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&MetricUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&MetricUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&MetricDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown Metric mutation op: %q", m.Op()) + } +} + // hooks and interceptors per client, for fast access. 
type ( hooks struct { - Alert, Bouncer, ConfigItem, Decision, Event, Lock, Machine, Meta []ent.Hook + Alert, Bouncer, ConfigItem, Decision, Event, Lock, Machine, Meta, + Metric []ent.Hook } inters struct { - Alert, Bouncer, ConfigItem, Decision, Event, Lock, Machine, - Meta []ent.Interceptor + Alert, Bouncer, ConfigItem, Decision, Event, Lock, Machine, Meta, + Metric []ent.Interceptor } ) diff --git a/pkg/database/ent/ent.go b/pkg/database/ent/ent.go index cb98ee9301c..2a5ad188197 100644 --- a/pkg/database/ent/ent.go +++ b/pkg/database/ent/ent.go @@ -20,6 +20,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/database/ent/lock" "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" "github.com/crowdsecurity/crowdsec/pkg/database/ent/meta" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/metric" ) // ent aliases to avoid import conflicts in user's code. @@ -88,6 +89,7 @@ func checkColumn(table, column string) error { lock.Table: lock.ValidColumn, machine.Table: machine.ValidColumn, meta.Table: meta.ValidColumn, + metric.Table: metric.ValidColumn, }) }) return columnCheck(table, column) diff --git a/pkg/database/ent/hook/hook.go b/pkg/database/ent/hook/hook.go index fdc31539679..62cc07820d0 100644 --- a/pkg/database/ent/hook/hook.go +++ b/pkg/database/ent/hook/hook.go @@ -105,6 +105,18 @@ func (f MetaFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.MetaMutation", m) } +// The MetricFunc type is an adapter to allow the use of ordinary +// function as Metric mutator. +type MetricFunc func(context.Context, *ent.MetricMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f MetricFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.MetricMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.MetricMutation", m) +} + // Condition is a hook condition function. 
type Condition func(context.Context, ent.Mutation) bool diff --git a/pkg/database/ent/machine.go b/pkg/database/ent/machine.go index 475eab72ecd..fddb2e6a8b3 100644 --- a/pkg/database/ent/machine.go +++ b/pkg/database/ent/machine.go @@ -3,6 +3,7 @@ package ent import ( + "encoding/json" "fmt" "strings" "time" @@ -10,6 +11,7 @@ import ( "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/schema" ) // Machine is the model entity for the Machine schema. @@ -41,6 +43,16 @@ type Machine struct { Status string `json:"status,omitempty"` // AuthType holds the value of the "auth_type" field. AuthType string `json:"auth_type"` + // Osname holds the value of the "osname" field. + Osname string `json:"osname,omitempty"` + // Osversion holds the value of the "osversion" field. + Osversion string `json:"osversion,omitempty"` + // Featureflags holds the value of the "featureflags" field. + Featureflags string `json:"featureflags,omitempty"` + // Hubstate holds the value of the "hubstate" field. + Hubstate map[string]schema.ItemState `json:"hubstate,omitempty"` + // Datasources holds the value of the "datasources" field. + Datasources map[string]int64 `json:"datasources,omitempty"` // Edges holds the relations/edges for other nodes in the graph. // The values are being populated by the MachineQuery when eager-loading is set. 
Edges MachineEdges `json:"edges"` @@ -70,11 +82,13 @@ func (*Machine) scanValues(columns []string) ([]any, error) { values := make([]any, len(columns)) for i := range columns { switch columns[i] { + case machine.FieldHubstate, machine.FieldDatasources: + values[i] = new([]byte) case machine.FieldIsValidated: values[i] = new(sql.NullBool) case machine.FieldID: values[i] = new(sql.NullInt64) - case machine.FieldMachineId, machine.FieldPassword, machine.FieldIpAddress, machine.FieldScenarios, machine.FieldVersion, machine.FieldStatus, machine.FieldAuthType: + case machine.FieldMachineId, machine.FieldPassword, machine.FieldIpAddress, machine.FieldScenarios, machine.FieldVersion, machine.FieldStatus, machine.FieldAuthType, machine.FieldOsname, machine.FieldOsversion, machine.FieldFeatureflags: values[i] = new(sql.NullString) case machine.FieldCreatedAt, machine.FieldUpdatedAt, machine.FieldLastPush, machine.FieldLastHeartbeat: values[i] = new(sql.NullTime) @@ -173,6 +187,40 @@ func (m *Machine) assignValues(columns []string, values []any) error { } else if value.Valid { m.AuthType = value.String } + case machine.FieldOsname: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field osname", values[i]) + } else if value.Valid { + m.Osname = value.String + } + case machine.FieldOsversion: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field osversion", values[i]) + } else if value.Valid { + m.Osversion = value.String + } + case machine.FieldFeatureflags: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field featureflags", values[i]) + } else if value.Valid { + m.Featureflags = value.String + } + case machine.FieldHubstate: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field hubstate", values[i]) + } else if value != nil && len(*value) > 0 { + if err := json.Unmarshal(*value, 
&m.Hubstate); err != nil { + return fmt.Errorf("unmarshal field hubstate: %w", err) + } + } + case machine.FieldDatasources: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field datasources", values[i]) + } else if value != nil && len(*value) > 0 { + if err := json.Unmarshal(*value, &m.Datasources); err != nil { + return fmt.Errorf("unmarshal field datasources: %w", err) + } + } default: m.selectValues.Set(columns[i], values[i]) } @@ -252,6 +300,21 @@ func (m *Machine) String() string { builder.WriteString(", ") builder.WriteString("auth_type=") builder.WriteString(m.AuthType) + builder.WriteString(", ") + builder.WriteString("osname=") + builder.WriteString(m.Osname) + builder.WriteString(", ") + builder.WriteString("osversion=") + builder.WriteString(m.Osversion) + builder.WriteString(", ") + builder.WriteString("featureflags=") + builder.WriteString(m.Featureflags) + builder.WriteString(", ") + builder.WriteString("hubstate=") + builder.WriteString(fmt.Sprintf("%v", m.Hubstate)) + builder.WriteString(", ") + builder.WriteString("datasources=") + builder.WriteString(fmt.Sprintf("%v", m.Datasources)) builder.WriteByte(')') return builder.String() } diff --git a/pkg/database/ent/machine/machine.go b/pkg/database/ent/machine/machine.go index d7dece9f8ef..179059edd4d 100644 --- a/pkg/database/ent/machine/machine.go +++ b/pkg/database/ent/machine/machine.go @@ -38,6 +38,16 @@ const ( FieldStatus = "status" // FieldAuthType holds the string denoting the auth_type field in the database. FieldAuthType = "auth_type" + // FieldOsname holds the string denoting the osname field in the database. + FieldOsname = "osname" + // FieldOsversion holds the string denoting the osversion field in the database. + FieldOsversion = "osversion" + // FieldFeatureflags holds the string denoting the featureflags field in the database. + FieldFeatureflags = "featureflags" + // FieldHubstate holds the string denoting the hubstate field in the database. 
+ FieldHubstate = "hubstate" + // FieldDatasources holds the string denoting the datasources field in the database. + FieldDatasources = "datasources" // EdgeAlerts holds the string denoting the alerts edge name in mutations. EdgeAlerts = "alerts" // Table holds the table name of the machine in the database. @@ -66,6 +76,11 @@ var Columns = []string{ FieldIsValidated, FieldStatus, FieldAuthType, + FieldOsname, + FieldOsversion, + FieldFeatureflags, + FieldHubstate, + FieldDatasources, } // ValidColumn reports if the column name is valid (part of the table columns). @@ -163,6 +178,21 @@ func ByAuthType(opts ...sql.OrderTermOption) OrderOption { return sql.OrderByField(FieldAuthType, opts...).ToFunc() } +// ByOsname orders the results by the osname field. +func ByOsname(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldOsname, opts...).ToFunc() +} + +// ByOsversion orders the results by the osversion field. +func ByOsversion(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldOsversion, opts...).ToFunc() +} + +// ByFeatureflags orders the results by the featureflags field. +func ByFeatureflags(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldFeatureflags, opts...).ToFunc() +} + // ByAlertsCount orders the results by alerts count. func ByAlertsCount(opts ...sql.OrderTermOption) OrderOption { return func(s *sql.Selector) { diff --git a/pkg/database/ent/machine/where.go b/pkg/database/ent/machine/where.go index 24c9ab154c8..aca66135f5c 100644 --- a/pkg/database/ent/machine/where.go +++ b/pkg/database/ent/machine/where.go @@ -115,6 +115,21 @@ func AuthType(v string) predicate.Machine { return predicate.Machine(sql.FieldEQ(FieldAuthType, v)) } +// Osname applies equality check predicate on the "osname" field. It's identical to OsnameEQ. 
+func Osname(v string) predicate.Machine { + return predicate.Machine(sql.FieldEQ(FieldOsname, v)) +} + +// Osversion applies equality check predicate on the "osversion" field. It's identical to OsversionEQ. +func Osversion(v string) predicate.Machine { + return predicate.Machine(sql.FieldEQ(FieldOsversion, v)) +} + +// Featureflags applies equality check predicate on the "featureflags" field. It's identical to FeatureflagsEQ. +func Featureflags(v string) predicate.Machine { + return predicate.Machine(sql.FieldEQ(FieldFeatureflags, v)) +} + // CreatedAtEQ applies the EQ predicate on the "created_at" field. func CreatedAtEQ(v time.Time) predicate.Machine { return predicate.Machine(sql.FieldEQ(FieldCreatedAt, v)) @@ -790,6 +805,251 @@ func AuthTypeContainsFold(v string) predicate.Machine { return predicate.Machine(sql.FieldContainsFold(FieldAuthType, v)) } +// OsnameEQ applies the EQ predicate on the "osname" field. +func OsnameEQ(v string) predicate.Machine { + return predicate.Machine(sql.FieldEQ(FieldOsname, v)) +} + +// OsnameNEQ applies the NEQ predicate on the "osname" field. +func OsnameNEQ(v string) predicate.Machine { + return predicate.Machine(sql.FieldNEQ(FieldOsname, v)) +} + +// OsnameIn applies the In predicate on the "osname" field. +func OsnameIn(vs ...string) predicate.Machine { + return predicate.Machine(sql.FieldIn(FieldOsname, vs...)) +} + +// OsnameNotIn applies the NotIn predicate on the "osname" field. +func OsnameNotIn(vs ...string) predicate.Machine { + return predicate.Machine(sql.FieldNotIn(FieldOsname, vs...)) +} + +// OsnameGT applies the GT predicate on the "osname" field. +func OsnameGT(v string) predicate.Machine { + return predicate.Machine(sql.FieldGT(FieldOsname, v)) +} + +// OsnameGTE applies the GTE predicate on the "osname" field. +func OsnameGTE(v string) predicate.Machine { + return predicate.Machine(sql.FieldGTE(FieldOsname, v)) +} + +// OsnameLT applies the LT predicate on the "osname" field. 
+func OsnameLT(v string) predicate.Machine { + return predicate.Machine(sql.FieldLT(FieldOsname, v)) +} + +// OsnameLTE applies the LTE predicate on the "osname" field. +func OsnameLTE(v string) predicate.Machine { + return predicate.Machine(sql.FieldLTE(FieldOsname, v)) +} + +// OsnameContains applies the Contains predicate on the "osname" field. +func OsnameContains(v string) predicate.Machine { + return predicate.Machine(sql.FieldContains(FieldOsname, v)) +} + +// OsnameHasPrefix applies the HasPrefix predicate on the "osname" field. +func OsnameHasPrefix(v string) predicate.Machine { + return predicate.Machine(sql.FieldHasPrefix(FieldOsname, v)) +} + +// OsnameHasSuffix applies the HasSuffix predicate on the "osname" field. +func OsnameHasSuffix(v string) predicate.Machine { + return predicate.Machine(sql.FieldHasSuffix(FieldOsname, v)) +} + +// OsnameIsNil applies the IsNil predicate on the "osname" field. +func OsnameIsNil() predicate.Machine { + return predicate.Machine(sql.FieldIsNull(FieldOsname)) +} + +// OsnameNotNil applies the NotNil predicate on the "osname" field. +func OsnameNotNil() predicate.Machine { + return predicate.Machine(sql.FieldNotNull(FieldOsname)) +} + +// OsnameEqualFold applies the EqualFold predicate on the "osname" field. +func OsnameEqualFold(v string) predicate.Machine { + return predicate.Machine(sql.FieldEqualFold(FieldOsname, v)) +} + +// OsnameContainsFold applies the ContainsFold predicate on the "osname" field. +func OsnameContainsFold(v string) predicate.Machine { + return predicate.Machine(sql.FieldContainsFold(FieldOsname, v)) +} + +// OsversionEQ applies the EQ predicate on the "osversion" field. +func OsversionEQ(v string) predicate.Machine { + return predicate.Machine(sql.FieldEQ(FieldOsversion, v)) +} + +// OsversionNEQ applies the NEQ predicate on the "osversion" field. 
+func OsversionNEQ(v string) predicate.Machine { + return predicate.Machine(sql.FieldNEQ(FieldOsversion, v)) +} + +// OsversionIn applies the In predicate on the "osversion" field. +func OsversionIn(vs ...string) predicate.Machine { + return predicate.Machine(sql.FieldIn(FieldOsversion, vs...)) +} + +// OsversionNotIn applies the NotIn predicate on the "osversion" field. +func OsversionNotIn(vs ...string) predicate.Machine { + return predicate.Machine(sql.FieldNotIn(FieldOsversion, vs...)) +} + +// OsversionGT applies the GT predicate on the "osversion" field. +func OsversionGT(v string) predicate.Machine { + return predicate.Machine(sql.FieldGT(FieldOsversion, v)) +} + +// OsversionGTE applies the GTE predicate on the "osversion" field. +func OsversionGTE(v string) predicate.Machine { + return predicate.Machine(sql.FieldGTE(FieldOsversion, v)) +} + +// OsversionLT applies the LT predicate on the "osversion" field. +func OsversionLT(v string) predicate.Machine { + return predicate.Machine(sql.FieldLT(FieldOsversion, v)) +} + +// OsversionLTE applies the LTE predicate on the "osversion" field. +func OsversionLTE(v string) predicate.Machine { + return predicate.Machine(sql.FieldLTE(FieldOsversion, v)) +} + +// OsversionContains applies the Contains predicate on the "osversion" field. +func OsversionContains(v string) predicate.Machine { + return predicate.Machine(sql.FieldContains(FieldOsversion, v)) +} + +// OsversionHasPrefix applies the HasPrefix predicate on the "osversion" field. +func OsversionHasPrefix(v string) predicate.Machine { + return predicate.Machine(sql.FieldHasPrefix(FieldOsversion, v)) +} + +// OsversionHasSuffix applies the HasSuffix predicate on the "osversion" field. +func OsversionHasSuffix(v string) predicate.Machine { + return predicate.Machine(sql.FieldHasSuffix(FieldOsversion, v)) +} + +// OsversionIsNil applies the IsNil predicate on the "osversion" field. 
+func OsversionIsNil() predicate.Machine { + return predicate.Machine(sql.FieldIsNull(FieldOsversion)) +} + +// OsversionNotNil applies the NotNil predicate on the "osversion" field. +func OsversionNotNil() predicate.Machine { + return predicate.Machine(sql.FieldNotNull(FieldOsversion)) +} + +// OsversionEqualFold applies the EqualFold predicate on the "osversion" field. +func OsversionEqualFold(v string) predicate.Machine { + return predicate.Machine(sql.FieldEqualFold(FieldOsversion, v)) +} + +// OsversionContainsFold applies the ContainsFold predicate on the "osversion" field. +func OsversionContainsFold(v string) predicate.Machine { + return predicate.Machine(sql.FieldContainsFold(FieldOsversion, v)) +} + +// FeatureflagsEQ applies the EQ predicate on the "featureflags" field. +func FeatureflagsEQ(v string) predicate.Machine { + return predicate.Machine(sql.FieldEQ(FieldFeatureflags, v)) +} + +// FeatureflagsNEQ applies the NEQ predicate on the "featureflags" field. +func FeatureflagsNEQ(v string) predicate.Machine { + return predicate.Machine(sql.FieldNEQ(FieldFeatureflags, v)) +} + +// FeatureflagsIn applies the In predicate on the "featureflags" field. +func FeatureflagsIn(vs ...string) predicate.Machine { + return predicate.Machine(sql.FieldIn(FieldFeatureflags, vs...)) +} + +// FeatureflagsNotIn applies the NotIn predicate on the "featureflags" field. +func FeatureflagsNotIn(vs ...string) predicate.Machine { + return predicate.Machine(sql.FieldNotIn(FieldFeatureflags, vs...)) +} + +// FeatureflagsGT applies the GT predicate on the "featureflags" field. +func FeatureflagsGT(v string) predicate.Machine { + return predicate.Machine(sql.FieldGT(FieldFeatureflags, v)) +} + +// FeatureflagsGTE applies the GTE predicate on the "featureflags" field. +func FeatureflagsGTE(v string) predicate.Machine { + return predicate.Machine(sql.FieldGTE(FieldFeatureflags, v)) +} + +// FeatureflagsLT applies the LT predicate on the "featureflags" field. 
+func FeatureflagsLT(v string) predicate.Machine { + return predicate.Machine(sql.FieldLT(FieldFeatureflags, v)) +} + +// FeatureflagsLTE applies the LTE predicate on the "featureflags" field. +func FeatureflagsLTE(v string) predicate.Machine { + return predicate.Machine(sql.FieldLTE(FieldFeatureflags, v)) +} + +// FeatureflagsContains applies the Contains predicate on the "featureflags" field. +func FeatureflagsContains(v string) predicate.Machine { + return predicate.Machine(sql.FieldContains(FieldFeatureflags, v)) +} + +// FeatureflagsHasPrefix applies the HasPrefix predicate on the "featureflags" field. +func FeatureflagsHasPrefix(v string) predicate.Machine { + return predicate.Machine(sql.FieldHasPrefix(FieldFeatureflags, v)) +} + +// FeatureflagsHasSuffix applies the HasSuffix predicate on the "featureflags" field. +func FeatureflagsHasSuffix(v string) predicate.Machine { + return predicate.Machine(sql.FieldHasSuffix(FieldFeatureflags, v)) +} + +// FeatureflagsIsNil applies the IsNil predicate on the "featureflags" field. +func FeatureflagsIsNil() predicate.Machine { + return predicate.Machine(sql.FieldIsNull(FieldFeatureflags)) +} + +// FeatureflagsNotNil applies the NotNil predicate on the "featureflags" field. +func FeatureflagsNotNil() predicate.Machine { + return predicate.Machine(sql.FieldNotNull(FieldFeatureflags)) +} + +// FeatureflagsEqualFold applies the EqualFold predicate on the "featureflags" field. +func FeatureflagsEqualFold(v string) predicate.Machine { + return predicate.Machine(sql.FieldEqualFold(FieldFeatureflags, v)) +} + +// FeatureflagsContainsFold applies the ContainsFold predicate on the "featureflags" field. +func FeatureflagsContainsFold(v string) predicate.Machine { + return predicate.Machine(sql.FieldContainsFold(FieldFeatureflags, v)) +} + +// HubstateIsNil applies the IsNil predicate on the "hubstate" field. 
+func HubstateIsNil() predicate.Machine { + return predicate.Machine(sql.FieldIsNull(FieldHubstate)) +} + +// HubstateNotNil applies the NotNil predicate on the "hubstate" field. +func HubstateNotNil() predicate.Machine { + return predicate.Machine(sql.FieldNotNull(FieldHubstate)) +} + +// DatasourcesIsNil applies the IsNil predicate on the "datasources" field. +func DatasourcesIsNil() predicate.Machine { + return predicate.Machine(sql.FieldIsNull(FieldDatasources)) +} + +// DatasourcesNotNil applies the NotNil predicate on the "datasources" field. +func DatasourcesNotNil() predicate.Machine { + return predicate.Machine(sql.FieldNotNull(FieldDatasources)) +} + // HasAlerts applies the HasEdge predicate on the "alerts" edge. func HasAlerts() predicate.Machine { return predicate.Machine(func(s *sql.Selector) { diff --git a/pkg/database/ent/machine_create.go b/pkg/database/ent/machine_create.go index 2e4cf9f1500..4ae0e5a9d1f 100644 --- a/pkg/database/ent/machine_create.go +++ b/pkg/database/ent/machine_create.go @@ -12,6 +12,7 @@ import ( "entgo.io/ent/schema/field" "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/schema" ) // MachineCreate is the builder for creating a Machine entity. @@ -165,6 +166,60 @@ func (mc *MachineCreate) SetNillableAuthType(s *string) *MachineCreate { return mc } +// SetOsname sets the "osname" field. +func (mc *MachineCreate) SetOsname(s string) *MachineCreate { + mc.mutation.SetOsname(s) + return mc +} + +// SetNillableOsname sets the "osname" field if the given value is not nil. +func (mc *MachineCreate) SetNillableOsname(s *string) *MachineCreate { + if s != nil { + mc.SetOsname(*s) + } + return mc +} + +// SetOsversion sets the "osversion" field. 
+func (mc *MachineCreate) SetOsversion(s string) *MachineCreate { + mc.mutation.SetOsversion(s) + return mc +} + +// SetNillableOsversion sets the "osversion" field if the given value is not nil. +func (mc *MachineCreate) SetNillableOsversion(s *string) *MachineCreate { + if s != nil { + mc.SetOsversion(*s) + } + return mc +} + +// SetFeatureflags sets the "featureflags" field. +func (mc *MachineCreate) SetFeatureflags(s string) *MachineCreate { + mc.mutation.SetFeatureflags(s) + return mc +} + +// SetNillableFeatureflags sets the "featureflags" field if the given value is not nil. +func (mc *MachineCreate) SetNillableFeatureflags(s *string) *MachineCreate { + if s != nil { + mc.SetFeatureflags(*s) + } + return mc +} + +// SetHubstate sets the "hubstate" field. +func (mc *MachineCreate) SetHubstate(ms map[string]schema.ItemState) *MachineCreate { + mc.mutation.SetHubstate(ms) + return mc +} + +// SetDatasources sets the "datasources" field. +func (mc *MachineCreate) SetDatasources(m map[string]int64) *MachineCreate { + mc.mutation.SetDatasources(m) + return mc +} + // AddAlertIDs adds the "alerts" edge to the Alert entity by IDs. func (mc *MachineCreate) AddAlertIDs(ids ...int) *MachineCreate { mc.mutation.AddAlertIDs(ids...) 
@@ -339,6 +394,26 @@ func (mc *MachineCreate) createSpec() (*Machine, *sqlgraph.CreateSpec) { _spec.SetField(machine.FieldAuthType, field.TypeString, value) _node.AuthType = value } + if value, ok := mc.mutation.Osname(); ok { + _spec.SetField(machine.FieldOsname, field.TypeString, value) + _node.Osname = value + } + if value, ok := mc.mutation.Osversion(); ok { + _spec.SetField(machine.FieldOsversion, field.TypeString, value) + _node.Osversion = value + } + if value, ok := mc.mutation.Featureflags(); ok { + _spec.SetField(machine.FieldFeatureflags, field.TypeString, value) + _node.Featureflags = value + } + if value, ok := mc.mutation.Hubstate(); ok { + _spec.SetField(machine.FieldHubstate, field.TypeJSON, value) + _node.Hubstate = value + } + if value, ok := mc.mutation.Datasources(); ok { + _spec.SetField(machine.FieldDatasources, field.TypeJSON, value) + _node.Datasources = value + } if nodes := mc.mutation.AlertsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, diff --git a/pkg/database/ent/machine_update.go b/pkg/database/ent/machine_update.go index 5fbd15220f9..aa0f02542c1 100644 --- a/pkg/database/ent/machine_update.go +++ b/pkg/database/ent/machine_update.go @@ -14,6 +14,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/schema" ) // MachineUpdate is the builder for updating Machine entities. @@ -191,6 +192,90 @@ func (mu *MachineUpdate) SetNillableAuthType(s *string) *MachineUpdate { return mu } +// SetOsname sets the "osname" field. +func (mu *MachineUpdate) SetOsname(s string) *MachineUpdate { + mu.mutation.SetOsname(s) + return mu +} + +// SetNillableOsname sets the "osname" field if the given value is not nil. 
+func (mu *MachineUpdate) SetNillableOsname(s *string) *MachineUpdate { + if s != nil { + mu.SetOsname(*s) + } + return mu +} + +// ClearOsname clears the value of the "osname" field. +func (mu *MachineUpdate) ClearOsname() *MachineUpdate { + mu.mutation.ClearOsname() + return mu +} + +// SetOsversion sets the "osversion" field. +func (mu *MachineUpdate) SetOsversion(s string) *MachineUpdate { + mu.mutation.SetOsversion(s) + return mu +} + +// SetNillableOsversion sets the "osversion" field if the given value is not nil. +func (mu *MachineUpdate) SetNillableOsversion(s *string) *MachineUpdate { + if s != nil { + mu.SetOsversion(*s) + } + return mu +} + +// ClearOsversion clears the value of the "osversion" field. +func (mu *MachineUpdate) ClearOsversion() *MachineUpdate { + mu.mutation.ClearOsversion() + return mu +} + +// SetFeatureflags sets the "featureflags" field. +func (mu *MachineUpdate) SetFeatureflags(s string) *MachineUpdate { + mu.mutation.SetFeatureflags(s) + return mu +} + +// SetNillableFeatureflags sets the "featureflags" field if the given value is not nil. +func (mu *MachineUpdate) SetNillableFeatureflags(s *string) *MachineUpdate { + if s != nil { + mu.SetFeatureflags(*s) + } + return mu +} + +// ClearFeatureflags clears the value of the "featureflags" field. +func (mu *MachineUpdate) ClearFeatureflags() *MachineUpdate { + mu.mutation.ClearFeatureflags() + return mu +} + +// SetHubstate sets the "hubstate" field. +func (mu *MachineUpdate) SetHubstate(ms map[string]schema.ItemState) *MachineUpdate { + mu.mutation.SetHubstate(ms) + return mu +} + +// ClearHubstate clears the value of the "hubstate" field. +func (mu *MachineUpdate) ClearHubstate() *MachineUpdate { + mu.mutation.ClearHubstate() + return mu +} + +// SetDatasources sets the "datasources" field. +func (mu *MachineUpdate) SetDatasources(m map[string]int64) *MachineUpdate { + mu.mutation.SetDatasources(m) + return mu +} + +// ClearDatasources clears the value of the "datasources" field. 
+func (mu *MachineUpdate) ClearDatasources() *MachineUpdate { + mu.mutation.ClearDatasources() + return mu +} + // AddAlertIDs adds the "alerts" edge to the Alert entity by IDs. func (mu *MachineUpdate) AddAlertIDs(ids ...int) *MachineUpdate { mu.mutation.AddAlertIDs(ids...) @@ -335,6 +420,36 @@ func (mu *MachineUpdate) sqlSave(ctx context.Context) (n int, err error) { if value, ok := mu.mutation.AuthType(); ok { _spec.SetField(machine.FieldAuthType, field.TypeString, value) } + if value, ok := mu.mutation.Osname(); ok { + _spec.SetField(machine.FieldOsname, field.TypeString, value) + } + if mu.mutation.OsnameCleared() { + _spec.ClearField(machine.FieldOsname, field.TypeString) + } + if value, ok := mu.mutation.Osversion(); ok { + _spec.SetField(machine.FieldOsversion, field.TypeString, value) + } + if mu.mutation.OsversionCleared() { + _spec.ClearField(machine.FieldOsversion, field.TypeString) + } + if value, ok := mu.mutation.Featureflags(); ok { + _spec.SetField(machine.FieldFeatureflags, field.TypeString, value) + } + if mu.mutation.FeatureflagsCleared() { + _spec.ClearField(machine.FieldFeatureflags, field.TypeString) + } + if value, ok := mu.mutation.Hubstate(); ok { + _spec.SetField(machine.FieldHubstate, field.TypeJSON, value) + } + if mu.mutation.HubstateCleared() { + _spec.ClearField(machine.FieldHubstate, field.TypeJSON) + } + if value, ok := mu.mutation.Datasources(); ok { + _spec.SetField(machine.FieldDatasources, field.TypeJSON, value) + } + if mu.mutation.DatasourcesCleared() { + _spec.ClearField(machine.FieldDatasources, field.TypeJSON) + } if mu.mutation.AlertsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, @@ -562,6 +677,90 @@ func (muo *MachineUpdateOne) SetNillableAuthType(s *string) *MachineUpdateOne { return muo } +// SetOsname sets the "osname" field. 
+func (muo *MachineUpdateOne) SetOsname(s string) *MachineUpdateOne { + muo.mutation.SetOsname(s) + return muo +} + +// SetNillableOsname sets the "osname" field if the given value is not nil. +func (muo *MachineUpdateOne) SetNillableOsname(s *string) *MachineUpdateOne { + if s != nil { + muo.SetOsname(*s) + } + return muo +} + +// ClearOsname clears the value of the "osname" field. +func (muo *MachineUpdateOne) ClearOsname() *MachineUpdateOne { + muo.mutation.ClearOsname() + return muo +} + +// SetOsversion sets the "osversion" field. +func (muo *MachineUpdateOne) SetOsversion(s string) *MachineUpdateOne { + muo.mutation.SetOsversion(s) + return muo +} + +// SetNillableOsversion sets the "osversion" field if the given value is not nil. +func (muo *MachineUpdateOne) SetNillableOsversion(s *string) *MachineUpdateOne { + if s != nil { + muo.SetOsversion(*s) + } + return muo +} + +// ClearOsversion clears the value of the "osversion" field. +func (muo *MachineUpdateOne) ClearOsversion() *MachineUpdateOne { + muo.mutation.ClearOsversion() + return muo +} + +// SetFeatureflags sets the "featureflags" field. +func (muo *MachineUpdateOne) SetFeatureflags(s string) *MachineUpdateOne { + muo.mutation.SetFeatureflags(s) + return muo +} + +// SetNillableFeatureflags sets the "featureflags" field if the given value is not nil. +func (muo *MachineUpdateOne) SetNillableFeatureflags(s *string) *MachineUpdateOne { + if s != nil { + muo.SetFeatureflags(*s) + } + return muo +} + +// ClearFeatureflags clears the value of the "featureflags" field. +func (muo *MachineUpdateOne) ClearFeatureflags() *MachineUpdateOne { + muo.mutation.ClearFeatureflags() + return muo +} + +// SetHubstate sets the "hubstate" field. +func (muo *MachineUpdateOne) SetHubstate(ms map[string]schema.ItemState) *MachineUpdateOne { + muo.mutation.SetHubstate(ms) + return muo +} + +// ClearHubstate clears the value of the "hubstate" field. 
+func (muo *MachineUpdateOne) ClearHubstate() *MachineUpdateOne { + muo.mutation.ClearHubstate() + return muo +} + +// SetDatasources sets the "datasources" field. +func (muo *MachineUpdateOne) SetDatasources(m map[string]int64) *MachineUpdateOne { + muo.mutation.SetDatasources(m) + return muo +} + +// ClearDatasources clears the value of the "datasources" field. +func (muo *MachineUpdateOne) ClearDatasources() *MachineUpdateOne { + muo.mutation.ClearDatasources() + return muo +} + // AddAlertIDs adds the "alerts" edge to the Alert entity by IDs. func (muo *MachineUpdateOne) AddAlertIDs(ids ...int) *MachineUpdateOne { muo.mutation.AddAlertIDs(ids...) @@ -736,6 +935,36 @@ func (muo *MachineUpdateOne) sqlSave(ctx context.Context) (_node *Machine, err e if value, ok := muo.mutation.AuthType(); ok { _spec.SetField(machine.FieldAuthType, field.TypeString, value) } + if value, ok := muo.mutation.Osname(); ok { + _spec.SetField(machine.FieldOsname, field.TypeString, value) + } + if muo.mutation.OsnameCleared() { + _spec.ClearField(machine.FieldOsname, field.TypeString) + } + if value, ok := muo.mutation.Osversion(); ok { + _spec.SetField(machine.FieldOsversion, field.TypeString, value) + } + if muo.mutation.OsversionCleared() { + _spec.ClearField(machine.FieldOsversion, field.TypeString) + } + if value, ok := muo.mutation.Featureflags(); ok { + _spec.SetField(machine.FieldFeatureflags, field.TypeString, value) + } + if muo.mutation.FeatureflagsCleared() { + _spec.ClearField(machine.FieldFeatureflags, field.TypeString) + } + if value, ok := muo.mutation.Hubstate(); ok { + _spec.SetField(machine.FieldHubstate, field.TypeJSON, value) + } + if muo.mutation.HubstateCleared() { + _spec.ClearField(machine.FieldHubstate, field.TypeJSON) + } + if value, ok := muo.mutation.Datasources(); ok { + _spec.SetField(machine.FieldDatasources, field.TypeJSON, value) + } + if muo.mutation.DatasourcesCleared() { + _spec.ClearField(machine.FieldDatasources, field.TypeJSON) + } if 
muo.mutation.AlertsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, diff --git a/pkg/database/ent/metric.go b/pkg/database/ent/metric.go new file mode 100644 index 00000000000..236d54da25d --- /dev/null +++ b/pkg/database/ent/metric.go @@ -0,0 +1,154 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/metric" +) + +// Metric is the model entity for the Metric schema. +type Metric struct { + config `json:"-"` + // ID of the ent. + ID int `json:"id,omitempty"` + // Type of the metrics source: LP=logprocessor, RC=remediation + GeneratedType metric.GeneratedType `json:"generated_type,omitempty"` + // Source of the metrics: machine id, bouncer name... + // It must come from the auth middleware. + GeneratedBy string `json:"generated_by,omitempty"` + // When the metrics are collected/calculated at the source + CollectedAt time.Time `json:"collected_at,omitempty"` + // When the metrics are sent to the console + PushedAt *time.Time `json:"pushed_at,omitempty"` + // The actual metrics (item0) + Payload string `json:"payload,omitempty"` + selectValues sql.SelectValues +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*Metric) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case metric.FieldID: + values[i] = new(sql.NullInt64) + case metric.FieldGeneratedType, metric.FieldGeneratedBy, metric.FieldPayload: + values[i] = new(sql.NullString) + case metric.FieldCollectedAt, metric.FieldPushedAt: + values[i] = new(sql.NullTime) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the Metric fields. 
+func (m *Metric) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case metric.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + m.ID = int(value.Int64) + case metric.FieldGeneratedType: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field generated_type", values[i]) + } else if value.Valid { + m.GeneratedType = metric.GeneratedType(value.String) + } + case metric.FieldGeneratedBy: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field generated_by", values[i]) + } else if value.Valid { + m.GeneratedBy = value.String + } + case metric.FieldCollectedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field collected_at", values[i]) + } else if value.Valid { + m.CollectedAt = value.Time + } + case metric.FieldPushedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field pushed_at", values[i]) + } else if value.Valid { + m.PushedAt = new(time.Time) + *m.PushedAt = value.Time + } + case metric.FieldPayload: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field payload", values[i]) + } else if value.Valid { + m.Payload = value.String + } + default: + m.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the Metric. +// This includes values selected through modifiers, order, etc. +func (m *Metric) Value(name string) (ent.Value, error) { + return m.selectValues.Get(name) +} + +// Update returns a builder for updating this Metric. 
+// Note that you need to call Metric.Unwrap() before calling this method if this Metric +// was returned from a transaction, and the transaction was committed or rolled back. +func (m *Metric) Update() *MetricUpdateOne { + return NewMetricClient(m.config).UpdateOne(m) +} + +// Unwrap unwraps the Metric entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (m *Metric) Unwrap() *Metric { + _tx, ok := m.config.driver.(*txDriver) + if !ok { + panic("ent: Metric is not a transactional entity") + } + m.config.driver = _tx.drv + return m +} + +// String implements the fmt.Stringer. +func (m *Metric) String() string { + var builder strings.Builder + builder.WriteString("Metric(") + builder.WriteString(fmt.Sprintf("id=%v, ", m.ID)) + builder.WriteString("generated_type=") + builder.WriteString(fmt.Sprintf("%v", m.GeneratedType)) + builder.WriteString(", ") + builder.WriteString("generated_by=") + builder.WriteString(m.GeneratedBy) + builder.WriteString(", ") + builder.WriteString("collected_at=") + builder.WriteString(m.CollectedAt.Format(time.ANSIC)) + builder.WriteString(", ") + if v := m.PushedAt; v != nil { + builder.WriteString("pushed_at=") + builder.WriteString(v.Format(time.ANSIC)) + } + builder.WriteString(", ") + builder.WriteString("payload=") + builder.WriteString(m.Payload) + builder.WriteByte(')') + return builder.String() +} + +// Metrics is a parsable slice of Metric. +type Metrics []*Metric diff --git a/pkg/database/ent/metric/metric.go b/pkg/database/ent/metric/metric.go new file mode 100644 index 00000000000..879f1006d64 --- /dev/null +++ b/pkg/database/ent/metric/metric.go @@ -0,0 +1,104 @@ +// Code generated by ent, DO NOT EDIT. + +package metric + +import ( + "fmt" + + "entgo.io/ent/dialect/sql" +) + +const ( + // Label holds the string label denoting the metric type in the database. 
+ Label = "metric" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldGeneratedType holds the string denoting the generated_type field in the database. + FieldGeneratedType = "generated_type" + // FieldGeneratedBy holds the string denoting the generated_by field in the database. + FieldGeneratedBy = "generated_by" + // FieldCollectedAt holds the string denoting the collected_at field in the database. + FieldCollectedAt = "collected_at" + // FieldPushedAt holds the string denoting the pushed_at field in the database. + FieldPushedAt = "pushed_at" + // FieldPayload holds the string denoting the payload field in the database. + FieldPayload = "payload" + // Table holds the table name of the metric in the database. + Table = "metrics" +) + +// Columns holds all SQL columns for metric fields. +var Columns = []string{ + FieldID, + FieldGeneratedType, + FieldGeneratedBy, + FieldCollectedAt, + FieldPushedAt, + FieldPayload, +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +// GeneratedType defines the type for the "generated_type" enum field. +type GeneratedType string + +// GeneratedType values. +const ( + GeneratedTypeLP GeneratedType = "LP" + GeneratedTypeRC GeneratedType = "RC" +) + +func (gt GeneratedType) String() string { + return string(gt) +} + +// GeneratedTypeValidator is a validator for the "generated_type" field enum values. It is called by the builders before save. +func GeneratedTypeValidator(gt GeneratedType) error { + switch gt { + case GeneratedTypeLP, GeneratedTypeRC: + return nil + default: + return fmt.Errorf("metric: invalid enum value for generated_type field: %q", gt) + } +} + +// OrderOption defines the ordering options for the Metric queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. 
+func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByGeneratedType orders the results by the generated_type field. +func ByGeneratedType(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldGeneratedType, opts...).ToFunc() +} + +// ByGeneratedBy orders the results by the generated_by field. +func ByGeneratedBy(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldGeneratedBy, opts...).ToFunc() +} + +// ByCollectedAt orders the results by the collected_at field. +func ByCollectedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCollectedAt, opts...).ToFunc() +} + +// ByPushedAt orders the results by the pushed_at field. +func ByPushedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldPushedAt, opts...).ToFunc() +} + +// ByPayload orders the results by the payload field. +func ByPayload(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldPayload, opts...).ToFunc() +} diff --git a/pkg/database/ent/metric/where.go b/pkg/database/ent/metric/where.go new file mode 100644 index 00000000000..e49f80f3411 --- /dev/null +++ b/pkg/database/ent/metric/where.go @@ -0,0 +1,330 @@ +// Code generated by ent, DO NOT EDIT. + +package metric + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id int) predicate.Metric { + return predicate.Metric(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int) predicate.Metric { + return predicate.Metric(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int) predicate.Metric { + return predicate.Metric(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. 
+func IDIn(ids ...int) predicate.Metric { + return predicate.Metric(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int) predicate.Metric { + return predicate.Metric(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int) predicate.Metric { + return predicate.Metric(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int) predicate.Metric { + return predicate.Metric(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int) predicate.Metric { + return predicate.Metric(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int) predicate.Metric { + return predicate.Metric(sql.FieldLTE(FieldID, id)) +} + +// GeneratedBy applies equality check predicate on the "generated_by" field. It's identical to GeneratedByEQ. +func GeneratedBy(v string) predicate.Metric { + return predicate.Metric(sql.FieldEQ(FieldGeneratedBy, v)) +} + +// CollectedAt applies equality check predicate on the "collected_at" field. It's identical to CollectedAtEQ. +func CollectedAt(v time.Time) predicate.Metric { + return predicate.Metric(sql.FieldEQ(FieldCollectedAt, v)) +} + +// PushedAt applies equality check predicate on the "pushed_at" field. It's identical to PushedAtEQ. +func PushedAt(v time.Time) predicate.Metric { + return predicate.Metric(sql.FieldEQ(FieldPushedAt, v)) +} + +// Payload applies equality check predicate on the "payload" field. It's identical to PayloadEQ. +func Payload(v string) predicate.Metric { + return predicate.Metric(sql.FieldEQ(FieldPayload, v)) +} + +// GeneratedTypeEQ applies the EQ predicate on the "generated_type" field. 
+func GeneratedTypeEQ(v GeneratedType) predicate.Metric { + return predicate.Metric(sql.FieldEQ(FieldGeneratedType, v)) +} + +// GeneratedTypeNEQ applies the NEQ predicate on the "generated_type" field. +func GeneratedTypeNEQ(v GeneratedType) predicate.Metric { + return predicate.Metric(sql.FieldNEQ(FieldGeneratedType, v)) +} + +// GeneratedTypeIn applies the In predicate on the "generated_type" field. +func GeneratedTypeIn(vs ...GeneratedType) predicate.Metric { + return predicate.Metric(sql.FieldIn(FieldGeneratedType, vs...)) +} + +// GeneratedTypeNotIn applies the NotIn predicate on the "generated_type" field. +func GeneratedTypeNotIn(vs ...GeneratedType) predicate.Metric { + return predicate.Metric(sql.FieldNotIn(FieldGeneratedType, vs...)) +} + +// GeneratedByEQ applies the EQ predicate on the "generated_by" field. +func GeneratedByEQ(v string) predicate.Metric { + return predicate.Metric(sql.FieldEQ(FieldGeneratedBy, v)) +} + +// GeneratedByNEQ applies the NEQ predicate on the "generated_by" field. +func GeneratedByNEQ(v string) predicate.Metric { + return predicate.Metric(sql.FieldNEQ(FieldGeneratedBy, v)) +} + +// GeneratedByIn applies the In predicate on the "generated_by" field. +func GeneratedByIn(vs ...string) predicate.Metric { + return predicate.Metric(sql.FieldIn(FieldGeneratedBy, vs...)) +} + +// GeneratedByNotIn applies the NotIn predicate on the "generated_by" field. +func GeneratedByNotIn(vs ...string) predicate.Metric { + return predicate.Metric(sql.FieldNotIn(FieldGeneratedBy, vs...)) +} + +// GeneratedByGT applies the GT predicate on the "generated_by" field. +func GeneratedByGT(v string) predicate.Metric { + return predicate.Metric(sql.FieldGT(FieldGeneratedBy, v)) +} + +// GeneratedByGTE applies the GTE predicate on the "generated_by" field. +func GeneratedByGTE(v string) predicate.Metric { + return predicate.Metric(sql.FieldGTE(FieldGeneratedBy, v)) +} + +// GeneratedByLT applies the LT predicate on the "generated_by" field. 
+func GeneratedByLT(v string) predicate.Metric { + return predicate.Metric(sql.FieldLT(FieldGeneratedBy, v)) +} + +// GeneratedByLTE applies the LTE predicate on the "generated_by" field. +func GeneratedByLTE(v string) predicate.Metric { + return predicate.Metric(sql.FieldLTE(FieldGeneratedBy, v)) +} + +// GeneratedByContains applies the Contains predicate on the "generated_by" field. +func GeneratedByContains(v string) predicate.Metric { + return predicate.Metric(sql.FieldContains(FieldGeneratedBy, v)) +} + +// GeneratedByHasPrefix applies the HasPrefix predicate on the "generated_by" field. +func GeneratedByHasPrefix(v string) predicate.Metric { + return predicate.Metric(sql.FieldHasPrefix(FieldGeneratedBy, v)) +} + +// GeneratedByHasSuffix applies the HasSuffix predicate on the "generated_by" field. +func GeneratedByHasSuffix(v string) predicate.Metric { + return predicate.Metric(sql.FieldHasSuffix(FieldGeneratedBy, v)) +} + +// GeneratedByEqualFold applies the EqualFold predicate on the "generated_by" field. +func GeneratedByEqualFold(v string) predicate.Metric { + return predicate.Metric(sql.FieldEqualFold(FieldGeneratedBy, v)) +} + +// GeneratedByContainsFold applies the ContainsFold predicate on the "generated_by" field. +func GeneratedByContainsFold(v string) predicate.Metric { + return predicate.Metric(sql.FieldContainsFold(FieldGeneratedBy, v)) +} + +// CollectedAtEQ applies the EQ predicate on the "collected_at" field. +func CollectedAtEQ(v time.Time) predicate.Metric { + return predicate.Metric(sql.FieldEQ(FieldCollectedAt, v)) +} + +// CollectedAtNEQ applies the NEQ predicate on the "collected_at" field. +func CollectedAtNEQ(v time.Time) predicate.Metric { + return predicate.Metric(sql.FieldNEQ(FieldCollectedAt, v)) +} + +// CollectedAtIn applies the In predicate on the "collected_at" field. 
+func CollectedAtIn(vs ...time.Time) predicate.Metric { + return predicate.Metric(sql.FieldIn(FieldCollectedAt, vs...)) +} + +// CollectedAtNotIn applies the NotIn predicate on the "collected_at" field. +func CollectedAtNotIn(vs ...time.Time) predicate.Metric { + return predicate.Metric(sql.FieldNotIn(FieldCollectedAt, vs...)) +} + +// CollectedAtGT applies the GT predicate on the "collected_at" field. +func CollectedAtGT(v time.Time) predicate.Metric { + return predicate.Metric(sql.FieldGT(FieldCollectedAt, v)) +} + +// CollectedAtGTE applies the GTE predicate on the "collected_at" field. +func CollectedAtGTE(v time.Time) predicate.Metric { + return predicate.Metric(sql.FieldGTE(FieldCollectedAt, v)) +} + +// CollectedAtLT applies the LT predicate on the "collected_at" field. +func CollectedAtLT(v time.Time) predicate.Metric { + return predicate.Metric(sql.FieldLT(FieldCollectedAt, v)) +} + +// CollectedAtLTE applies the LTE predicate on the "collected_at" field. +func CollectedAtLTE(v time.Time) predicate.Metric { + return predicate.Metric(sql.FieldLTE(FieldCollectedAt, v)) +} + +// PushedAtEQ applies the EQ predicate on the "pushed_at" field. +func PushedAtEQ(v time.Time) predicate.Metric { + return predicate.Metric(sql.FieldEQ(FieldPushedAt, v)) +} + +// PushedAtNEQ applies the NEQ predicate on the "pushed_at" field. +func PushedAtNEQ(v time.Time) predicate.Metric { + return predicate.Metric(sql.FieldNEQ(FieldPushedAt, v)) +} + +// PushedAtIn applies the In predicate on the "pushed_at" field. +func PushedAtIn(vs ...time.Time) predicate.Metric { + return predicate.Metric(sql.FieldIn(FieldPushedAt, vs...)) +} + +// PushedAtNotIn applies the NotIn predicate on the "pushed_at" field. +func PushedAtNotIn(vs ...time.Time) predicate.Metric { + return predicate.Metric(sql.FieldNotIn(FieldPushedAt, vs...)) +} + +// PushedAtGT applies the GT predicate on the "pushed_at" field. 
+func PushedAtGT(v time.Time) predicate.Metric { + return predicate.Metric(sql.FieldGT(FieldPushedAt, v)) +} + +// PushedAtGTE applies the GTE predicate on the "pushed_at" field. +func PushedAtGTE(v time.Time) predicate.Metric { + return predicate.Metric(sql.FieldGTE(FieldPushedAt, v)) +} + +// PushedAtLT applies the LT predicate on the "pushed_at" field. +func PushedAtLT(v time.Time) predicate.Metric { + return predicate.Metric(sql.FieldLT(FieldPushedAt, v)) +} + +// PushedAtLTE applies the LTE predicate on the "pushed_at" field. +func PushedAtLTE(v time.Time) predicate.Metric { + return predicate.Metric(sql.FieldLTE(FieldPushedAt, v)) +} + +// PushedAtIsNil applies the IsNil predicate on the "pushed_at" field. +func PushedAtIsNil() predicate.Metric { + return predicate.Metric(sql.FieldIsNull(FieldPushedAt)) +} + +// PushedAtNotNil applies the NotNil predicate on the "pushed_at" field. +func PushedAtNotNil() predicate.Metric { + return predicate.Metric(sql.FieldNotNull(FieldPushedAt)) +} + +// PayloadEQ applies the EQ predicate on the "payload" field. +func PayloadEQ(v string) predicate.Metric { + return predicate.Metric(sql.FieldEQ(FieldPayload, v)) +} + +// PayloadNEQ applies the NEQ predicate on the "payload" field. +func PayloadNEQ(v string) predicate.Metric { + return predicate.Metric(sql.FieldNEQ(FieldPayload, v)) +} + +// PayloadIn applies the In predicate on the "payload" field. +func PayloadIn(vs ...string) predicate.Metric { + return predicate.Metric(sql.FieldIn(FieldPayload, vs...)) +} + +// PayloadNotIn applies the NotIn predicate on the "payload" field. +func PayloadNotIn(vs ...string) predicate.Metric { + return predicate.Metric(sql.FieldNotIn(FieldPayload, vs...)) +} + +// PayloadGT applies the GT predicate on the "payload" field. +func PayloadGT(v string) predicate.Metric { + return predicate.Metric(sql.FieldGT(FieldPayload, v)) +} + +// PayloadGTE applies the GTE predicate on the "payload" field. 
+func PayloadGTE(v string) predicate.Metric { + return predicate.Metric(sql.FieldGTE(FieldPayload, v)) +} + +// PayloadLT applies the LT predicate on the "payload" field. +func PayloadLT(v string) predicate.Metric { + return predicate.Metric(sql.FieldLT(FieldPayload, v)) +} + +// PayloadLTE applies the LTE predicate on the "payload" field. +func PayloadLTE(v string) predicate.Metric { + return predicate.Metric(sql.FieldLTE(FieldPayload, v)) +} + +// PayloadContains applies the Contains predicate on the "payload" field. +func PayloadContains(v string) predicate.Metric { + return predicate.Metric(sql.FieldContains(FieldPayload, v)) +} + +// PayloadHasPrefix applies the HasPrefix predicate on the "payload" field. +func PayloadHasPrefix(v string) predicate.Metric { + return predicate.Metric(sql.FieldHasPrefix(FieldPayload, v)) +} + +// PayloadHasSuffix applies the HasSuffix predicate on the "payload" field. +func PayloadHasSuffix(v string) predicate.Metric { + return predicate.Metric(sql.FieldHasSuffix(FieldPayload, v)) +} + +// PayloadEqualFold applies the EqualFold predicate on the "payload" field. +func PayloadEqualFold(v string) predicate.Metric { + return predicate.Metric(sql.FieldEqualFold(FieldPayload, v)) +} + +// PayloadContainsFold applies the ContainsFold predicate on the "payload" field. +func PayloadContainsFold(v string) predicate.Metric { + return predicate.Metric(sql.FieldContainsFold(FieldPayload, v)) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.Metric) predicate.Metric { + return predicate.Metric(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.Metric) predicate.Metric { + return predicate.Metric(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. 
+func Not(p predicate.Metric) predicate.Metric { + return predicate.Metric(sql.NotPredicates(p)) +} diff --git a/pkg/database/ent/metric_create.go b/pkg/database/ent/metric_create.go new file mode 100644 index 00000000000..8fa656db427 --- /dev/null +++ b/pkg/database/ent/metric_create.go @@ -0,0 +1,246 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/metric" +) + +// MetricCreate is the builder for creating a Metric entity. +type MetricCreate struct { + config + mutation *MetricMutation + hooks []Hook +} + +// SetGeneratedType sets the "generated_type" field. +func (mc *MetricCreate) SetGeneratedType(mt metric.GeneratedType) *MetricCreate { + mc.mutation.SetGeneratedType(mt) + return mc +} + +// SetGeneratedBy sets the "generated_by" field. +func (mc *MetricCreate) SetGeneratedBy(s string) *MetricCreate { + mc.mutation.SetGeneratedBy(s) + return mc +} + +// SetCollectedAt sets the "collected_at" field. +func (mc *MetricCreate) SetCollectedAt(t time.Time) *MetricCreate { + mc.mutation.SetCollectedAt(t) + return mc +} + +// SetPushedAt sets the "pushed_at" field. +func (mc *MetricCreate) SetPushedAt(t time.Time) *MetricCreate { + mc.mutation.SetPushedAt(t) + return mc +} + +// SetNillablePushedAt sets the "pushed_at" field if the given value is not nil. +func (mc *MetricCreate) SetNillablePushedAt(t *time.Time) *MetricCreate { + if t != nil { + mc.SetPushedAt(*t) + } + return mc +} + +// SetPayload sets the "payload" field. +func (mc *MetricCreate) SetPayload(s string) *MetricCreate { + mc.mutation.SetPayload(s) + return mc +} + +// Mutation returns the MetricMutation object of the builder. +func (mc *MetricCreate) Mutation() *MetricMutation { + return mc.mutation +} + +// Save creates the Metric in the database. 
+func (mc *MetricCreate) Save(ctx context.Context) (*Metric, error) { + return withHooks(ctx, mc.sqlSave, mc.mutation, mc.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (mc *MetricCreate) SaveX(ctx context.Context) *Metric { + v, err := mc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (mc *MetricCreate) Exec(ctx context.Context) error { + _, err := mc.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (mc *MetricCreate) ExecX(ctx context.Context) { + if err := mc.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. +func (mc *MetricCreate) check() error { + if _, ok := mc.mutation.GeneratedType(); !ok { + return &ValidationError{Name: "generated_type", err: errors.New(`ent: missing required field "Metric.generated_type"`)} + } + if v, ok := mc.mutation.GeneratedType(); ok { + if err := metric.GeneratedTypeValidator(v); err != nil { + return &ValidationError{Name: "generated_type", err: fmt.Errorf(`ent: validator failed for field "Metric.generated_type": %w`, err)} + } + } + if _, ok := mc.mutation.GeneratedBy(); !ok { + return &ValidationError{Name: "generated_by", err: errors.New(`ent: missing required field "Metric.generated_by"`)} + } + if _, ok := mc.mutation.CollectedAt(); !ok { + return &ValidationError{Name: "collected_at", err: errors.New(`ent: missing required field "Metric.collected_at"`)} + } + if _, ok := mc.mutation.Payload(); !ok { + return &ValidationError{Name: "payload", err: errors.New(`ent: missing required field "Metric.payload"`)} + } + return nil +} + +func (mc *MetricCreate) sqlSave(ctx context.Context) (*Metric, error) { + if err := mc.check(); err != nil { + return nil, err + } + _node, _spec := mc.createSpec() + if err := sqlgraph.CreateNode(ctx, mc.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), 
wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int(id) + mc.mutation.id = &_node.ID + mc.mutation.done = true + return _node, nil +} + +func (mc *MetricCreate) createSpec() (*Metric, *sqlgraph.CreateSpec) { + var ( + _node = &Metric{config: mc.config} + _spec = sqlgraph.NewCreateSpec(metric.Table, sqlgraph.NewFieldSpec(metric.FieldID, field.TypeInt)) + ) + if value, ok := mc.mutation.GeneratedType(); ok { + _spec.SetField(metric.FieldGeneratedType, field.TypeEnum, value) + _node.GeneratedType = value + } + if value, ok := mc.mutation.GeneratedBy(); ok { + _spec.SetField(metric.FieldGeneratedBy, field.TypeString, value) + _node.GeneratedBy = value + } + if value, ok := mc.mutation.CollectedAt(); ok { + _spec.SetField(metric.FieldCollectedAt, field.TypeTime, value) + _node.CollectedAt = value + } + if value, ok := mc.mutation.PushedAt(); ok { + _spec.SetField(metric.FieldPushedAt, field.TypeTime, value) + _node.PushedAt = &value + } + if value, ok := mc.mutation.Payload(); ok { + _spec.SetField(metric.FieldPayload, field.TypeString, value) + _node.Payload = value + } + return _node, _spec +} + +// MetricCreateBulk is the builder for creating many Metric entities in bulk. +type MetricCreateBulk struct { + config + err error + builders []*MetricCreate +} + +// Save creates the Metric entities in the database. 
+func (mcb *MetricCreateBulk) Save(ctx context.Context) ([]*Metric, error) { + if mcb.err != nil { + return nil, mcb.err + } + specs := make([]*sqlgraph.CreateSpec, len(mcb.builders)) + nodes := make([]*Metric, len(mcb.builders)) + mutators := make([]Mutator, len(mcb.builders)) + for i := range mcb.builders { + func(i int, root context.Context) { + builder := mcb.builders[i] + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*MetricMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, mcb.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, mcb.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + if specs[i].ID.Value != nil { + id := specs[i].ID.Value.(int64) + nodes[i].ID = int(id) + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, mcb.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (mcb *MetricCreateBulk) SaveX(ctx context.Context) []*Metric { + v, err := mcb.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. 
+func (mcb *MetricCreateBulk) Exec(ctx context.Context) error { + _, err := mcb.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (mcb *MetricCreateBulk) ExecX(ctx context.Context) { + if err := mcb.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/pkg/database/ent/metric_delete.go b/pkg/database/ent/metric_delete.go new file mode 100644 index 00000000000..d6606680a6a --- /dev/null +++ b/pkg/database/ent/metric_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/metric" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" +) + +// MetricDelete is the builder for deleting a Metric entity. +type MetricDelete struct { + config + hooks []Hook + mutation *MetricMutation +} + +// Where appends a list predicates to the MetricDelete builder. +func (md *MetricDelete) Where(ps ...predicate.Metric) *MetricDelete { + md.mutation.Where(ps...) + return md +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (md *MetricDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, md.sqlExec, md.mutation, md.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (md *MetricDelete) ExecX(ctx context.Context) int { + n, err := md.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (md *MetricDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(metric.Table, sqlgraph.NewFieldSpec(metric.FieldID, field.TypeInt)) + if ps := md.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, md.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + md.mutation.done = true + return affected, err +} + +// MetricDeleteOne is the builder for deleting a single Metric entity. +type MetricDeleteOne struct { + md *MetricDelete +} + +// Where appends a list predicates to the MetricDelete builder. +func (mdo *MetricDeleteOne) Where(ps ...predicate.Metric) *MetricDeleteOne { + mdo.md.mutation.Where(ps...) + return mdo +} + +// Exec executes the deletion query. +func (mdo *MetricDeleteOne) Exec(ctx context.Context) error { + n, err := mdo.md.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{metric.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (mdo *MetricDeleteOne) ExecX(ctx context.Context) { + if err := mdo.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/pkg/database/ent/metric_query.go b/pkg/database/ent/metric_query.go new file mode 100644 index 00000000000..6e1c6f08b4a --- /dev/null +++ b/pkg/database/ent/metric_query.go @@ -0,0 +1,526 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "fmt" + "math" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/metric" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" +) + +// MetricQuery is the builder for querying Metric entities. +type MetricQuery struct { + config + ctx *QueryContext + order []metric.OrderOption + inters []Interceptor + predicates []predicate.Metric + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the MetricQuery builder. +func (mq *MetricQuery) Where(ps ...predicate.Metric) *MetricQuery { + mq.predicates = append(mq.predicates, ps...) + return mq +} + +// Limit the number of records to be returned by this query. +func (mq *MetricQuery) Limit(limit int) *MetricQuery { + mq.ctx.Limit = &limit + return mq +} + +// Offset to start from. +func (mq *MetricQuery) Offset(offset int) *MetricQuery { + mq.ctx.Offset = &offset + return mq +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (mq *MetricQuery) Unique(unique bool) *MetricQuery { + mq.ctx.Unique = &unique + return mq +} + +// Order specifies how the records should be ordered. +func (mq *MetricQuery) Order(o ...metric.OrderOption) *MetricQuery { + mq.order = append(mq.order, o...) + return mq +} + +// First returns the first Metric entity from the query. +// Returns a *NotFoundError when no Metric was found. +func (mq *MetricQuery) First(ctx context.Context) (*Metric, error) { + nodes, err := mq.Limit(1).All(setContextOp(ctx, mq.ctx, "First")) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{metric.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. 
+func (mq *MetricQuery) FirstX(ctx context.Context) *Metric { + node, err := mq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first Metric ID from the query. +// Returns a *NotFoundError when no Metric ID was found. +func (mq *MetricQuery) FirstID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = mq.Limit(1).IDs(setContextOp(ctx, mq.ctx, "FirstID")); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{metric.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (mq *MetricQuery) FirstIDX(ctx context.Context) int { + id, err := mq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single Metric entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one Metric entity is found. +// Returns a *NotFoundError when no Metric entities are found. +func (mq *MetricQuery) Only(ctx context.Context) (*Metric, error) { + nodes, err := mq.Limit(2).All(setContextOp(ctx, mq.ctx, "Only")) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{metric.Label} + default: + return nil, &NotSingularError{metric.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (mq *MetricQuery) OnlyX(ctx context.Context) *Metric { + node, err := mq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only Metric ID in the query. +// Returns a *NotSingularError when more than one Metric ID is found. +// Returns a *NotFoundError when no entities are found. 
+func (mq *MetricQuery) OnlyID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = mq.Limit(2).IDs(setContextOp(ctx, mq.ctx, "OnlyID")); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{metric.Label} + default: + err = &NotSingularError{metric.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (mq *MetricQuery) OnlyIDX(ctx context.Context) int { + id, err := mq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of Metrics. +func (mq *MetricQuery) All(ctx context.Context) ([]*Metric, error) { + ctx = setContextOp(ctx, mq.ctx, "All") + if err := mq.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*Metric, *MetricQuery]() + return withInterceptors[[]*Metric](ctx, mq, qr, mq.inters) +} + +// AllX is like All, but panics if an error occurs. +func (mq *MetricQuery) AllX(ctx context.Context) []*Metric { + nodes, err := mq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of Metric IDs. +func (mq *MetricQuery) IDs(ctx context.Context) (ids []int, err error) { + if mq.ctx.Unique == nil && mq.path != nil { + mq.Unique(true) + } + ctx = setContextOp(ctx, mq.ctx, "IDs") + if err = mq.Select(metric.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (mq *MetricQuery) IDsX(ctx context.Context) []int { + ids, err := mq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. 
+func (mq *MetricQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, mq.ctx, "Count") + if err := mq.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, mq, querierCount[*MetricQuery](), mq.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (mq *MetricQuery) CountX(ctx context.Context) int { + count, err := mq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (mq *MetricQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, mq.ctx, "Exist") + switch _, err := mq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (mq *MetricQuery) ExistX(ctx context.Context) bool { + exist, err := mq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the MetricQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (mq *MetricQuery) Clone() *MetricQuery { + if mq == nil { + return nil + } + return &MetricQuery{ + config: mq.config, + ctx: mq.ctx.Clone(), + order: append([]metric.OrderOption{}, mq.order...), + inters: append([]Interceptor{}, mq.inters...), + predicates: append([]predicate.Metric{}, mq.predicates...), + // clone intermediate query. + sql: mq.sql.Clone(), + path: mq.path, + } +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// GeneratedType metric.GeneratedType `json:"generated_type,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.Metric.Query(). 
+// GroupBy(metric.FieldGeneratedType). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (mq *MetricQuery) GroupBy(field string, fields ...string) *MetricGroupBy { + mq.ctx.Fields = append([]string{field}, fields...) + grbuild := &MetricGroupBy{build: mq} + grbuild.flds = &mq.ctx.Fields + grbuild.label = metric.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// GeneratedType metric.GeneratedType `json:"generated_type,omitempty"` +// } +// +// client.Metric.Query(). +// Select(metric.FieldGeneratedType). +// Scan(ctx, &v) +func (mq *MetricQuery) Select(fields ...string) *MetricSelect { + mq.ctx.Fields = append(mq.ctx.Fields, fields...) + sbuild := &MetricSelect{MetricQuery: mq} + sbuild.label = metric.Label + sbuild.flds, sbuild.scan = &mq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a MetricSelect configured with the given aggregations. +func (mq *MetricQuery) Aggregate(fns ...AggregateFunc) *MetricSelect { + return mq.Select().Aggregate(fns...) 
+} + +func (mq *MetricQuery) prepareQuery(ctx context.Context) error { + for _, inter := range mq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, mq); err != nil { + return err + } + } + } + for _, f := range mq.ctx.Fields { + if !metric.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if mq.path != nil { + prev, err := mq.path(ctx) + if err != nil { + return err + } + mq.sql = prev + } + return nil +} + +func (mq *MetricQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Metric, error) { + var ( + nodes = []*Metric{} + _spec = mq.querySpec() + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*Metric).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &Metric{config: mq.config} + nodes = append(nodes, node) + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, mq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + return nodes, nil +} + +func (mq *MetricQuery) sqlCount(ctx context.Context) (int, error) { + _spec := mq.querySpec() + _spec.Node.Columns = mq.ctx.Fields + if len(mq.ctx.Fields) > 0 { + _spec.Unique = mq.ctx.Unique != nil && *mq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, mq.driver, _spec) +} + +func (mq *MetricQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(metric.Table, metric.Columns, sqlgraph.NewFieldSpec(metric.FieldID, field.TypeInt)) + _spec.From = mq.sql + if unique := mq.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if mq.path != nil { + _spec.Unique = true + } + if fields := mq.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = 
append(_spec.Node.Columns, metric.FieldID) + for i := range fields { + if fields[i] != metric.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := mq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := mq.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := mq.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := mq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (mq *MetricQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(mq.driver.Dialect()) + t1 := builder.Table(metric.Table) + columns := mq.ctx.Fields + if len(columns) == 0 { + columns = metric.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if mq.sql != nil { + selector = mq.sql + selector.Select(selector.Columns(columns...)...) + } + if mq.ctx.Unique != nil && *mq.ctx.Unique { + selector.Distinct() + } + for _, p := range mq.predicates { + p(selector) + } + for _, p := range mq.order { + p(selector) + } + if offset := mq.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := mq.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// MetricGroupBy is the group-by builder for Metric entities. +type MetricGroupBy struct { + selector + build *MetricQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (mgb *MetricGroupBy) Aggregate(fns ...AggregateFunc) *MetricGroupBy { + mgb.fns = append(mgb.fns, fns...) + return mgb +} + +// Scan applies the selector query and scans the result into the given value. 
+func (mgb *MetricGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, mgb.build.ctx, "GroupBy") + if err := mgb.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*MetricQuery, *MetricGroupBy](ctx, mgb.build, mgb, mgb.build.inters, v) +} + +func (mgb *MetricGroupBy) sqlScan(ctx context.Context, root *MetricQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(mgb.fns)) + for _, fn := range mgb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*mgb.flds)+len(mgb.fns)) + for _, f := range *mgb.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*mgb.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := mgb.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// MetricSelect is the builder for selecting fields of Metric entities. +type MetricSelect struct { + *MetricQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (ms *MetricSelect) Aggregate(fns ...AggregateFunc) *MetricSelect { + ms.fns = append(ms.fns, fns...) + return ms +} + +// Scan applies the selector query and scans the result into the given value. 
+func (ms *MetricSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, ms.ctx, "Select") + if err := ms.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*MetricQuery, *MetricSelect](ctx, ms.MetricQuery, ms, ms.inters, v) +} + +func (ms *MetricSelect) sqlScan(ctx context.Context, root *MetricQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(ms.fns)) + for _, fn := range ms.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*ms.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := ms.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/pkg/database/ent/metric_update.go b/pkg/database/ent/metric_update.go new file mode 100644 index 00000000000..4da33dd6ce9 --- /dev/null +++ b/pkg/database/ent/metric_update.go @@ -0,0 +1,228 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/metric" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" +) + +// MetricUpdate is the builder for updating Metric entities. +type MetricUpdate struct { + config + hooks []Hook + mutation *MetricMutation +} + +// Where appends a list predicates to the MetricUpdate builder. +func (mu *MetricUpdate) Where(ps ...predicate.Metric) *MetricUpdate { + mu.mutation.Where(ps...) + return mu +} + +// SetPushedAt sets the "pushed_at" field. 
+func (mu *MetricUpdate) SetPushedAt(t time.Time) *MetricUpdate { + mu.mutation.SetPushedAt(t) + return mu +} + +// SetNillablePushedAt sets the "pushed_at" field if the given value is not nil. +func (mu *MetricUpdate) SetNillablePushedAt(t *time.Time) *MetricUpdate { + if t != nil { + mu.SetPushedAt(*t) + } + return mu +} + +// ClearPushedAt clears the value of the "pushed_at" field. +func (mu *MetricUpdate) ClearPushedAt() *MetricUpdate { + mu.mutation.ClearPushedAt() + return mu +} + +// Mutation returns the MetricMutation object of the builder. +func (mu *MetricUpdate) Mutation() *MetricMutation { + return mu.mutation +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (mu *MetricUpdate) Save(ctx context.Context) (int, error) { + return withHooks(ctx, mu.sqlSave, mu.mutation, mu.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (mu *MetricUpdate) SaveX(ctx context.Context) int { + affected, err := mu.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (mu *MetricUpdate) Exec(ctx context.Context) error { + _, err := mu.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (mu *MetricUpdate) ExecX(ctx context.Context) { + if err := mu.Exec(ctx); err != nil { + panic(err) + } +} + +func (mu *MetricUpdate) sqlSave(ctx context.Context) (n int, err error) { + _spec := sqlgraph.NewUpdateSpec(metric.Table, metric.Columns, sqlgraph.NewFieldSpec(metric.FieldID, field.TypeInt)) + if ps := mu.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := mu.mutation.PushedAt(); ok { + _spec.SetField(metric.FieldPushedAt, field.TypeTime, value) + } + if mu.mutation.PushedAtCleared() { + _spec.ClearField(metric.FieldPushedAt, field.TypeTime) + } + if n, err = sqlgraph.UpdateNodes(ctx, mu.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{metric.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + mu.mutation.done = true + return n, nil +} + +// MetricUpdateOne is the builder for updating a single Metric entity. +type MetricUpdateOne struct { + config + fields []string + hooks []Hook + mutation *MetricMutation +} + +// SetPushedAt sets the "pushed_at" field. +func (muo *MetricUpdateOne) SetPushedAt(t time.Time) *MetricUpdateOne { + muo.mutation.SetPushedAt(t) + return muo +} + +// SetNillablePushedAt sets the "pushed_at" field if the given value is not nil. +func (muo *MetricUpdateOne) SetNillablePushedAt(t *time.Time) *MetricUpdateOne { + if t != nil { + muo.SetPushedAt(*t) + } + return muo +} + +// ClearPushedAt clears the value of the "pushed_at" field. +func (muo *MetricUpdateOne) ClearPushedAt() *MetricUpdateOne { + muo.mutation.ClearPushedAt() + return muo +} + +// Mutation returns the MetricMutation object of the builder. +func (muo *MetricUpdateOne) Mutation() *MetricMutation { + return muo.mutation +} + +// Where appends a list predicates to the MetricUpdate builder. 
+func (muo *MetricUpdateOne) Where(ps ...predicate.Metric) *MetricUpdateOne { + muo.mutation.Where(ps...) + return muo +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (muo *MetricUpdateOne) Select(field string, fields ...string) *MetricUpdateOne { + muo.fields = append([]string{field}, fields...) + return muo +} + +// Save executes the query and returns the updated Metric entity. +func (muo *MetricUpdateOne) Save(ctx context.Context) (*Metric, error) { + return withHooks(ctx, muo.sqlSave, muo.mutation, muo.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (muo *MetricUpdateOne) SaveX(ctx context.Context) *Metric { + node, err := muo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (muo *MetricUpdateOne) Exec(ctx context.Context) error { + _, err := muo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (muo *MetricUpdateOne) ExecX(ctx context.Context) { + if err := muo.Exec(ctx); err != nil { + panic(err) + } +} + +func (muo *MetricUpdateOne) sqlSave(ctx context.Context) (_node *Metric, err error) { + _spec := sqlgraph.NewUpdateSpec(metric.Table, metric.Columns, sqlgraph.NewFieldSpec(metric.FieldID, field.TypeInt)) + id, ok := muo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Metric.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := muo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, metric.FieldID) + for _, f := range fields { + if !metric.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != metric.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := muo.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := muo.mutation.PushedAt(); ok { + _spec.SetField(metric.FieldPushedAt, field.TypeTime, value) + } + if muo.mutation.PushedAtCleared() { + _spec.ClearField(metric.FieldPushedAt, field.TypeTime) + } + _node = &Metric{config: muo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, muo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{metric.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + muo.mutation.done = true + return _node, nil +} diff --git a/pkg/database/ent/migrate/schema.go b/pkg/database/ent/migrate/schema.go index 584e848f09e..5c32c472403 100644 --- a/pkg/database/ent/migrate/schema.go +++ b/pkg/database/ent/migrate/schema.go @@ -70,6 +70,9 @@ var ( {Name: "version", Type: field.TypeString, 
Nullable: true}, {Name: "last_pull", Type: field.TypeTime, Nullable: true}, {Name: "auth_type", Type: field.TypeString, Default: "api-key"}, + {Name: "osname", Type: field.TypeString, Nullable: true}, + {Name: "osversion", Type: field.TypeString, Nullable: true}, + {Name: "featureflags", Type: field.TypeString, Nullable: true}, } // BouncersTable holds the schema information for the "bouncers" table. BouncersTable = &schema.Table{ @@ -204,6 +207,11 @@ var ( {Name: "is_validated", Type: field.TypeBool, Default: false}, {Name: "status", Type: field.TypeString, Nullable: true}, {Name: "auth_type", Type: field.TypeString, Default: "password"}, + {Name: "osname", Type: field.TypeString, Nullable: true}, + {Name: "osversion", Type: field.TypeString, Nullable: true}, + {Name: "featureflags", Type: field.TypeString, Nullable: true}, + {Name: "hubstate", Type: field.TypeJSON, Nullable: true}, + {Name: "datasources", Type: field.TypeJSON, Nullable: true}, } // MachinesTable holds the schema information for the "machines" table. MachinesTable = &schema.Table{ @@ -241,6 +249,28 @@ var ( }, }, } + // MetricsColumns holds the columns for the "metrics" table. + MetricsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt, Increment: true}, + {Name: "generated_type", Type: field.TypeEnum, Enums: []string{"LP", "RC"}}, + {Name: "generated_by", Type: field.TypeString}, + {Name: "collected_at", Type: field.TypeTime}, + {Name: "pushed_at", Type: field.TypeTime, Nullable: true}, + {Name: "payload", Type: field.TypeString}, + } + // MetricsTable holds the schema information for the "metrics" table. + MetricsTable = &schema.Table{ + Name: "metrics", + Columns: MetricsColumns, + PrimaryKey: []*schema.Column{MetricsColumns[0]}, + Indexes: []*schema.Index{ + { + Name: "metric_generated_type_generated_by_collected_at", + Unique: true, + Columns: []*schema.Column{MetricsColumns[1], MetricsColumns[2], MetricsColumns[3]}, + }, + }, + } // Tables holds all the tables in the schema. 
Tables = []*schema.Table{ AlertsTable, @@ -251,6 +281,7 @@ var ( LocksTable, MachinesTable, MetaTable, + MetricsTable, } ) diff --git a/pkg/database/ent/mutation.go b/pkg/database/ent/mutation.go index c012e870c8f..8d109ece379 100644 --- a/pkg/database/ent/mutation.go +++ b/pkg/database/ent/mutation.go @@ -19,7 +19,9 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/database/ent/lock" "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" "github.com/crowdsecurity/crowdsec/pkg/database/ent/meta" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/metric" "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/schema" ) const ( @@ -39,6 +41,7 @@ const ( TypeLock = "Lock" TypeMachine = "Machine" TypeMeta = "Meta" + TypeMetric = "Metric" ) // AlertMutation represents an operation that mutates the Alert nodes in the graph. @@ -2392,6 +2395,9 @@ type BouncerMutation struct { version *string last_pull *time.Time auth_type *string + osname *string + osversion *string + featureflags *string clearedFields map[string]struct{} done bool oldValue func(context.Context) (*Bouncer, error) @@ -2908,6 +2914,153 @@ func (m *BouncerMutation) ResetAuthType() { m.auth_type = nil } +// SetOsname sets the "osname" field. +func (m *BouncerMutation) SetOsname(s string) { + m.osname = &s +} + +// Osname returns the value of the "osname" field in the mutation. +func (m *BouncerMutation) Osname() (r string, exists bool) { + v := m.osname + if v == nil { + return + } + return *v, true +} + +// OldOsname returns the old "osname" field's value of the Bouncer entity. +// If the Bouncer object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *BouncerMutation) OldOsname(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldOsname is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldOsname requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldOsname: %w", err) + } + return oldValue.Osname, nil +} + +// ClearOsname clears the value of the "osname" field. +func (m *BouncerMutation) ClearOsname() { + m.osname = nil + m.clearedFields[bouncer.FieldOsname] = struct{}{} +} + +// OsnameCleared returns if the "osname" field was cleared in this mutation. +func (m *BouncerMutation) OsnameCleared() bool { + _, ok := m.clearedFields[bouncer.FieldOsname] + return ok +} + +// ResetOsname resets all changes to the "osname" field. +func (m *BouncerMutation) ResetOsname() { + m.osname = nil + delete(m.clearedFields, bouncer.FieldOsname) +} + +// SetOsversion sets the "osversion" field. +func (m *BouncerMutation) SetOsversion(s string) { + m.osversion = &s +} + +// Osversion returns the value of the "osversion" field in the mutation. +func (m *BouncerMutation) Osversion() (r string, exists bool) { + v := m.osversion + if v == nil { + return + } + return *v, true +} + +// OldOsversion returns the old "osversion" field's value of the Bouncer entity. +// If the Bouncer object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *BouncerMutation) OldOsversion(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldOsversion is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldOsversion requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldOsversion: %w", err) + } + return oldValue.Osversion, nil +} + +// ClearOsversion clears the value of the "osversion" field. +func (m *BouncerMutation) ClearOsversion() { + m.osversion = nil + m.clearedFields[bouncer.FieldOsversion] = struct{}{} +} + +// OsversionCleared returns if the "osversion" field was cleared in this mutation. +func (m *BouncerMutation) OsversionCleared() bool { + _, ok := m.clearedFields[bouncer.FieldOsversion] + return ok +} + +// ResetOsversion resets all changes to the "osversion" field. +func (m *BouncerMutation) ResetOsversion() { + m.osversion = nil + delete(m.clearedFields, bouncer.FieldOsversion) +} + +// SetFeatureflags sets the "featureflags" field. +func (m *BouncerMutation) SetFeatureflags(s string) { + m.featureflags = &s +} + +// Featureflags returns the value of the "featureflags" field in the mutation. +func (m *BouncerMutation) Featureflags() (r string, exists bool) { + v := m.featureflags + if v == nil { + return + } + return *v, true +} + +// OldFeatureflags returns the old "featureflags" field's value of the Bouncer entity. +// If the Bouncer object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *BouncerMutation) OldFeatureflags(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldFeatureflags is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldFeatureflags requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldFeatureflags: %w", err) + } + return oldValue.Featureflags, nil +} + +// ClearFeatureflags clears the value of the "featureflags" field. +func (m *BouncerMutation) ClearFeatureflags() { + m.featureflags = nil + m.clearedFields[bouncer.FieldFeatureflags] = struct{}{} +} + +// FeatureflagsCleared returns if the "featureflags" field was cleared in this mutation. +func (m *BouncerMutation) FeatureflagsCleared() bool { + _, ok := m.clearedFields[bouncer.FieldFeatureflags] + return ok +} + +// ResetFeatureflags resets all changes to the "featureflags" field. +func (m *BouncerMutation) ResetFeatureflags() { + m.featureflags = nil + delete(m.clearedFields, bouncer.FieldFeatureflags) +} + // Where appends a list predicates to the BouncerMutation builder. func (m *BouncerMutation) Where(ps ...predicate.Bouncer) { m.predicates = append(m.predicates, ps...) @@ -2942,7 +3095,7 @@ func (m *BouncerMutation) Type() string { // order to get all numeric fields that were incremented/decremented, call // AddedFields(). 
func (m *BouncerMutation) Fields() []string { - fields := make([]string, 0, 10) + fields := make([]string, 0, 13) if m.created_at != nil { fields = append(fields, bouncer.FieldCreatedAt) } @@ -2973,6 +3126,15 @@ func (m *BouncerMutation) Fields() []string { if m.auth_type != nil { fields = append(fields, bouncer.FieldAuthType) } + if m.osname != nil { + fields = append(fields, bouncer.FieldOsname) + } + if m.osversion != nil { + fields = append(fields, bouncer.FieldOsversion) + } + if m.featureflags != nil { + fields = append(fields, bouncer.FieldFeatureflags) + } return fields } @@ -3001,6 +3163,12 @@ func (m *BouncerMutation) Field(name string) (ent.Value, bool) { return m.LastPull() case bouncer.FieldAuthType: return m.AuthType() + case bouncer.FieldOsname: + return m.Osname() + case bouncer.FieldOsversion: + return m.Osversion() + case bouncer.FieldFeatureflags: + return m.Featureflags() } return nil, false } @@ -3030,6 +3198,12 @@ func (m *BouncerMutation) OldField(ctx context.Context, name string) (ent.Value, return m.OldLastPull(ctx) case bouncer.FieldAuthType: return m.OldAuthType(ctx) + case bouncer.FieldOsname: + return m.OldOsname(ctx) + case bouncer.FieldOsversion: + return m.OldOsversion(ctx) + case bouncer.FieldFeatureflags: + return m.OldFeatureflags(ctx) } return nil, fmt.Errorf("unknown Bouncer field %s", name) } @@ -3109,6 +3283,27 @@ func (m *BouncerMutation) SetField(name string, value ent.Value) error { } m.SetAuthType(v) return nil + case bouncer.FieldOsname: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetOsname(v) + return nil + case bouncer.FieldOsversion: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetOsversion(v) + return nil + case bouncer.FieldFeatureflags: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetFeatureflags(v) + return nil } 
return fmt.Errorf("unknown Bouncer field %s", name) } @@ -3151,6 +3346,15 @@ func (m *BouncerMutation) ClearedFields() []string { if m.FieldCleared(bouncer.FieldLastPull) { fields = append(fields, bouncer.FieldLastPull) } + if m.FieldCleared(bouncer.FieldOsname) { + fields = append(fields, bouncer.FieldOsname) + } + if m.FieldCleared(bouncer.FieldOsversion) { + fields = append(fields, bouncer.FieldOsversion) + } + if m.FieldCleared(bouncer.FieldFeatureflags) { + fields = append(fields, bouncer.FieldFeatureflags) + } return fields } @@ -3177,6 +3381,15 @@ func (m *BouncerMutation) ClearField(name string) error { case bouncer.FieldLastPull: m.ClearLastPull() return nil + case bouncer.FieldOsname: + m.ClearOsname() + return nil + case bouncer.FieldOsversion: + m.ClearOsversion() + return nil + case bouncer.FieldFeatureflags: + m.ClearFeatureflags() + return nil } return fmt.Errorf("unknown Bouncer nullable field %s", name) } @@ -3215,6 +3428,15 @@ func (m *BouncerMutation) ResetField(name string) error { case bouncer.FieldAuthType: m.ResetAuthType() return nil + case bouncer.FieldOsname: + m.ResetOsname() + return nil + case bouncer.FieldOsversion: + m.ResetOsversion() + return nil + case bouncer.FieldFeatureflags: + m.ResetFeatureflags() + return nil } return fmt.Errorf("unknown Bouncer field %s", name) } @@ -6318,6 +6540,11 @@ type MachineMutation struct { isValidated *bool status *string auth_type *string + osname *string + osversion *string + featureflags *string + hubstate *map[string]schema.ItemState + datasources *map[string]int64 clearedFields map[string]struct{} alerts map[int]struct{} removedalerts map[int]struct{} @@ -6922,6 +7149,251 @@ func (m *MachineMutation) ResetAuthType() { m.auth_type = nil } +// SetOsname sets the "osname" field. +func (m *MachineMutation) SetOsname(s string) { + m.osname = &s +} + +// Osname returns the value of the "osname" field in the mutation. 
+func (m *MachineMutation) Osname() (r string, exists bool) { + v := m.osname + if v == nil { + return + } + return *v, true +} + +// OldOsname returns the old "osname" field's value of the Machine entity. +// If the Machine object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *MachineMutation) OldOsname(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldOsname is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldOsname requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldOsname: %w", err) + } + return oldValue.Osname, nil +} + +// ClearOsname clears the value of the "osname" field. +func (m *MachineMutation) ClearOsname() { + m.osname = nil + m.clearedFields[machine.FieldOsname] = struct{}{} +} + +// OsnameCleared returns if the "osname" field was cleared in this mutation. +func (m *MachineMutation) OsnameCleared() bool { + _, ok := m.clearedFields[machine.FieldOsname] + return ok +} + +// ResetOsname resets all changes to the "osname" field. +func (m *MachineMutation) ResetOsname() { + m.osname = nil + delete(m.clearedFields, machine.FieldOsname) +} + +// SetOsversion sets the "osversion" field. +func (m *MachineMutation) SetOsversion(s string) { + m.osversion = &s +} + +// Osversion returns the value of the "osversion" field in the mutation. +func (m *MachineMutation) Osversion() (r string, exists bool) { + v := m.osversion + if v == nil { + return + } + return *v, true +} + +// OldOsversion returns the old "osversion" field's value of the Machine entity. +// If the Machine object wasn't provided to the builder, the object is fetched from the database. 
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *MachineMutation) OldOsversion(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldOsversion is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldOsversion requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldOsversion: %w", err) + } + return oldValue.Osversion, nil +} + +// ClearOsversion clears the value of the "osversion" field. +func (m *MachineMutation) ClearOsversion() { + m.osversion = nil + m.clearedFields[machine.FieldOsversion] = struct{}{} +} + +// OsversionCleared returns if the "osversion" field was cleared in this mutation. +func (m *MachineMutation) OsversionCleared() bool { + _, ok := m.clearedFields[machine.FieldOsversion] + return ok +} + +// ResetOsversion resets all changes to the "osversion" field. +func (m *MachineMutation) ResetOsversion() { + m.osversion = nil + delete(m.clearedFields, machine.FieldOsversion) +} + +// SetFeatureflags sets the "featureflags" field. +func (m *MachineMutation) SetFeatureflags(s string) { + m.featureflags = &s +} + +// Featureflags returns the value of the "featureflags" field in the mutation. +func (m *MachineMutation) Featureflags() (r string, exists bool) { + v := m.featureflags + if v == nil { + return + } + return *v, true +} + +// OldFeatureflags returns the old "featureflags" field's value of the Machine entity. +// If the Machine object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *MachineMutation) OldFeatureflags(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldFeatureflags is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldFeatureflags requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldFeatureflags: %w", err) + } + return oldValue.Featureflags, nil +} + +// ClearFeatureflags clears the value of the "featureflags" field. +func (m *MachineMutation) ClearFeatureflags() { + m.featureflags = nil + m.clearedFields[machine.FieldFeatureflags] = struct{}{} +} + +// FeatureflagsCleared returns if the "featureflags" field was cleared in this mutation. +func (m *MachineMutation) FeatureflagsCleared() bool { + _, ok := m.clearedFields[machine.FieldFeatureflags] + return ok +} + +// ResetFeatureflags resets all changes to the "featureflags" field. +func (m *MachineMutation) ResetFeatureflags() { + m.featureflags = nil + delete(m.clearedFields, machine.FieldFeatureflags) +} + +// SetHubstate sets the "hubstate" field. +func (m *MachineMutation) SetHubstate(ms map[string]schema.ItemState) { + m.hubstate = &ms +} + +// Hubstate returns the value of the "hubstate" field in the mutation. +func (m *MachineMutation) Hubstate() (r map[string]schema.ItemState, exists bool) { + v := m.hubstate + if v == nil { + return + } + return *v, true +} + +// OldHubstate returns the old "hubstate" field's value of the Machine entity. +// If the Machine object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *MachineMutation) OldHubstate(ctx context.Context) (v map[string]schema.ItemState, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldHubstate is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldHubstate requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldHubstate: %w", err) + } + return oldValue.Hubstate, nil +} + +// ClearHubstate clears the value of the "hubstate" field. +func (m *MachineMutation) ClearHubstate() { + m.hubstate = nil + m.clearedFields[machine.FieldHubstate] = struct{}{} +} + +// HubstateCleared returns if the "hubstate" field was cleared in this mutation. +func (m *MachineMutation) HubstateCleared() bool { + _, ok := m.clearedFields[machine.FieldHubstate] + return ok +} + +// ResetHubstate resets all changes to the "hubstate" field. +func (m *MachineMutation) ResetHubstate() { + m.hubstate = nil + delete(m.clearedFields, machine.FieldHubstate) +} + +// SetDatasources sets the "datasources" field. +func (m *MachineMutation) SetDatasources(value map[string]int64) { + m.datasources = &value +} + +// Datasources returns the value of the "datasources" field in the mutation. +func (m *MachineMutation) Datasources() (r map[string]int64, exists bool) { + v := m.datasources + if v == nil { + return + } + return *v, true +} + +// OldDatasources returns the old "datasources" field's value of the Machine entity. +// If the Machine object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *MachineMutation) OldDatasources(ctx context.Context) (v map[string]int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldDatasources is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldDatasources requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldDatasources: %w", err) + } + return oldValue.Datasources, nil +} + +// ClearDatasources clears the value of the "datasources" field. +func (m *MachineMutation) ClearDatasources() { + m.datasources = nil + m.clearedFields[machine.FieldDatasources] = struct{}{} +} + +// DatasourcesCleared returns if the "datasources" field was cleared in this mutation. +func (m *MachineMutation) DatasourcesCleared() bool { + _, ok := m.clearedFields[machine.FieldDatasources] + return ok +} + +// ResetDatasources resets all changes to the "datasources" field. +func (m *MachineMutation) ResetDatasources() { + m.datasources = nil + delete(m.clearedFields, machine.FieldDatasources) +} + // AddAlertIDs adds the "alerts" edge to the Alert entity by ids. func (m *MachineMutation) AddAlertIDs(ids ...int) { if m.alerts == nil { @@ -7010,7 +7482,7 @@ func (m *MachineMutation) Type() string { // order to get all numeric fields that were incremented/decremented, call // AddedFields(). 
func (m *MachineMutation) Fields() []string { - fields := make([]string, 0, 12) + fields := make([]string, 0, 17) if m.created_at != nil { fields = append(fields, machine.FieldCreatedAt) } @@ -7047,6 +7519,21 @@ func (m *MachineMutation) Fields() []string { if m.auth_type != nil { fields = append(fields, machine.FieldAuthType) } + if m.osname != nil { + fields = append(fields, machine.FieldOsname) + } + if m.osversion != nil { + fields = append(fields, machine.FieldOsversion) + } + if m.featureflags != nil { + fields = append(fields, machine.FieldFeatureflags) + } + if m.hubstate != nil { + fields = append(fields, machine.FieldHubstate) + } + if m.datasources != nil { + fields = append(fields, machine.FieldDatasources) + } return fields } @@ -7079,6 +7566,16 @@ func (m *MachineMutation) Field(name string) (ent.Value, bool) { return m.Status() case machine.FieldAuthType: return m.AuthType() + case machine.FieldOsname: + return m.Osname() + case machine.FieldOsversion: + return m.Osversion() + case machine.FieldFeatureflags: + return m.Featureflags() + case machine.FieldHubstate: + return m.Hubstate() + case machine.FieldDatasources: + return m.Datasources() } return nil, false } @@ -7112,6 +7609,16 @@ func (m *MachineMutation) OldField(ctx context.Context, name string) (ent.Value, return m.OldStatus(ctx) case machine.FieldAuthType: return m.OldAuthType(ctx) + case machine.FieldOsname: + return m.OldOsname(ctx) + case machine.FieldOsversion: + return m.OldOsversion(ctx) + case machine.FieldFeatureflags: + return m.OldFeatureflags(ctx) + case machine.FieldHubstate: + return m.OldHubstate(ctx) + case machine.FieldDatasources: + return m.OldDatasources(ctx) } return nil, fmt.Errorf("unknown Machine field %s", name) } @@ -7205,22 +7712,57 @@ func (m *MachineMutation) SetField(name string, value ent.Value) error { } m.SetAuthType(v) return nil - } - return fmt.Errorf("unknown Machine field %s", name) -} - -// AddedFields returns all numeric fields that were 
incremented/decremented during -// this mutation. -func (m *MachineMutation) AddedFields() []string { - return nil -} - -// AddedField returns the numeric value that was incremented/decremented on a field -// with the given name. The second boolean return value indicates that this field -// was not set, or was not defined in the schema. -func (m *MachineMutation) AddedField(name string) (ent.Value, bool) { - return nil, false -} + case machine.FieldOsname: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetOsname(v) + return nil + case machine.FieldOsversion: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetOsversion(v) + return nil + case machine.FieldFeatureflags: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetFeatureflags(v) + return nil + case machine.FieldHubstate: + v, ok := value.(map[string]schema.ItemState) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetHubstate(v) + return nil + case machine.FieldDatasources: + v, ok := value.(map[string]int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetDatasources(v) + return nil + } + return fmt.Errorf("unknown Machine field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *MachineMutation) AddedFields() []string { + return nil +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *MachineMutation) AddedField(name string) (ent.Value, bool) { + return nil, false +} // AddField adds the value to the field with the given name. 
It returns an error if // the field is not defined in the schema, or if the type mismatched the field @@ -7250,6 +7792,21 @@ func (m *MachineMutation) ClearedFields() []string { if m.FieldCleared(machine.FieldStatus) { fields = append(fields, machine.FieldStatus) } + if m.FieldCleared(machine.FieldOsname) { + fields = append(fields, machine.FieldOsname) + } + if m.FieldCleared(machine.FieldOsversion) { + fields = append(fields, machine.FieldOsversion) + } + if m.FieldCleared(machine.FieldFeatureflags) { + fields = append(fields, machine.FieldFeatureflags) + } + if m.FieldCleared(machine.FieldHubstate) { + fields = append(fields, machine.FieldHubstate) + } + if m.FieldCleared(machine.FieldDatasources) { + fields = append(fields, machine.FieldDatasources) + } return fields } @@ -7279,6 +7836,21 @@ func (m *MachineMutation) ClearField(name string) error { case machine.FieldStatus: m.ClearStatus() return nil + case machine.FieldOsname: + m.ClearOsname() + return nil + case machine.FieldOsversion: + m.ClearOsversion() + return nil + case machine.FieldFeatureflags: + m.ClearFeatureflags() + return nil + case machine.FieldHubstate: + m.ClearHubstate() + return nil + case machine.FieldDatasources: + m.ClearDatasources() + return nil } return fmt.Errorf("unknown Machine nullable field %s", name) } @@ -7323,6 +7895,21 @@ func (m *MachineMutation) ResetField(name string) error { case machine.FieldAuthType: m.ResetAuthType() return nil + case machine.FieldOsname: + m.ResetOsname() + return nil + case machine.FieldOsversion: + m.ResetOsversion() + return nil + case machine.FieldFeatureflags: + m.ResetFeatureflags() + return nil + case machine.FieldHubstate: + m.ResetHubstate() + return nil + case machine.FieldDatasources: + m.ResetDatasources() + return nil } return fmt.Errorf("unknown Machine field %s", name) } @@ -8044,3 +8631,567 @@ func (m *MetaMutation) ResetEdge(name string) error { } return fmt.Errorf("unknown Meta edge %s", name) } + +// MetricMutation represents an 
operation that mutates the Metric nodes in the graph. +type MetricMutation struct { + config + op Op + typ string + id *int + generated_type *metric.GeneratedType + generated_by *string + collected_at *time.Time + pushed_at *time.Time + payload *string + clearedFields map[string]struct{} + done bool + oldValue func(context.Context) (*Metric, error) + predicates []predicate.Metric +} + +var _ ent.Mutation = (*MetricMutation)(nil) + +// metricOption allows management of the mutation configuration using functional options. +type metricOption func(*MetricMutation) + +// newMetricMutation creates new mutation for the Metric entity. +func newMetricMutation(c config, op Op, opts ...metricOption) *MetricMutation { + m := &MetricMutation{ + config: c, + op: op, + typ: TypeMetric, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withMetricID sets the ID field of the mutation. +func withMetricID(id int) metricOption { + return func(m *MetricMutation) { + var ( + err error + once sync.Once + value *Metric + ) + m.oldValue = func(ctx context.Context) (*Metric, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().Metric.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withMetric sets the old Metric of the mutation. +func withMetric(node *Metric) metricOption { + return func(m *MetricMutation) { + m.oldValue = func(context.Context) (*Metric, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m MetricMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. 
+func (m MetricMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *MetricMutation) ID() (id int, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. +// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. +func (m *MetricMutation) IDs(ctx context.Context) ([]int, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []int{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().Metric.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetGeneratedType sets the "generated_type" field. +func (m *MetricMutation) SetGeneratedType(mt metric.GeneratedType) { + m.generated_type = &mt +} + +// GeneratedType returns the value of the "generated_type" field in the mutation. +func (m *MetricMutation) GeneratedType() (r metric.GeneratedType, exists bool) { + v := m.generated_type + if v == nil { + return + } + return *v, true +} + +// OldGeneratedType returns the old "generated_type" field's value of the Metric entity. +// If the Metric object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *MetricMutation) OldGeneratedType(ctx context.Context) (v metric.GeneratedType, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldGeneratedType is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldGeneratedType requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldGeneratedType: %w", err) + } + return oldValue.GeneratedType, nil +} + +// ResetGeneratedType resets all changes to the "generated_type" field. +func (m *MetricMutation) ResetGeneratedType() { + m.generated_type = nil +} + +// SetGeneratedBy sets the "generated_by" field. +func (m *MetricMutation) SetGeneratedBy(s string) { + m.generated_by = &s +} + +// GeneratedBy returns the value of the "generated_by" field in the mutation. +func (m *MetricMutation) GeneratedBy() (r string, exists bool) { + v := m.generated_by + if v == nil { + return + } + return *v, true +} + +// OldGeneratedBy returns the old "generated_by" field's value of the Metric entity. +// If the Metric object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *MetricMutation) OldGeneratedBy(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldGeneratedBy is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldGeneratedBy requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldGeneratedBy: %w", err) + } + return oldValue.GeneratedBy, nil +} + +// ResetGeneratedBy resets all changes to the "generated_by" field. +func (m *MetricMutation) ResetGeneratedBy() { + m.generated_by = nil +} + +// SetCollectedAt sets the "collected_at" field. 
+func (m *MetricMutation) SetCollectedAt(t time.Time) { + m.collected_at = &t +} + +// CollectedAt returns the value of the "collected_at" field in the mutation. +func (m *MetricMutation) CollectedAt() (r time.Time, exists bool) { + v := m.collected_at + if v == nil { + return + } + return *v, true +} + +// OldCollectedAt returns the old "collected_at" field's value of the Metric entity. +// If the Metric object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *MetricMutation) OldCollectedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCollectedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCollectedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCollectedAt: %w", err) + } + return oldValue.CollectedAt, nil +} + +// ResetCollectedAt resets all changes to the "collected_at" field. +func (m *MetricMutation) ResetCollectedAt() { + m.collected_at = nil +} + +// SetPushedAt sets the "pushed_at" field. +func (m *MetricMutation) SetPushedAt(t time.Time) { + m.pushed_at = &t +} + +// PushedAt returns the value of the "pushed_at" field in the mutation. +func (m *MetricMutation) PushedAt() (r time.Time, exists bool) { + v := m.pushed_at + if v == nil { + return + } + return *v, true +} + +// OldPushedAt returns the old "pushed_at" field's value of the Metric entity. +// If the Metric object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *MetricMutation) OldPushedAt(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldPushedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldPushedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldPushedAt: %w", err) + } + return oldValue.PushedAt, nil +} + +// ClearPushedAt clears the value of the "pushed_at" field. +func (m *MetricMutation) ClearPushedAt() { + m.pushed_at = nil + m.clearedFields[metric.FieldPushedAt] = struct{}{} +} + +// PushedAtCleared returns if the "pushed_at" field was cleared in this mutation. +func (m *MetricMutation) PushedAtCleared() bool { + _, ok := m.clearedFields[metric.FieldPushedAt] + return ok +} + +// ResetPushedAt resets all changes to the "pushed_at" field. +func (m *MetricMutation) ResetPushedAt() { + m.pushed_at = nil + delete(m.clearedFields, metric.FieldPushedAt) +} + +// SetPayload sets the "payload" field. +func (m *MetricMutation) SetPayload(s string) { + m.payload = &s +} + +// Payload returns the value of the "payload" field in the mutation. +func (m *MetricMutation) Payload() (r string, exists bool) { + v := m.payload + if v == nil { + return + } + return *v, true +} + +// OldPayload returns the old "payload" field's value of the Metric entity. +// If the Metric object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *MetricMutation) OldPayload(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldPayload is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldPayload requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldPayload: %w", err) + } + return oldValue.Payload, nil +} + +// ResetPayload resets all changes to the "payload" field. +func (m *MetricMutation) ResetPayload() { + m.payload = nil +} + +// Where appends a list predicates to the MetricMutation builder. +func (m *MetricMutation) Where(ps ...predicate.Metric) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the MetricMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *MetricMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.Metric, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *MetricMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *MetricMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (Metric). +func (m *MetricMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). 
+func (m *MetricMutation) Fields() []string { + fields := make([]string, 0, 5) + if m.generated_type != nil { + fields = append(fields, metric.FieldGeneratedType) + } + if m.generated_by != nil { + fields = append(fields, metric.FieldGeneratedBy) + } + if m.collected_at != nil { + fields = append(fields, metric.FieldCollectedAt) + } + if m.pushed_at != nil { + fields = append(fields, metric.FieldPushedAt) + } + if m.payload != nil { + fields = append(fields, metric.FieldPayload) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *MetricMutation) Field(name string) (ent.Value, bool) { + switch name { + case metric.FieldGeneratedType: + return m.GeneratedType() + case metric.FieldGeneratedBy: + return m.GeneratedBy() + case metric.FieldCollectedAt: + return m.CollectedAt() + case metric.FieldPushedAt: + return m.PushedAt() + case metric.FieldPayload: + return m.Payload() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *MetricMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case metric.FieldGeneratedType: + return m.OldGeneratedType(ctx) + case metric.FieldGeneratedBy: + return m.OldGeneratedBy(ctx) + case metric.FieldCollectedAt: + return m.OldCollectedAt(ctx) + case metric.FieldPushedAt: + return m.OldPushedAt(ctx) + case metric.FieldPayload: + return m.OldPayload(ctx) + } + return nil, fmt.Errorf("unknown Metric field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. 
+func (m *MetricMutation) SetField(name string, value ent.Value) error { + switch name { + case metric.FieldGeneratedType: + v, ok := value.(metric.GeneratedType) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetGeneratedType(v) + return nil + case metric.FieldGeneratedBy: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetGeneratedBy(v) + return nil + case metric.FieldCollectedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCollectedAt(v) + return nil + case metric.FieldPushedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetPushedAt(v) + return nil + case metric.FieldPayload: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetPayload(v) + return nil + } + return fmt.Errorf("unknown Metric field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *MetricMutation) AddedFields() []string { + return nil +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *MetricMutation) AddedField(name string) (ent.Value, bool) { + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *MetricMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown Metric numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. 
+func (m *MetricMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(metric.FieldPushedAt) { + fields = append(fields, metric.FieldPushedAt) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *MetricMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *MetricMutation) ClearField(name string) error { + switch name { + case metric.FieldPushedAt: + m.ClearPushedAt() + return nil + } + return fmt.Errorf("unknown Metric nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *MetricMutation) ResetField(name string) error { + switch name { + case metric.FieldGeneratedType: + m.ResetGeneratedType() + return nil + case metric.FieldGeneratedBy: + m.ResetGeneratedBy() + return nil + case metric.FieldCollectedAt: + m.ResetCollectedAt() + return nil + case metric.FieldPushedAt: + m.ResetPushedAt() + return nil + case metric.FieldPayload: + m.ResetPayload() + return nil + } + return fmt.Errorf("unknown Metric field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *MetricMutation) AddedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *MetricMutation) AddedIDs(name string) []ent.Value { + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. 
+func (m *MetricMutation) RemovedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *MetricMutation) RemovedIDs(name string) []ent.Value { + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *MetricMutation) ClearedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *MetricMutation) EdgeCleared(name string) bool { + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *MetricMutation) ClearEdge(name string) error { + return fmt.Errorf("unknown Metric unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *MetricMutation) ResetEdge(name string) error { + return fmt.Errorf("unknown Metric edge %s", name) +} diff --git a/pkg/database/ent/predicate/predicate.go b/pkg/database/ent/predicate/predicate.go index ad2e6d3f327..8ad03e2fc48 100644 --- a/pkg/database/ent/predicate/predicate.go +++ b/pkg/database/ent/predicate/predicate.go @@ -29,3 +29,6 @@ type Machine func(*sql.Selector) // Meta is the predicate function for meta builders. type Meta func(*sql.Selector) + +// Metric is the predicate function for metric builders. 
+type Metric func(*sql.Selector) diff --git a/pkg/database/ent/schema/bouncer.go b/pkg/database/ent/schema/bouncer.go index 242b5f5fe4a..599c4c404fc 100644 --- a/pkg/database/ent/schema/bouncer.go +++ b/pkg/database/ent/schema/bouncer.go @@ -30,6 +30,9 @@ func (Bouncer) Fields() []ent.Field { field.String("version").Optional().StructTag(`json:"version"`), field.Time("last_pull").Nillable().Optional().StructTag(`json:"last_pull"`), field.String("auth_type").StructTag(`json:"auth_type"`).Default(types.ApiKeyAuthType), + field.String("osname").Optional(), + field.String("osversion").Optional(), + field.String("featureflags").Optional(), } } diff --git a/pkg/database/ent/schema/machine.go b/pkg/database/ent/schema/machine.go index 7b4d97ed35c..6fdcea2d824 100644 --- a/pkg/database/ent/schema/machine.go +++ b/pkg/database/ent/schema/machine.go @@ -8,6 +8,12 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) +// ItemState is defined here instead of using pkg/models/HubItem to avoid introducing a dependency +type ItemState struct { + Status string `json:"status,omitempty"` + Version string `json:"version,omitempty"` +} + // Machine holds the schema definition for the Machine entity. 
type Machine struct { ent.Schema @@ -38,6 +44,11 @@ func (Machine) Fields() []ent.Field { Default(false), field.String("status").Optional(), field.String("auth_type").Default(types.PasswordAuthType).StructTag(`json:"auth_type"`), + field.String("osname").Optional(), + field.String("osversion").Optional(), + field.String("featureflags").Optional(), + field.JSON("hubstate", map[string]ItemState{}).Optional(), + field.JSON("datasources", map[string]int64{}).Optional(), } } diff --git a/pkg/database/ent/schema/metric.go b/pkg/database/ent/schema/metric.go new file mode 100644 index 00000000000..9de3f21f464 --- /dev/null +++ b/pkg/database/ent/schema/metric.go @@ -0,0 +1,43 @@ +package schema + +import ( + "entgo.io/ent" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/index" +) + +// Metric is actually a set of metrics collected by a device +// (logprocessor, bouncer, etc) at a given time. +type Metric struct { + ent.Schema +} + +func (Metric) Fields() []ent.Field { + return []ent.Field{ + field.Enum("generated_type"). + Values("LP", "RC"). + Immutable(). + Comment("Type of the metrics source: LP=logprocessor, RC=remediation"), + field.String("generated_by"). + Immutable(). + Comment("Source of the metrics: machine id, bouncer name...\nIt must come from the auth middleware."), + field.Time("collected_at"). + Immutable(). + Comment("When the metrics are collected/calculated at the source"), + field.Time("pushed_at"). + Nillable(). + Optional(). + Comment("When the metrics are sent to the console"), + field.String("payload"). + Immutable(). + Comment("The actual metrics (item0)"), + } +} + +func (Metric) Indexes() []ent.Index { + return []ent.Index{ + // Don't store the same metrics multiple times. + index.Fields("generated_type", "generated_by", "collected_at"). 
+ Unique(), + } +} diff --git a/pkg/database/ent/tx.go b/pkg/database/ent/tx.go index 27b39c12502..bf8221ce4a5 100644 --- a/pkg/database/ent/tx.go +++ b/pkg/database/ent/tx.go @@ -28,6 +28,8 @@ type Tx struct { Machine *MachineClient // Meta is the client for interacting with the Meta builders. Meta *MetaClient + // Metric is the client for interacting with the Metric builders. + Metric *MetricClient // lazily loaded. client *Client @@ -167,6 +169,7 @@ func (tx *Tx) init() { tx.Lock = NewLockClient(tx.config) tx.Machine = NewMachineClient(tx.config) tx.Meta = NewMetaClient(tx.config) + tx.Metric = NewMetricClient(tx.config) } // txDriver wraps the given dialect.Tx with a nop dialect.Driver implementation. From 27935f6d4cf0ce8a32ff0462c46738645db8aa6d Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 1 Jul 2024 16:35:34 +0200 Subject: [PATCH 204/318] fix "cscli [machines|bouncers] delete" autocompletion (#3101) * fix "cscli [machines|bouncers] delete" autocompletion * func test --- cmd/crowdsec-cli/bouncers.go | 20 +++++++++++++++++++- cmd/crowdsec-cli/machines.go | 20 +++++++++++++++++++- test/bats/10_bouncers.bats | 12 ++++++++++++ test/bats/30_machines.bats | 12 ++++++++++++ 4 files changed, 62 insertions(+), 2 deletions(-) diff --git a/cmd/crowdsec-cli/bouncers.go b/cmd/crowdsec-cli/bouncers.go index 0c96cc5eee4..3da9575146e 100644 --- a/cmd/crowdsec-cli/bouncers.go +++ b/cmd/crowdsec-cli/bouncers.go @@ -206,10 +206,28 @@ cscli bouncers add MyBouncerName --key `, return cmd } -func (cli *cliBouncers) deleteValid(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { +func (cli *cliBouncers) deleteValid(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + // need to load config and db because PersistentPreRunE is not called for completions + + var err error + + cfg := cli.cfg() + + if err = require.LAPI(cfg); err != nil { + cobra.CompError("unable 
to list bouncers " + err.Error()) + return nil, cobra.ShellCompDirectiveNoFileComp + } + + cli.db, err = require.DBClient(cmd.Context(), cfg.DbConfig) + if err != nil { + cobra.CompError("unable to list bouncers " + err.Error()) + return nil, cobra.ShellCompDirectiveNoFileComp + } + bouncers, err := cli.db.ListBouncers() if err != nil { cobra.CompError("unable to list bouncers " + err.Error()) + return nil, cobra.ShellCompDirectiveNoFileComp } ret := []string{} diff --git a/cmd/crowdsec-cli/machines.go b/cmd/crowdsec-cli/machines.go index 9014d3d803a..746045d0eab 100644 --- a/cmd/crowdsec-cli/machines.go +++ b/cmd/crowdsec-cli/machines.go @@ -349,10 +349,28 @@ func (cli *cliMachines) add(args []string, machinePassword string, dumpFile stri return nil } -func (cli *cliMachines) deleteValid(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { +func (cli *cliMachines) deleteValid(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + // need to load config and db because PersistentPreRunE is not called for completions + + var err error + + cfg := cli.cfg() + + if err = require.LAPI(cfg); err != nil { + cobra.CompError("unable to list machines " + err.Error()) + return nil, cobra.ShellCompDirectiveNoFileComp + } + + cli.db, err = require.DBClient(cmd.Context(), cfg.DbConfig) + if err != nil { + cobra.CompError("unable to list machines " + err.Error()) + return nil, cobra.ShellCompDirectiveNoFileComp + } + machines, err := cli.db.ListMachines() if err != nil { cobra.CompError("unable to list machines " + err.Error()) + return nil, cobra.ShellCompDirectiveNoFileComp } ret := []string{} diff --git a/test/bats/10_bouncers.bats b/test/bats/10_bouncers.bats index 5bf4b5358db..9e795e584b4 100644 --- a/test/bats/10_bouncers.bats +++ b/test/bats/10_bouncers.bats @@ -42,6 +42,18 @@ teardown() { assert_json '[]' } +@test "bouncers delete has autocompletion" { + rune -0 cscli bouncers add foo1 + rune -0 
cscli bouncers add foo2 + rune -0 cscli bouncers add bar + rune -0 cscli bouncers add baz + rune -0 cscli __complete bouncers delete 'foo' + assert_line --index 0 'foo1' + assert_line --index 1 'foo2' + refute_line 'bar' + refute_line 'baz' +} + @test "cscli bouncers list" { export API_KEY=bouncerkey rune -0 cscli bouncers add ciTestBouncer --key "$API_KEY" diff --git a/test/bats/30_machines.bats b/test/bats/30_machines.bats index 1d65151b6c8..1af5e97dcb4 100644 --- a/test/bats/30_machines.bats +++ b/test/bats/30_machines.bats @@ -62,6 +62,18 @@ teardown() { assert_output 1 } +@test "machines delete has autocompletion" { + rune -0 cscli machines add -a -f /dev/null foo1 + rune -0 cscli machines add -a -f /dev/null foo2 + rune -0 cscli machines add -a -f /dev/null bar + rune -0 cscli machines add -a -f /dev/null baz + rune -0 cscli __complete machines delete 'foo' + assert_line --index 0 'foo1' + assert_line --index 1 'foo2' + refute_line 'bar' + refute_line 'baz' +} + @test "heartbeat is initially null" { rune -0 cscli machines add foo --auto --file /dev/null rune -0 cscli machines list -o json From b1cf83975e614a34f2fc7b26a3ba130fa3ec0cb4 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 2 Jul 2024 16:08:41 +0200 Subject: [PATCH 205/318] db refact: drop column machines.status (#3102) --- pkg/database/ent/machine.go | 13 +---- pkg/database/ent/machine/machine.go | 8 --- pkg/database/ent/machine/where.go | 80 ----------------------------- pkg/database/ent/machine_create.go | 18 ------- pkg/database/ent/machine_update.go | 52 ------------------- pkg/database/ent/migrate/schema.go | 1 - pkg/database/ent/mutation.go | 75 +-------------------------- pkg/database/ent/runtime.go | 2 +- pkg/database/ent/schema/machine.go | 1 - 9 files changed, 3 insertions(+), 247 deletions(-) diff --git a/pkg/database/ent/machine.go b/pkg/database/ent/machine.go index fddb2e6a8b3..24c9fdb57e6 100644 --- a/pkg/database/ent/machine.go +++ 
b/pkg/database/ent/machine.go @@ -39,8 +39,6 @@ type Machine struct { Version string `json:"version,omitempty"` // IsValidated holds the value of the "isValidated" field. IsValidated bool `json:"isValidated,omitempty"` - // Status holds the value of the "status" field. - Status string `json:"status,omitempty"` // AuthType holds the value of the "auth_type" field. AuthType string `json:"auth_type"` // Osname holds the value of the "osname" field. @@ -88,7 +86,7 @@ func (*Machine) scanValues(columns []string) ([]any, error) { values[i] = new(sql.NullBool) case machine.FieldID: values[i] = new(sql.NullInt64) - case machine.FieldMachineId, machine.FieldPassword, machine.FieldIpAddress, machine.FieldScenarios, machine.FieldVersion, machine.FieldStatus, machine.FieldAuthType, machine.FieldOsname, machine.FieldOsversion, machine.FieldFeatureflags: + case machine.FieldMachineId, machine.FieldPassword, machine.FieldIpAddress, machine.FieldScenarios, machine.FieldVersion, machine.FieldAuthType, machine.FieldOsname, machine.FieldOsversion, machine.FieldFeatureflags: values[i] = new(sql.NullString) case machine.FieldCreatedAt, machine.FieldUpdatedAt, machine.FieldLastPush, machine.FieldLastHeartbeat: values[i] = new(sql.NullTime) @@ -175,12 +173,6 @@ func (m *Machine) assignValues(columns []string, values []any) error { } else if value.Valid { m.IsValidated = value.Bool } - case machine.FieldStatus: - if value, ok := values[i].(*sql.NullString); !ok { - return fmt.Errorf("unexpected type %T for field status", values[i]) - } else if value.Valid { - m.Status = value.String - } case machine.FieldAuthType: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field auth_type", values[i]) @@ -295,9 +287,6 @@ func (m *Machine) String() string { builder.WriteString("isValidated=") builder.WriteString(fmt.Sprintf("%v", m.IsValidated)) builder.WriteString(", ") - builder.WriteString("status=") - builder.WriteString(m.Status) - 
builder.WriteString(", ") builder.WriteString("auth_type=") builder.WriteString(m.AuthType) builder.WriteString(", ") diff --git a/pkg/database/ent/machine/machine.go b/pkg/database/ent/machine/machine.go index 179059edd4d..009e6e19c35 100644 --- a/pkg/database/ent/machine/machine.go +++ b/pkg/database/ent/machine/machine.go @@ -34,8 +34,6 @@ const ( FieldVersion = "version" // FieldIsValidated holds the string denoting the isvalidated field in the database. FieldIsValidated = "is_validated" - // FieldStatus holds the string denoting the status field in the database. - FieldStatus = "status" // FieldAuthType holds the string denoting the auth_type field in the database. FieldAuthType = "auth_type" // FieldOsname holds the string denoting the osname field in the database. @@ -74,7 +72,6 @@ var Columns = []string{ FieldScenarios, FieldVersion, FieldIsValidated, - FieldStatus, FieldAuthType, FieldOsname, FieldOsversion, @@ -168,11 +165,6 @@ func ByIsValidated(opts ...sql.OrderTermOption) OrderOption { return sql.OrderByField(FieldIsValidated, opts...).ToFunc() } -// ByStatus orders the results by the status field. -func ByStatus(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldStatus, opts...).ToFunc() -} - // ByAuthType orders the results by the auth_type field. func ByAuthType(opts ...sql.OrderTermOption) OrderOption { return sql.OrderByField(FieldAuthType, opts...).ToFunc() diff --git a/pkg/database/ent/machine/where.go b/pkg/database/ent/machine/where.go index aca66135f5c..de523510f33 100644 --- a/pkg/database/ent/machine/where.go +++ b/pkg/database/ent/machine/where.go @@ -105,11 +105,6 @@ func IsValidated(v bool) predicate.Machine { return predicate.Machine(sql.FieldEQ(FieldIsValidated, v)) } -// Status applies equality check predicate on the "status" field. It's identical to StatusEQ. 
-func Status(v string) predicate.Machine { - return predicate.Machine(sql.FieldEQ(FieldStatus, v)) -} - // AuthType applies equality check predicate on the "auth_type" field. It's identical to AuthTypeEQ. func AuthType(v string) predicate.Machine { return predicate.Machine(sql.FieldEQ(FieldAuthType, v)) @@ -665,81 +660,6 @@ func IsValidatedNEQ(v bool) predicate.Machine { return predicate.Machine(sql.FieldNEQ(FieldIsValidated, v)) } -// StatusEQ applies the EQ predicate on the "status" field. -func StatusEQ(v string) predicate.Machine { - return predicate.Machine(sql.FieldEQ(FieldStatus, v)) -} - -// StatusNEQ applies the NEQ predicate on the "status" field. -func StatusNEQ(v string) predicate.Machine { - return predicate.Machine(sql.FieldNEQ(FieldStatus, v)) -} - -// StatusIn applies the In predicate on the "status" field. -func StatusIn(vs ...string) predicate.Machine { - return predicate.Machine(sql.FieldIn(FieldStatus, vs...)) -} - -// StatusNotIn applies the NotIn predicate on the "status" field. -func StatusNotIn(vs ...string) predicate.Machine { - return predicate.Machine(sql.FieldNotIn(FieldStatus, vs...)) -} - -// StatusGT applies the GT predicate on the "status" field. -func StatusGT(v string) predicate.Machine { - return predicate.Machine(sql.FieldGT(FieldStatus, v)) -} - -// StatusGTE applies the GTE predicate on the "status" field. -func StatusGTE(v string) predicate.Machine { - return predicate.Machine(sql.FieldGTE(FieldStatus, v)) -} - -// StatusLT applies the LT predicate on the "status" field. -func StatusLT(v string) predicate.Machine { - return predicate.Machine(sql.FieldLT(FieldStatus, v)) -} - -// StatusLTE applies the LTE predicate on the "status" field. -func StatusLTE(v string) predicate.Machine { - return predicate.Machine(sql.FieldLTE(FieldStatus, v)) -} - -// StatusContains applies the Contains predicate on the "status" field. 
-func StatusContains(v string) predicate.Machine { - return predicate.Machine(sql.FieldContains(FieldStatus, v)) -} - -// StatusHasPrefix applies the HasPrefix predicate on the "status" field. -func StatusHasPrefix(v string) predicate.Machine { - return predicate.Machine(sql.FieldHasPrefix(FieldStatus, v)) -} - -// StatusHasSuffix applies the HasSuffix predicate on the "status" field. -func StatusHasSuffix(v string) predicate.Machine { - return predicate.Machine(sql.FieldHasSuffix(FieldStatus, v)) -} - -// StatusIsNil applies the IsNil predicate on the "status" field. -func StatusIsNil() predicate.Machine { - return predicate.Machine(sql.FieldIsNull(FieldStatus)) -} - -// StatusNotNil applies the NotNil predicate on the "status" field. -func StatusNotNil() predicate.Machine { - return predicate.Machine(sql.FieldNotNull(FieldStatus)) -} - -// StatusEqualFold applies the EqualFold predicate on the "status" field. -func StatusEqualFold(v string) predicate.Machine { - return predicate.Machine(sql.FieldEqualFold(FieldStatus, v)) -} - -// StatusContainsFold applies the ContainsFold predicate on the "status" field. -func StatusContainsFold(v string) predicate.Machine { - return predicate.Machine(sql.FieldContainsFold(FieldStatus, v)) -} - // AuthTypeEQ applies the EQ predicate on the "auth_type" field. func AuthTypeEQ(v string) predicate.Machine { return predicate.Machine(sql.FieldEQ(FieldAuthType, v)) diff --git a/pkg/database/ent/machine_create.go b/pkg/database/ent/machine_create.go index 4ae0e5a9d1f..a68f7a23966 100644 --- a/pkg/database/ent/machine_create.go +++ b/pkg/database/ent/machine_create.go @@ -138,20 +138,6 @@ func (mc *MachineCreate) SetNillableIsValidated(b *bool) *MachineCreate { return mc } -// SetStatus sets the "status" field. -func (mc *MachineCreate) SetStatus(s string) *MachineCreate { - mc.mutation.SetStatus(s) - return mc -} - -// SetNillableStatus sets the "status" field if the given value is not nil. 
-func (mc *MachineCreate) SetNillableStatus(s *string) *MachineCreate { - if s != nil { - mc.SetStatus(*s) - } - return mc -} - // SetAuthType sets the "auth_type" field. func (mc *MachineCreate) SetAuthType(s string) *MachineCreate { mc.mutation.SetAuthType(s) @@ -386,10 +372,6 @@ func (mc *MachineCreate) createSpec() (*Machine, *sqlgraph.CreateSpec) { _spec.SetField(machine.FieldIsValidated, field.TypeBool, value) _node.IsValidated = value } - if value, ok := mc.mutation.Status(); ok { - _spec.SetField(machine.FieldStatus, field.TypeString, value) - _node.Status = value - } if value, ok := mc.mutation.AuthType(); ok { _spec.SetField(machine.FieldAuthType, field.TypeString, value) _node.AuthType = value diff --git a/pkg/database/ent/machine_update.go b/pkg/database/ent/machine_update.go index aa0f02542c1..c9a4f0b72ff 100644 --- a/pkg/database/ent/machine_update.go +++ b/pkg/database/ent/machine_update.go @@ -158,26 +158,6 @@ func (mu *MachineUpdate) SetNillableIsValidated(b *bool) *MachineUpdate { return mu } -// SetStatus sets the "status" field. -func (mu *MachineUpdate) SetStatus(s string) *MachineUpdate { - mu.mutation.SetStatus(s) - return mu -} - -// SetNillableStatus sets the "status" field if the given value is not nil. -func (mu *MachineUpdate) SetNillableStatus(s *string) *MachineUpdate { - if s != nil { - mu.SetStatus(*s) - } - return mu -} - -// ClearStatus clears the value of the "status" field. -func (mu *MachineUpdate) ClearStatus() *MachineUpdate { - mu.mutation.ClearStatus() - return mu -} - // SetAuthType sets the "auth_type" field. 
func (mu *MachineUpdate) SetAuthType(s string) *MachineUpdate { mu.mutation.SetAuthType(s) @@ -411,12 +391,6 @@ func (mu *MachineUpdate) sqlSave(ctx context.Context) (n int, err error) { if value, ok := mu.mutation.IsValidated(); ok { _spec.SetField(machine.FieldIsValidated, field.TypeBool, value) } - if value, ok := mu.mutation.Status(); ok { - _spec.SetField(machine.FieldStatus, field.TypeString, value) - } - if mu.mutation.StatusCleared() { - _spec.ClearField(machine.FieldStatus, field.TypeString) - } if value, ok := mu.mutation.AuthType(); ok { _spec.SetField(machine.FieldAuthType, field.TypeString, value) } @@ -643,26 +617,6 @@ func (muo *MachineUpdateOne) SetNillableIsValidated(b *bool) *MachineUpdateOne { return muo } -// SetStatus sets the "status" field. -func (muo *MachineUpdateOne) SetStatus(s string) *MachineUpdateOne { - muo.mutation.SetStatus(s) - return muo -} - -// SetNillableStatus sets the "status" field if the given value is not nil. -func (muo *MachineUpdateOne) SetNillableStatus(s *string) *MachineUpdateOne { - if s != nil { - muo.SetStatus(*s) - } - return muo -} - -// ClearStatus clears the value of the "status" field. -func (muo *MachineUpdateOne) ClearStatus() *MachineUpdateOne { - muo.mutation.ClearStatus() - return muo -} - // SetAuthType sets the "auth_type" field. 
func (muo *MachineUpdateOne) SetAuthType(s string) *MachineUpdateOne { muo.mutation.SetAuthType(s) @@ -926,12 +880,6 @@ func (muo *MachineUpdateOne) sqlSave(ctx context.Context) (_node *Machine, err e if value, ok := muo.mutation.IsValidated(); ok { _spec.SetField(machine.FieldIsValidated, field.TypeBool, value) } - if value, ok := muo.mutation.Status(); ok { - _spec.SetField(machine.FieldStatus, field.TypeString, value) - } - if muo.mutation.StatusCleared() { - _spec.ClearField(machine.FieldStatus, field.TypeString) - } if value, ok := muo.mutation.AuthType(); ok { _spec.SetField(machine.FieldAuthType, field.TypeString, value) } diff --git a/pkg/database/ent/migrate/schema.go b/pkg/database/ent/migrate/schema.go index 5c32c472403..5b436830192 100644 --- a/pkg/database/ent/migrate/schema.go +++ b/pkg/database/ent/migrate/schema.go @@ -205,7 +205,6 @@ var ( {Name: "scenarios", Type: field.TypeString, Nullable: true, Size: 100000}, {Name: "version", Type: field.TypeString, Nullable: true}, {Name: "is_validated", Type: field.TypeBool, Default: false}, - {Name: "status", Type: field.TypeString, Nullable: true}, {Name: "auth_type", Type: field.TypeString, Default: "password"}, {Name: "osname", Type: field.TypeString, Nullable: true}, {Name: "osversion", Type: field.TypeString, Nullable: true}, diff --git a/pkg/database/ent/mutation.go b/pkg/database/ent/mutation.go index 8d109ece379..045ecb3c9af 100644 --- a/pkg/database/ent/mutation.go +++ b/pkg/database/ent/mutation.go @@ -6538,7 +6538,6 @@ type MachineMutation struct { scenarios *string version *string isValidated *bool - status *string auth_type *string osname *string osversion *string @@ -7064,55 +7063,6 @@ func (m *MachineMutation) ResetIsValidated() { m.isValidated = nil } -// SetStatus sets the "status" field. -func (m *MachineMutation) SetStatus(s string) { - m.status = &s -} - -// Status returns the value of the "status" field in the mutation. 
-func (m *MachineMutation) Status() (r string, exists bool) { - v := m.status - if v == nil { - return - } - return *v, true -} - -// OldStatus returns the old "status" field's value of the Machine entity. -// If the Machine object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *MachineMutation) OldStatus(ctx context.Context) (v string, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldStatus is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldStatus requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldStatus: %w", err) - } - return oldValue.Status, nil -} - -// ClearStatus clears the value of the "status" field. -func (m *MachineMutation) ClearStatus() { - m.status = nil - m.clearedFields[machine.FieldStatus] = struct{}{} -} - -// StatusCleared returns if the "status" field was cleared in this mutation. -func (m *MachineMutation) StatusCleared() bool { - _, ok := m.clearedFields[machine.FieldStatus] - return ok -} - -// ResetStatus resets all changes to the "status" field. -func (m *MachineMutation) ResetStatus() { - m.status = nil - delete(m.clearedFields, machine.FieldStatus) -} - // SetAuthType sets the "auth_type" field. func (m *MachineMutation) SetAuthType(s string) { m.auth_type = &s @@ -7482,7 +7432,7 @@ func (m *MachineMutation) Type() string { // order to get all numeric fields that were incremented/decremented, call // AddedFields(). 
func (m *MachineMutation) Fields() []string { - fields := make([]string, 0, 17) + fields := make([]string, 0, 16) if m.created_at != nil { fields = append(fields, machine.FieldCreatedAt) } @@ -7513,9 +7463,6 @@ func (m *MachineMutation) Fields() []string { if m.isValidated != nil { fields = append(fields, machine.FieldIsValidated) } - if m.status != nil { - fields = append(fields, machine.FieldStatus) - } if m.auth_type != nil { fields = append(fields, machine.FieldAuthType) } @@ -7562,8 +7509,6 @@ func (m *MachineMutation) Field(name string) (ent.Value, bool) { return m.Version() case machine.FieldIsValidated: return m.IsValidated() - case machine.FieldStatus: - return m.Status() case machine.FieldAuthType: return m.AuthType() case machine.FieldOsname: @@ -7605,8 +7550,6 @@ func (m *MachineMutation) OldField(ctx context.Context, name string) (ent.Value, return m.OldVersion(ctx) case machine.FieldIsValidated: return m.OldIsValidated(ctx) - case machine.FieldStatus: - return m.OldStatus(ctx) case machine.FieldAuthType: return m.OldAuthType(ctx) case machine.FieldOsname: @@ -7698,13 +7641,6 @@ func (m *MachineMutation) SetField(name string, value ent.Value) error { } m.SetIsValidated(v) return nil - case machine.FieldStatus: - v, ok := value.(string) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetStatus(v) - return nil case machine.FieldAuthType: v, ok := value.(string) if !ok { @@ -7789,9 +7725,6 @@ func (m *MachineMutation) ClearedFields() []string { if m.FieldCleared(machine.FieldVersion) { fields = append(fields, machine.FieldVersion) } - if m.FieldCleared(machine.FieldStatus) { - fields = append(fields, machine.FieldStatus) - } if m.FieldCleared(machine.FieldOsname) { fields = append(fields, machine.FieldOsname) } @@ -7833,9 +7766,6 @@ func (m *MachineMutation) ClearField(name string) error { case machine.FieldVersion: m.ClearVersion() return nil - case machine.FieldStatus: - m.ClearStatus() - return nil case 
machine.FieldOsname: m.ClearOsname() return nil @@ -7889,9 +7819,6 @@ func (m *MachineMutation) ResetField(name string) error { case machine.FieldIsValidated: m.ResetIsValidated() return nil - case machine.FieldStatus: - m.ResetStatus() - return nil case machine.FieldAuthType: m.ResetAuthType() return nil diff --git a/pkg/database/ent/runtime.go b/pkg/database/ent/runtime.go index 8d50d916029..15413490633 100644 --- a/pkg/database/ent/runtime.go +++ b/pkg/database/ent/runtime.go @@ -151,7 +151,7 @@ func init() { // machine.DefaultIsValidated holds the default value on creation for the isValidated field. machine.DefaultIsValidated = machineDescIsValidated.Default.(bool) // machineDescAuthType is the schema descriptor for auth_type field. - machineDescAuthType := machineFields[11].Descriptor() + machineDescAuthType := machineFields[10].Descriptor() // machine.DefaultAuthType holds the default value on creation for the auth_type field. machine.DefaultAuthType = machineDescAuthType.Default.(string) metaFields := schema.Meta{}.Fields() diff --git a/pkg/database/ent/schema/machine.go b/pkg/database/ent/schema/machine.go index 6fdcea2d824..071586f0c84 100644 --- a/pkg/database/ent/schema/machine.go +++ b/pkg/database/ent/schema/machine.go @@ -42,7 +42,6 @@ func (Machine) Fields() []ent.Field { field.String("version").Optional(), field.Bool("isValidated"). 
Default(false), - field.String("status").Optional(), field.String("auth_type").Default(types.PasswordAuthType).StructTag(`json:"auth_type"`), field.String("osname").Optional(), field.String("osversion").Optional(), From 4635d04b22308b71c061bdd36f88078ea87a2345 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 2 Jul 2024 22:49:47 +0200 Subject: [PATCH 206/318] lapi detailed metrics: API spec + models (#3100) * lapi detailed metrics: API spec + models * models+db: split hubstate by item type --- pkg/database/ent/machine.go | 2 +- pkg/database/ent/machine_create.go | 2 +- pkg/database/ent/machine_update.go | 4 +- pkg/database/ent/mutation.go | 10 +- pkg/database/ent/schema/machine.go | 2 +- pkg/models/all_metrics.go | 234 +++++++++++++++++++ pkg/models/base_metrics.go | 210 +++++++++++++++++ pkg/models/console_options.go | 27 +++ pkg/models/detailed_metrics.go | 173 ++++++++++++++ pkg/models/hub_item.go | 56 +++++ pkg/models/hub_items.go | 83 +++++++ pkg/models/lapi_metrics.go | 157 +++++++++++++ pkg/models/localapi_swagger.yaml | 220 +++++++++++++++++ pkg/models/log_processors_metrics.go | 219 +++++++++++++++++ pkg/models/metrics_detail_item.go | 158 +++++++++++++ pkg/models/metrics_labels.go | 27 +++ pkg/models/metrics_meta.go | 88 +++++++ pkg/models/o_sversion.go | 88 +++++++ pkg/models/remediation_components_metrics.go | 139 +++++++++++ pkg/models/success_response.go | 73 ++++++ 20 files changed, 1962 insertions(+), 10 deletions(-) create mode 100644 pkg/models/all_metrics.go create mode 100644 pkg/models/base_metrics.go create mode 100644 pkg/models/console_options.go create mode 100644 pkg/models/detailed_metrics.go create mode 100644 pkg/models/hub_item.go create mode 100644 pkg/models/hub_items.go create mode 100644 pkg/models/lapi_metrics.go create mode 100644 pkg/models/log_processors_metrics.go create mode 100644 pkg/models/metrics_detail_item.go create mode 100644 pkg/models/metrics_labels.go create mode 100644 
pkg/models/metrics_meta.go create mode 100644 pkg/models/o_sversion.go create mode 100644 pkg/models/remediation_components_metrics.go create mode 100644 pkg/models/success_response.go diff --git a/pkg/database/ent/machine.go b/pkg/database/ent/machine.go index 24c9fdb57e6..76127065791 100644 --- a/pkg/database/ent/machine.go +++ b/pkg/database/ent/machine.go @@ -48,7 +48,7 @@ type Machine struct { // Featureflags holds the value of the "featureflags" field. Featureflags string `json:"featureflags,omitempty"` // Hubstate holds the value of the "hubstate" field. - Hubstate map[string]schema.ItemState `json:"hubstate,omitempty"` + Hubstate map[string][]schema.ItemState `json:"hubstate,omitempty"` // Datasources holds the value of the "datasources" field. Datasources map[string]int64 `json:"datasources,omitempty"` // Edges holds the relations/edges for other nodes in the graph. diff --git a/pkg/database/ent/machine_create.go b/pkg/database/ent/machine_create.go index a68f7a23966..fba8400798c 100644 --- a/pkg/database/ent/machine_create.go +++ b/pkg/database/ent/machine_create.go @@ -195,7 +195,7 @@ func (mc *MachineCreate) SetNillableFeatureflags(s *string) *MachineCreate { } // SetHubstate sets the "hubstate" field. -func (mc *MachineCreate) SetHubstate(ms map[string]schema.ItemState) *MachineCreate { +func (mc *MachineCreate) SetHubstate(ms map[string][]schema.ItemState) *MachineCreate { mc.mutation.SetHubstate(ms) return mc } diff --git a/pkg/database/ent/machine_update.go b/pkg/database/ent/machine_update.go index c9a4f0b72ff..531baabf0d6 100644 --- a/pkg/database/ent/machine_update.go +++ b/pkg/database/ent/machine_update.go @@ -233,7 +233,7 @@ func (mu *MachineUpdate) ClearFeatureflags() *MachineUpdate { } // SetHubstate sets the "hubstate" field. 
-func (mu *MachineUpdate) SetHubstate(ms map[string]schema.ItemState) *MachineUpdate { +func (mu *MachineUpdate) SetHubstate(ms map[string][]schema.ItemState) *MachineUpdate { mu.mutation.SetHubstate(ms) return mu } @@ -692,7 +692,7 @@ func (muo *MachineUpdateOne) ClearFeatureflags() *MachineUpdateOne { } // SetHubstate sets the "hubstate" field. -func (muo *MachineUpdateOne) SetHubstate(ms map[string]schema.ItemState) *MachineUpdateOne { +func (muo *MachineUpdateOne) SetHubstate(ms map[string][]schema.ItemState) *MachineUpdateOne { muo.mutation.SetHubstate(ms) return muo } diff --git a/pkg/database/ent/mutation.go b/pkg/database/ent/mutation.go index 045ecb3c9af..573e0b5daa9 100644 --- a/pkg/database/ent/mutation.go +++ b/pkg/database/ent/mutation.go @@ -6542,7 +6542,7 @@ type MachineMutation struct { osname *string osversion *string featureflags *string - hubstate *map[string]schema.ItemState + hubstate *map[string][]schema.ItemState datasources *map[string]int64 clearedFields map[string]struct{} alerts map[int]struct{} @@ -7247,12 +7247,12 @@ func (m *MachineMutation) ResetFeatureflags() { } // SetHubstate sets the "hubstate" field. -func (m *MachineMutation) SetHubstate(ms map[string]schema.ItemState) { +func (m *MachineMutation) SetHubstate(ms map[string][]schema.ItemState) { m.hubstate = &ms } // Hubstate returns the value of the "hubstate" field in the mutation. -func (m *MachineMutation) Hubstate() (r map[string]schema.ItemState, exists bool) { +func (m *MachineMutation) Hubstate() (r map[string][]schema.ItemState, exists bool) { v := m.hubstate if v == nil { return @@ -7263,7 +7263,7 @@ func (m *MachineMutation) Hubstate() (r map[string]schema.ItemState, exists bool // OldHubstate returns the old "hubstate" field's value of the Machine entity. // If the Machine object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
-func (m *MachineMutation) OldHubstate(ctx context.Context) (v map[string]schema.ItemState, err error) { +func (m *MachineMutation) OldHubstate(ctx context.Context) (v map[string][]schema.ItemState, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldHubstate is only allowed on UpdateOne operations") } @@ -7670,7 +7670,7 @@ func (m *MachineMutation) SetField(name string, value ent.Value) error { m.SetFeatureflags(v) return nil case machine.FieldHubstate: - v, ok := value.(map[string]schema.ItemState) + v, ok := value.(map[string][]schema.ItemState) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } diff --git a/pkg/database/ent/schema/machine.go b/pkg/database/ent/schema/machine.go index 071586f0c84..1566cf70b32 100644 --- a/pkg/database/ent/schema/machine.go +++ b/pkg/database/ent/schema/machine.go @@ -46,7 +46,7 @@ func (Machine) Fields() []ent.Field { field.String("osname").Optional(), field.String("osversion").Optional(), field.String("featureflags").Optional(), - field.JSON("hubstate", map[string]ItemState{}).Optional(), + field.JSON("hubstate", map[string][]ItemState{}).Optional(), field.JSON("datasources", map[string]int64{}).Optional(), } } diff --git a/pkg/models/all_metrics.go b/pkg/models/all_metrics.go new file mode 100644 index 00000000000..5865070e8ef --- /dev/null +++ b/pkg/models/all_metrics.go @@ -0,0 +1,234 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// AllMetrics AllMetrics +// +// swagger:model AllMetrics +type AllMetrics struct { + + // lapi + Lapi *LapiMetrics `json:"lapi,omitempty"` + + // log processors metrics + LogProcessors []*LogProcessorsMetrics `json:"log_processors"` + + // remediation components metrics + RemediationComponents []*RemediationComponentsMetrics `json:"remediation_components"` +} + +// Validate validates this all metrics +func (m *AllMetrics) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateLapi(formats); err != nil { + res = append(res, err) + } + + if err := m.validateLogProcessors(formats); err != nil { + res = append(res, err) + } + + if err := m.validateRemediationComponents(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *AllMetrics) validateLapi(formats strfmt.Registry) error { + if swag.IsZero(m.Lapi) { // not required + return nil + } + + if m.Lapi != nil { + if err := m.Lapi.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("lapi") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("lapi") + } + return err + } + } + + return nil +} + +func (m *AllMetrics) validateLogProcessors(formats strfmt.Registry) error { + if swag.IsZero(m.LogProcessors) { // not required + return nil + } + + for i := 0; i < len(m.LogProcessors); i++ { + if swag.IsZero(m.LogProcessors[i]) { // not required + continue + } + + if m.LogProcessors[i] != nil { + if err := m.LogProcessors[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("log_processors" + "." 
+ strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("log_processors" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *AllMetrics) validateRemediationComponents(formats strfmt.Registry) error { + if swag.IsZero(m.RemediationComponents) { // not required + return nil + } + + for i := 0; i < len(m.RemediationComponents); i++ { + if swag.IsZero(m.RemediationComponents[i]) { // not required + continue + } + + if m.RemediationComponents[i] != nil { + if err := m.RemediationComponents[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("remediation_components" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("remediation_components" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// ContextValidate validate this all metrics based on the context it is used +func (m *AllMetrics) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateLapi(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateLogProcessors(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateRemediationComponents(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *AllMetrics) contextValidateLapi(ctx context.Context, formats strfmt.Registry) error { + + if m.Lapi != nil { + + if swag.IsZero(m.Lapi) { // not required + return nil + } + + if err := m.Lapi.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("lapi") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("lapi") + } + return err + } + } + + return nil +} + +func (m *AllMetrics) contextValidateLogProcessors(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.LogProcessors); i++ { + + if m.LogProcessors[i] != nil { + + if swag.IsZero(m.LogProcessors[i]) { // not required + return nil + } + + if err := m.LogProcessors[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("log_processors" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("log_processors" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *AllMetrics) contextValidateRemediationComponents(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.RemediationComponents); i++ { + + if m.RemediationComponents[i] != nil { + + if swag.IsZero(m.RemediationComponents[i]) { // not required + return nil + } + + if err := m.RemediationComponents[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("remediation_components" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("remediation_components" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *AllMetrics) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *AllMetrics) UnmarshalBinary(b []byte) error { + var res AllMetrics + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/base_metrics.go b/pkg/models/base_metrics.go new file mode 100644 index 00000000000..154d9004afe --- /dev/null +++ b/pkg/models/base_metrics.go @@ -0,0 +1,210 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// BaseMetrics BaseMetrics +// +// swagger:model BaseMetrics +type BaseMetrics struct { + + // feature flags (expected to be empty for remediation components) + FeatureFlags []string `json:"feature_flags"` + + // metrics details + Metrics []*DetailedMetrics `json:"metrics"` + + // os + Os *OSversion `json:"os,omitempty"` + + // UTC timestamp of the startup of the software + // Required: true + UtcStartupTimestamp *int64 `json:"utc_startup_timestamp"` + + // version of the remediation component + // Required: true + Version *string `json:"version"` +} + +// Validate validates this base metrics +func (m *BaseMetrics) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateMetrics(formats); err != nil { + res = append(res, err) + } + + if err := m.validateOs(formats); err != nil { + res = append(res, err) + } + + if err := m.validateUtcStartupTimestamp(formats); err != nil { + res = append(res, err) + } + + if err := 
m.validateVersion(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *BaseMetrics) validateMetrics(formats strfmt.Registry) error { + if swag.IsZero(m.Metrics) { // not required + return nil + } + + for i := 0; i < len(m.Metrics); i++ { + if swag.IsZero(m.Metrics[i]) { // not required + continue + } + + if m.Metrics[i] != nil { + if err := m.Metrics[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("metrics" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("metrics" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *BaseMetrics) validateOs(formats strfmt.Registry) error { + if swag.IsZero(m.Os) { // not required + return nil + } + + if m.Os != nil { + if err := m.Os.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("os") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("os") + } + return err + } + } + + return nil +} + +func (m *BaseMetrics) validateUtcStartupTimestamp(formats strfmt.Registry) error { + + if err := validate.Required("utc_startup_timestamp", "body", m.UtcStartupTimestamp); err != nil { + return err + } + + return nil +} + +func (m *BaseMetrics) validateVersion(formats strfmt.Registry) error { + + if err := validate.Required("version", "body", m.Version); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this base metrics based on the context it is used +func (m *BaseMetrics) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateMetrics(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateOs(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return 
errors.CompositeValidationError(res...) + } + return nil +} + +func (m *BaseMetrics) contextValidateMetrics(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.Metrics); i++ { + + if m.Metrics[i] != nil { + + if swag.IsZero(m.Metrics[i]) { // not required + return nil + } + + if err := m.Metrics[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("metrics" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("metrics" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *BaseMetrics) contextValidateOs(ctx context.Context, formats strfmt.Registry) error { + + if m.Os != nil { + + if swag.IsZero(m.Os) { // not required + return nil + } + + if err := m.Os.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("os") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("os") + } + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *BaseMetrics) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *BaseMetrics) UnmarshalBinary(b []byte) error { + var res BaseMetrics + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/console_options.go b/pkg/models/console_options.go new file mode 100644 index 00000000000..87983ab1762 --- /dev/null +++ b/pkg/models/console_options.go @@ -0,0 +1,27 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" +) + +// ConsoleOptions ConsoleOptions +// +// swagger:model ConsoleOptions +type ConsoleOptions []string + +// Validate validates this console options +func (m ConsoleOptions) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this console options based on context it is used +func (m ConsoleOptions) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} diff --git a/pkg/models/detailed_metrics.go b/pkg/models/detailed_metrics.go new file mode 100644 index 00000000000..9e605ed8c88 --- /dev/null +++ b/pkg/models/detailed_metrics.go @@ -0,0 +1,173 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// DetailedMetrics DetailedMetrics +// +// swagger:model DetailedMetrics +type DetailedMetrics struct { + + // items + // Required: true + Items []*MetricsDetailItem `json:"items"` + + // meta + // Required: true + Meta *MetricsMeta `json:"meta"` +} + +// Validate validates this detailed metrics +func (m *DetailedMetrics) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateItems(formats); err != nil { + res = append(res, err) + } + + if err := m.validateMeta(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *DetailedMetrics) validateItems(formats strfmt.Registry) error { + + if err := validate.Required("items", "body", m.Items); err != nil { + return err + } + + for i := 0; i < len(m.Items); i++ { + if swag.IsZero(m.Items[i]) { // not required + continue + } + + if m.Items[i] != nil { + if err := m.Items[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("items" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("items" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *DetailedMetrics) validateMeta(formats strfmt.Registry) error { + + if err := validate.Required("meta", "body", m.Meta); err != nil { + return err + } + + if m.Meta != nil { + if err := m.Meta.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("meta") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("meta") + } + return err + } + } + + return nil +} + +// ContextValidate validate this detailed metrics based on the context it is used +func (m *DetailedMetrics) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateItems(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateMeta(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *DetailedMetrics) contextValidateItems(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.Items); i++ { + + if m.Items[i] != nil { + + if swag.IsZero(m.Items[i]) { // not required + return nil + } + + if err := m.Items[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("items" + "." 
+ strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("items" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *DetailedMetrics) contextValidateMeta(ctx context.Context, formats strfmt.Registry) error { + + if m.Meta != nil { + + if err := m.Meta.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("meta") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("meta") + } + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *DetailedMetrics) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *DetailedMetrics) UnmarshalBinary(b []byte) error { + var res DetailedMetrics + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/hub_item.go b/pkg/models/hub_item.go new file mode 100644 index 00000000000..c2bac3702c2 --- /dev/null +++ b/pkg/models/hub_item.go @@ -0,0 +1,56 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// HubItem HubItem +// +// swagger:model HubItem +type HubItem struct { + + // name of the hub item + Name string `json:"name,omitempty"` + + // status of the hub item (official, custom, tainted, etc.) 
+ Status string `json:"status,omitempty"` + + // version of the hub item + Version string `json:"version,omitempty"` +} + +// Validate validates this hub item +func (m *HubItem) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this hub item based on context it is used +func (m *HubItem) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *HubItem) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *HubItem) UnmarshalBinary(b []byte) error { + var res HubItem + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/hub_items.go b/pkg/models/hub_items.go new file mode 100644 index 00000000000..82388d5b97e --- /dev/null +++ b/pkg/models/hub_items.go @@ -0,0 +1,83 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// HubItems HubItems +// +// swagger:model HubItems +type HubItems map[string][]HubItem + +// Validate validates this hub items +func (m HubItems) Validate(formats strfmt.Registry) error { + var res []error + + for k := range m { + + if err := validate.Required(k, "body", m[k]); err != nil { + return err + } + + for i := 0; i < len(m[k]); i++ { + + if err := m[k][i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(k + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName(k + "." 
+ strconv.Itoa(i)) + } + return err + } + + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// ContextValidate validate this hub items based on the context it is used +func (m HubItems) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + for k := range m { + + for i := 0; i < len(m[k]); i++ { + + if swag.IsZero(m[k][i]) { // not required + return nil + } + + if err := m[k][i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(k + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName(k + "." + strconv.Itoa(i)) + } + return err + } + + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/pkg/models/lapi_metrics.go b/pkg/models/lapi_metrics.go new file mode 100644 index 00000000000..b56d92ef1f8 --- /dev/null +++ b/pkg/models/lapi_metrics.go @@ -0,0 +1,157 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// LapiMetrics LapiMetrics +// +// swagger:model LapiMetrics +type LapiMetrics struct { + BaseMetrics + + // console options + ConsoleOptions ConsoleOptions `json:"console_options,omitempty"` +} + +// UnmarshalJSON unmarshals this object from a JSON structure +func (m *LapiMetrics) UnmarshalJSON(raw []byte) error { + // AO0 + var aO0 BaseMetrics + if err := swag.ReadJSON(raw, &aO0); err != nil { + return err + } + m.BaseMetrics = aO0 + + // AO1 + var dataAO1 struct { + ConsoleOptions ConsoleOptions `json:"console_options,omitempty"` + } + if err := swag.ReadJSON(raw, &dataAO1); err != nil { + return err + } + + m.ConsoleOptions = dataAO1.ConsoleOptions + + return nil +} + +// MarshalJSON marshals this object to a JSON structure +func (m LapiMetrics) MarshalJSON() ([]byte, error) { + _parts := make([][]byte, 0, 2) + + aO0, err := swag.WriteJSON(m.BaseMetrics) + if err != nil { + return nil, err + } + _parts = append(_parts, aO0) + var dataAO1 struct { + ConsoleOptions ConsoleOptions `json:"console_options,omitempty"` + } + + dataAO1.ConsoleOptions = m.ConsoleOptions + + jsonDataAO1, errAO1 := swag.WriteJSON(dataAO1) + if errAO1 != nil { + return nil, errAO1 + } + _parts = append(_parts, jsonDataAO1) + return swag.ConcatJSON(_parts...), nil +} + +// Validate validates this lapi metrics +func (m *LapiMetrics) Validate(formats strfmt.Registry) error { + var res []error + + // validation for a type composition with BaseMetrics + if err := m.BaseMetrics.Validate(formats); err != nil { + res = append(res, err) + } + + if err := m.validateConsoleOptions(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *LapiMetrics) validateConsoleOptions(formats strfmt.Registry) error { + + if swag.IsZero(m.ConsoleOptions) { // not required + return nil + } + + if err := m.ConsoleOptions.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("console_options") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("console_options") + } + return err + } + + return nil +} + +// ContextValidate validate this lapi metrics based on the context it is used +func (m *LapiMetrics) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + // validation for a type composition with BaseMetrics + if err := m.BaseMetrics.ContextValidate(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateConsoleOptions(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *LapiMetrics) contextValidateConsoleOptions(ctx context.Context, formats strfmt.Registry) error { + + if err := m.ConsoleOptions.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("console_options") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("console_options") + } + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *LapiMetrics) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *LapiMetrics) UnmarshalBinary(b []byte) error { + var res LapiMetrics + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/localapi_swagger.yaml b/pkg/models/localapi_swagger.yaml index d167da9b6af..d726f452a16 100644 --- a/pkg/models/localapi_swagger.yaml +++ 
b/pkg/models/localapi_swagger.yaml @@ -684,6 +684,36 @@ paths: $ref: "#/definitions/ErrorResponse" security: - JWTAuthorizer: [] + /usage-metrics: + post: + description: Post usage metrics from a LP or a bouncer + summary: Send usage metrics + tags: + - Remediation component + - watchers + operationId: usage-metrics + produces: + - application/json + parameters: + - name: body + in: body + required: true + schema: + $ref: '#/definitions/AllMetrics' + description: 'All metrics' + responses: + '200': + description: successful operation + schema: + $ref: '#/definitions/SuccessResponse' + headers: {} + '400': + description: "400 response" + schema: + $ref: "#/definitions/ErrorResponse" + security: + - APIKeyAuthorizer: [] + - JWTAuthorizer: [] definitions: WatcherRegistrationRequest: title: WatcherRegistrationRequest @@ -994,6 +1024,186 @@ definitions: type: string value: type: string + RemediationComponentsMetrics: + title: RemediationComponentsMetrics + type: object + allOf: + - $ref: '#/definitions/BaseMetrics' + - properties: + type: + type: string + description: type of the remediation component + name: + type: string + description: name of the remediation component + last_pull: + type: integer + description: last pull date + LogProcessorsMetrics: + title: LogProcessorsMetrics + type: object + allOf: + - $ref: '#/definitions/BaseMetrics' + - properties: + hub_items: + $ref: '#/definitions/HubItems' + datasources: + type: object + description: Number of datasources per type + additionalProperties: + type: integer + name: + type: string + description: name of the log processor + last_push: + type: integer + description: last push date + last_update: + type: integer + description: last update date + required: + - hub_items + - datasources + LapiMetrics: + title: LapiMetrics + type: object + allOf: + - $ref: '#/definitions/BaseMetrics' + - properties: + console_options: + $ref: '#/definitions/ConsoleOptions' + AllMetrics: + title: AllMetrics + type: object + 
properties: + remediation_components: + type: array + items: + $ref: '#/definitions/RemediationComponentsMetrics' + description: remediation components metrics + log_processors: + type: array + items: + $ref: '#/definitions/LogProcessorsMetrics' + description: log processors metrics + lapi: + $ref: '#/definitions/LapiMetrics' + BaseMetrics: + title: BaseMetrics + type: object + properties: + version: + type: string + description: version of the remediation component + os: + $ref: '#/definitions/OSversion' + metrics: + type: array + items: + $ref: '#/definitions/DetailedMetrics' + description: metrics details + feature_flags: + type: array + items: + type: string + description: feature flags (expected to be empty for remediation components) + utc_startup_timestamp: + type: integer + description: UTC timestamp of the startup of the software + required: + - version + - utc_startup_timestamp + OSversion: + title: OSversion + type: object + properties: + name: + type: string + description: name of the OS + version: + type: string + description: version of the OS + required: + - name + - version + DetailedMetrics: + type: object + title: DetailedMetrics + properties: + items: + type: array + items: + $ref: '#/definitions/MetricsDetailItem' + meta: + $ref: '#/definitions/MetricsMeta' + required: + - meta + - items + MetricsDetailItem: + title: MetricsDetailItem + type: object + properties: + name: + type: string + description: name of the metric + value: + type: number + description: value of the metric + unit: + type: string + description: unit of the metric + labels: + $ref: '#/definitions/MetricsLabels' + description: labels of the metric + required: + - name + - value + - unit + MetricsMeta: + title: MetricsMeta + type: object + properties: + window_size_seconds: + type: integer + description: Size, in seconds, of the window used to compute the metric + utc_now_timestamp: + type: integer + description: UTC timestamp of the current time + required: + - 
window_size_seconds + - utc_now_timestamp + MetricsLabels: + title: MetricsLabels + type: object + additionalProperties: + type: string + description: label of the metric + ConsoleOptions: + title: ConsoleOptions + type: array + items: + type: string + description: enabled console options + HubItems: + title: HubItems + type: object + additionalProperties: + type: array + items: + $ref: '#/definitions/HubItem' + HubItem: + title: HubItem + type: object + properties: + name: + type: string + description: name of the hub item + version: + type: string + description: version of the hub item + status: + type: string + description: status of the hub item (official, custom, tainted, etc.) ErrorResponse: type: "object" required: @@ -1007,6 +1217,16 @@ definitions: description: "more detail on individual errors" title: "error response" description: "error response return by the API" + SuccessResponse: + type: "object" + required: + - "message" + properties: + message: + type: "string" + description: "message" + title: "success response" + description: "success response return by the API" tags: - name: Remediation component description: 'Operations about decisions : bans, captcha, rate-limit etc.' diff --git a/pkg/models/log_processors_metrics.go b/pkg/models/log_processors_metrics.go new file mode 100644 index 00000000000..05b688fb994 --- /dev/null +++ b/pkg/models/log_processors_metrics.go @@ -0,0 +1,219 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// LogProcessorsMetrics LogProcessorsMetrics +// +// swagger:model LogProcessorsMetrics +type LogProcessorsMetrics struct { + BaseMetrics + + // Number of datasources per type + // Required: true + Datasources map[string]int64 `json:"datasources"` + + // hub items + // Required: true + HubItems HubItems `json:"hub_items"` + + // last push date + LastPush int64 `json:"last_push,omitempty"` + + // last update date + LastUpdate int64 `json:"last_update,omitempty"` + + // name of the log processor + Name string `json:"name,omitempty"` +} + +// UnmarshalJSON unmarshals this object from a JSON structure +func (m *LogProcessorsMetrics) UnmarshalJSON(raw []byte) error { + // AO0 + var aO0 BaseMetrics + if err := swag.ReadJSON(raw, &aO0); err != nil { + return err + } + m.BaseMetrics = aO0 + + // AO1 + var dataAO1 struct { + Datasources map[string]int64 `json:"datasources"` + + HubItems HubItems `json:"hub_items"` + + LastPush int64 `json:"last_push,omitempty"` + + LastUpdate int64 `json:"last_update,omitempty"` + + Name string `json:"name,omitempty"` + } + if err := swag.ReadJSON(raw, &dataAO1); err != nil { + return err + } + + m.Datasources = dataAO1.Datasources + + m.HubItems = dataAO1.HubItems + + m.LastPush = dataAO1.LastPush + + m.LastUpdate = dataAO1.LastUpdate + + m.Name = dataAO1.Name + + return nil +} + +// MarshalJSON marshals this object to a JSON structure +func (m LogProcessorsMetrics) MarshalJSON() ([]byte, error) { + _parts := make([][]byte, 0, 2) + + aO0, err := swag.WriteJSON(m.BaseMetrics) + if err != nil { + return nil, err + } + _parts = append(_parts, aO0) + var dataAO1 struct { + Datasources map[string]int64 `json:"datasources"` + + HubItems HubItems `json:"hub_items"` + + LastPush int64 
`json:"last_push,omitempty"` + + LastUpdate int64 `json:"last_update,omitempty"` + + Name string `json:"name,omitempty"` + } + + dataAO1.Datasources = m.Datasources + + dataAO1.HubItems = m.HubItems + + dataAO1.LastPush = m.LastPush + + dataAO1.LastUpdate = m.LastUpdate + + dataAO1.Name = m.Name + + jsonDataAO1, errAO1 := swag.WriteJSON(dataAO1) + if errAO1 != nil { + return nil, errAO1 + } + _parts = append(_parts, jsonDataAO1) + return swag.ConcatJSON(_parts...), nil +} + +// Validate validates this log processors metrics +func (m *LogProcessorsMetrics) Validate(formats strfmt.Registry) error { + var res []error + + // validation for a type composition with BaseMetrics + if err := m.BaseMetrics.Validate(formats); err != nil { + res = append(res, err) + } + + if err := m.validateDatasources(formats); err != nil { + res = append(res, err) + } + + if err := m.validateHubItems(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *LogProcessorsMetrics) validateDatasources(formats strfmt.Registry) error { + + if err := validate.Required("datasources", "body", m.Datasources); err != nil { + return err + } + + return nil +} + +func (m *LogProcessorsMetrics) validateHubItems(formats strfmt.Registry) error { + + if err := validate.Required("hub_items", "body", m.HubItems); err != nil { + return err + } + + if m.HubItems != nil { + if err := m.HubItems.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("hub_items") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("hub_items") + } + return err + } + } + + return nil +} + +// ContextValidate validate this log processors metrics based on the context it is used +func (m *LogProcessorsMetrics) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + // validation for a type composition with BaseMetrics + if err := m.BaseMetrics.ContextValidate(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateHubItems(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *LogProcessorsMetrics) contextValidateHubItems(ctx context.Context, formats strfmt.Registry) error { + + if err := m.HubItems.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("hub_items") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("hub_items") + } + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *LogProcessorsMetrics) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *LogProcessorsMetrics) UnmarshalBinary(b []byte) error { + var res LogProcessorsMetrics + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/metrics_detail_item.go b/pkg/models/metrics_detail_item.go new file mode 100644 index 00000000000..889f7e263d2 --- /dev/null +++ b/pkg/models/metrics_detail_item.go @@ -0,0 +1,158 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// MetricsDetailItem MetricsDetailItem +// +// swagger:model MetricsDetailItem +type MetricsDetailItem struct { + + // labels of the metric + Labels MetricsLabels `json:"labels,omitempty"` + + // name of the metric + // Required: true + Name *string `json:"name"` + + // unit of the metric + // Required: true + Unit *string `json:"unit"` + + // value of the metric + // Required: true + Value *float64 `json:"value"` +} + +// Validate validates this metrics detail item +func (m *MetricsDetailItem) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateLabels(formats); err != nil { + res = append(res, err) + } + + if err := m.validateName(formats); err != nil { + res = append(res, err) + } + + if err := m.validateUnit(formats); err != nil { + res = append(res, err) + } + + if err := m.validateValue(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *MetricsDetailItem) validateLabels(formats strfmt.Registry) error { + if swag.IsZero(m.Labels) { // not required + return nil + } + + if m.Labels != nil { + if err := m.Labels.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("labels") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("labels") + } + return err + } + } + + return nil +} + +func (m *MetricsDetailItem) validateName(formats strfmt.Registry) error { + + if err := validate.Required("name", "body", m.Name); err != nil { + return err + } + + return nil +} + +func (m *MetricsDetailItem) validateUnit(formats strfmt.Registry) error { + + if err := validate.Required("unit", "body", m.Unit); err != nil { + return err + } + + return nil +} + +func (m *MetricsDetailItem) validateValue(formats strfmt.Registry) error { + + if err := validate.Required("value", "body", m.Value); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this metrics detail item based on the context it is used +func (m *MetricsDetailItem) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateLabels(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *MetricsDetailItem) contextValidateLabels(ctx context.Context, formats strfmt.Registry) error { + + if swag.IsZero(m.Labels) { // not required + return nil + } + + if err := m.Labels.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("labels") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("labels") + } + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *MetricsDetailItem) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *MetricsDetailItem) UnmarshalBinary(b []byte) error { + var res MetricsDetailItem + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/metrics_labels.go b/pkg/models/metrics_labels.go new file mode 100644 index 00000000000..d807a88bc8d --- /dev/null +++ b/pkg/models/metrics_labels.go @@ -0,0 +1,27 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" +) + +// MetricsLabels MetricsLabels +// +// swagger:model MetricsLabels +type MetricsLabels map[string]string + +// Validate validates this metrics labels +func (m MetricsLabels) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this metrics labels based on context it is used +func (m MetricsLabels) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} diff --git a/pkg/models/metrics_meta.go b/pkg/models/metrics_meta.go new file mode 100644 index 00000000000..b021617e4d9 --- /dev/null +++ b/pkg/models/metrics_meta.go @@ -0,0 +1,88 @@ +// Code generated by go-swagger; DO NOT EDIT. 
+ +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// MetricsMeta MetricsMeta +// +// swagger:model MetricsMeta +type MetricsMeta struct { + + // UTC timestamp of the current time + // Required: true + UtcNowTimestamp *int64 `json:"utc_now_timestamp"` + + // Size, in seconds, of the window used to compute the metric + // Required: true + WindowSizeSeconds *int64 `json:"window_size_seconds"` +} + +// Validate validates this metrics meta +func (m *MetricsMeta) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateUtcNowTimestamp(formats); err != nil { + res = append(res, err) + } + + if err := m.validateWindowSizeSeconds(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *MetricsMeta) validateUtcNowTimestamp(formats strfmt.Registry) error { + + if err := validate.Required("utc_now_timestamp", "body", m.UtcNowTimestamp); err != nil { + return err + } + + return nil +} + +func (m *MetricsMeta) validateWindowSizeSeconds(formats strfmt.Registry) error { + + if err := validate.Required("window_size_seconds", "body", m.WindowSizeSeconds); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this metrics meta based on context it is used +func (m *MetricsMeta) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *MetricsMeta) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *MetricsMeta) UnmarshalBinary(b []byte) error { + var res MetricsMeta + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/o_sversion.go b/pkg/models/o_sversion.go new file mode 100644 index 00000000000..eb670409c90 --- /dev/null +++ b/pkg/models/o_sversion.go @@ -0,0 +1,88 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// OSversion OSversion +// +// swagger:model OSversion +type OSversion struct { + + // name of the OS + // Required: true + Name *string `json:"name"` + + // version of the OS + // Required: true + Version *string `json:"version"` +} + +// Validate validates this o sversion +func (m *OSversion) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateName(formats); err != nil { + res = append(res, err) + } + + if err := m.validateVersion(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *OSversion) validateName(formats strfmt.Registry) error { + + if err := validate.Required("name", "body", m.Name); err != nil { + return err + } + + return nil +} + +func (m *OSversion) validateVersion(formats strfmt.Registry) error { + + if err := validate.Required("version", "body", m.Version); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this o sversion based on context it is used +func (m *OSversion) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *OSversion) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *OSversion) UnmarshalBinary(b []byte) error { + var res OSversion + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/remediation_components_metrics.go b/pkg/models/remediation_components_metrics.go new file mode 100644 index 00000000000..ba3845d872a --- /dev/null +++ 
b/pkg/models/remediation_components_metrics.go @@ -0,0 +1,139 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// RemediationComponentsMetrics RemediationComponentsMetrics +// +// swagger:model RemediationComponentsMetrics +type RemediationComponentsMetrics struct { + BaseMetrics + + // last pull date + LastPull int64 `json:"last_pull,omitempty"` + + // name of the remediation component + Name string `json:"name,omitempty"` + + // type of the remediation component + Type string `json:"type,omitempty"` +} + +// UnmarshalJSON unmarshals this object from a JSON structure +func (m *RemediationComponentsMetrics) UnmarshalJSON(raw []byte) error { + // AO0 + var aO0 BaseMetrics + if err := swag.ReadJSON(raw, &aO0); err != nil { + return err + } + m.BaseMetrics = aO0 + + // AO1 + var dataAO1 struct { + LastPull int64 `json:"last_pull,omitempty"` + + Name string `json:"name,omitempty"` + + Type string `json:"type,omitempty"` + } + if err := swag.ReadJSON(raw, &dataAO1); err != nil { + return err + } + + m.LastPull = dataAO1.LastPull + + m.Name = dataAO1.Name + + m.Type = dataAO1.Type + + return nil +} + +// MarshalJSON marshals this object to a JSON structure +func (m RemediationComponentsMetrics) MarshalJSON() ([]byte, error) { + _parts := make([][]byte, 0, 2) + + aO0, err := swag.WriteJSON(m.BaseMetrics) + if err != nil { + return nil, err + } + _parts = append(_parts, aO0) + var dataAO1 struct { + LastPull int64 `json:"last_pull,omitempty"` + + Name string `json:"name,omitempty"` + + Type string `json:"type,omitempty"` + } + + dataAO1.LastPull = m.LastPull + + dataAO1.Name = m.Name + + dataAO1.Type = m.Type + + jsonDataAO1, errAO1 := swag.WriteJSON(dataAO1) + if errAO1 != nil { + return nil, 
errAO1 + } + _parts = append(_parts, jsonDataAO1) + return swag.ConcatJSON(_parts...), nil +} + +// Validate validates this remediation components metrics +func (m *RemediationComponentsMetrics) Validate(formats strfmt.Registry) error { + var res []error + + // validation for a type composition with BaseMetrics + if err := m.BaseMetrics.Validate(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// ContextValidate validate this remediation components metrics based on the context it is used +func (m *RemediationComponentsMetrics) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + // validation for a type composition with BaseMetrics + if err := m.BaseMetrics.ContextValidate(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// MarshalBinary interface implementation +func (m *RemediationComponentsMetrics) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *RemediationComponentsMetrics) UnmarshalBinary(b []byte) error { + var res RemediationComponentsMetrics + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/success_response.go b/pkg/models/success_response.go new file mode 100644 index 00000000000..e8fc281c090 --- /dev/null +++ b/pkg/models/success_response.go @@ -0,0 +1,73 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// SuccessResponse success response +// +// success response return by the API +// +// swagger:model SuccessResponse +type SuccessResponse struct { + + // message + // Required: true + Message *string `json:"message"` +} + +// Validate validates this success response +func (m *SuccessResponse) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateMessage(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *SuccessResponse) validateMessage(formats strfmt.Registry) error { + + if err := validate.Required("message", "body", m.Message); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this success response based on context it is used +func (m *SuccessResponse) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *SuccessResponse) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *SuccessResponse) UnmarshalBinary(b []byte) error { + var res SuccessResponse + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} From 9cebcf96b4f8465c29e69b53cd9b872d5d99223e Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 3 Jul 2024 17:20:20 +0200 Subject: [PATCH 207/318] command cscli [machines|bouncers] inspect (#3103) * command cscli [machines|bouncers] inspect * lint --- .golangci.yml | 2 + cmd/crowdsec-cli/bouncers.go | 217 ++++++++++++++++---- cmd/crowdsec-cli/bouncers_table.go | 33 ---- 
cmd/crowdsec-cli/machines.go | 304 +++++++++++++++++++++++++---- cmd/crowdsec-cli/machines_table.go | 33 ---- cmd/crowdsec-cli/support.go | 22 +-- pkg/database/ent/helpers.go | 55 ++++++ pkg/database/ent/schema/machine.go | 1 + test/bats/30_machines.bats | 7 +- 9 files changed, 527 insertions(+), 147 deletions(-) delete mode 100644 cmd/crowdsec-cli/bouncers_table.go delete mode 100644 cmd/crowdsec-cli/machines_table.go create mode 100644 pkg/database/ent/helpers.go diff --git a/.golangci.yml b/.golangci.yml index 66c720381de..855c73f9af3 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -143,6 +143,8 @@ linters-settings: disabled: true - name: struct-tag disabled: true + - name: redundant-import-alias + disabled: true - name: time-equal disabled: true - name: var-naming diff --git a/cmd/crowdsec-cli/bouncers.go b/cmd/crowdsec-cli/bouncers.go index 3da9575146e..0673473d72a 100644 --- a/cmd/crowdsec-cli/bouncers.go +++ b/cmd/crowdsec-cli/bouncers.go @@ -5,6 +5,7 @@ import ( "encoding/json" "errors" "fmt" + "io" "os" "slices" "strings" @@ -12,12 +13,16 @@ import ( "github.com/AlecAivazis/survey/v2" "github.com/fatih/color" + "github.com/jedib0t/go-pretty/v6/table" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" middlewares "github.com/crowdsecurity/crowdsec/pkg/apiserver/middlewares/v1" "github.com/crowdsecurity/crowdsec/pkg/database" + "github.com/crowdsecurity/crowdsec/pkg/database/ent" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/bouncer" + "github.com/crowdsecurity/crowdsec/pkg/emoji" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -79,13 +84,92 @@ Note: This command requires database direct access, so is intended to be run on cmd.AddCommand(cli.newAddCmd()) cmd.AddCommand(cli.newDeleteCmd()) cmd.AddCommand(cli.newPruneCmd()) + cmd.AddCommand(cli.newInspectCmd()) return cmd } -func (cli *cliBouncers) list() error { - out := color.Output +func (cli *cliBouncers) listHuman(out 
io.Writer, bouncers ent.Bouncers) { + t := newLightTable(out).Writer + t.AppendHeader(table.Row{"Name", "IP Address", "Valid", "Last API pull", "Type", "Version", "Auth Type"}) + + for _, b := range bouncers { + revoked := emoji.CheckMark + if b.Revoked { + revoked = emoji.Prohibited + } + + lastPull := "" + if b.LastPull != nil { + lastPull = b.LastPull.Format(time.RFC3339) + } + + t.AppendRow(table.Row{b.Name, b.IPAddress, revoked, lastPull, b.Type, b.Version, b.AuthType}) + } + + fmt.Fprintln(out, t.Render()) +} + +// bouncerInfo contains only the data we want for inspect/list +type bouncerInfo struct { + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` + Name string `json:"name"` + Revoked bool `json:"revoked"` + IPAddress string `json:"ip_address"` + Type string `json:"type"` + Version string `json:"version"` + LastPull *time.Time `json:"last_pull"` + AuthType string `json:"auth_type"` + OS string `json:"os,omitempty"` + Featureflags []string `json:"featureflags,omitempty"` +} + +func newBouncerInfo(b *ent.Bouncer) bouncerInfo { + return bouncerInfo{ + CreatedAt: b.CreatedAt, + UpdatedAt: b.UpdatedAt, + Name: b.Name, + Revoked: b.Revoked, + IPAddress: b.IPAddress, + Type: b.Type, + Version: b.Version, + LastPull: b.LastPull, + AuthType: b.AuthType, + OS: b.GetOSNameAndVersion(), + Featureflags: b.GetFeatureFlagList(), + } +} + +func (cli *cliBouncers) listCSV(out io.Writer, bouncers ent.Bouncers) error { + csvwriter := csv.NewWriter(out) + + if err := csvwriter.Write([]string{"name", "ip", "revoked", "last_pull", "type", "version", "auth_type"}); err != nil { + return fmt.Errorf("failed to write raw header: %w", err) + } + + for _, b := range bouncers { + valid := "validated" + if b.Revoked { + valid = "pending" + } + + lastPull := "" + if b.LastPull != nil { + lastPull = b.LastPull.Format(time.RFC3339) + } + if err := csvwriter.Write([]string{b.Name, b.IPAddress, valid, lastPull, b.Type, b.Version, b.AuthType}); err != nil { + 
return fmt.Errorf("failed to write raw: %w", err) + } + } + + csvwriter.Flush() + return nil +} + + +func (cli *cliBouncers) list(out io.Writer) error { bouncers, err := cli.db.ListBouncers() if err != nil { return fmt.Errorf("unable to list bouncers: %w", err) @@ -93,40 +177,23 @@ func (cli *cliBouncers) list() error { switch cli.cfg().Cscli.Output { case "human": - getBouncersTable(out, bouncers) + cli.listHuman(out, bouncers) case "json": + info := make([]bouncerInfo, 0, len(bouncers)) + for _, b := range bouncers { + info = append(info, newBouncerInfo(b)) + } + enc := json.NewEncoder(out) enc.SetIndent("", " ") - if err := enc.Encode(bouncers); err != nil { - return fmt.Errorf("failed to marshal: %w", err) + if err := enc.Encode(info); err != nil { + return errors.New("failed to marshal") } return nil case "raw": - csvwriter := csv.NewWriter(out) - - if err := csvwriter.Write([]string{"name", "ip", "revoked", "last_pull", "type", "version", "auth_type"}); err != nil { - return fmt.Errorf("failed to write raw header: %w", err) - } - - for _, b := range bouncers { - valid := "validated" - if b.Revoked { - valid = "pending" - } - - lastPull := "" - if b.LastPull != nil { - lastPull = b.LastPull.Format(time.RFC3339) - } - - if err := csvwriter.Write([]string{b.Name, b.IPAddress, valid, lastPull, b.Type, b.Version, b.AuthType}); err != nil { - return fmt.Errorf("failed to write raw: %w", err) - } - } - - csvwriter.Flush() + return cli.listCSV(out, bouncers) } return nil @@ -140,7 +207,7 @@ func (cli *cliBouncers) newListCmd() *cobra.Command { Args: cobra.ExactArgs(0), DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { - return cli.list() + return cli.list(color.Output) }, } @@ -206,13 +273,14 @@ cscli bouncers add MyBouncerName --key `, return cmd } -func (cli *cliBouncers) deleteValid(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - // need to load config and db because PersistentPreRunE is not 
called for completions - +// validBouncerID returns a list of bouncer IDs for command completion +func (cli *cliBouncers) validBouncerID(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { var err error cfg := cli.cfg() + // need to load config and db because PersistentPreRunE is not called for completions + if err = require.LAPI(cfg); err != nil { cobra.CompError("unable to list bouncers " + err.Error()) return nil, cobra.ShellCompDirectiveNoFileComp @@ -261,7 +329,7 @@ func (cli *cliBouncers) newDeleteCmd() *cobra.Command { Args: cobra.MinimumNArgs(1), Aliases: []string{"remove"}, DisableAutoGenTag: true, - ValidArgsFunction: cli.deleteValid, + ValidArgsFunction: cli.validBouncerID, RunE: func(_ *cobra.Command, args []string) error { return cli.delete(args) }, @@ -292,7 +360,7 @@ func (cli *cliBouncers) prune(duration time.Duration, force bool) error { return nil } - getBouncersTable(color.Output, bouncers) + cli.listHuman(color.Output, bouncers) if !force { if yes, err := askYesNo( @@ -341,3 +409,84 @@ cscli bouncers prune -d 45m --force`, return cmd } + +func (cli *cliBouncers) inspectHuman(out io.Writer, bouncer *ent.Bouncer) { + t := newTable(out).Writer + + t.SetTitle("Bouncer: " + bouncer.Name) + + t.SetColumnConfigs([]table.ColumnConfig{ + {Number: 1, AutoMerge: true}, + }) + + lastPull := "" + if bouncer.LastPull != nil { + lastPull = bouncer.LastPull.String() + } + + t.AppendRows([]table.Row{ + {"Created At", bouncer.CreatedAt}, + {"Last Update", bouncer.UpdatedAt}, + {"Revoked?", bouncer.Revoked}, + {"IP Address", bouncer.IPAddress}, + {"Type", bouncer.Type}, + {"Version", bouncer.Version}, + {"Last Pull", lastPull}, + {"Auth type", bouncer.AuthType}, + {"OS", bouncer.GetOSNameAndVersion()}, + }) + + for _, ff := range bouncer.GetFeatureFlagList() { + t.AppendRow(table.Row{"Feature Flags", ff}) + } + + fmt.Fprintln(out, t.Render()) +} + +func (cli *cliBouncers) inspect(bouncer *ent.Bouncer) error { + out := 
color.Output + outputFormat := cli.cfg().Cscli.Output + + switch outputFormat { + case "human": + cli.inspectHuman(out, bouncer) + case "json": + enc := json.NewEncoder(out) + enc.SetIndent("", " ") + + if err := enc.Encode(newBouncerInfo(bouncer)); err != nil { + return errors.New("failed to marshal") + } + + return nil + default: + return fmt.Errorf("output format '%s' not supported for this command", outputFormat) + } + return nil +} + + +func (cli *cliBouncers) newInspectCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "inspect [bouncer_name]", + Short: "inspect a bouncer by name", + Example: `cscli bouncers inspect "bouncer1"`, + Args: cobra.ExactArgs(1), + DisableAutoGenTag: true, + ValidArgsFunction: cli.validBouncerID, + RunE: func(cmd *cobra.Command, args []string) error { + bouncerName := args[0] + + b, err := cli.db.Ent.Bouncer.Query(). + Where(bouncer.Name(bouncerName)). + Only(cmd.Context()) + if err != nil { + return fmt.Errorf("unable to read bouncer data '%s': %w", bouncerName, err) + } + + return cli.inspect(b) + }, + } + + return cmd +} diff --git a/cmd/crowdsec-cli/bouncers_table.go b/cmd/crowdsec-cli/bouncers_table.go deleted file mode 100644 index c32762ba266..00000000000 --- a/cmd/crowdsec-cli/bouncers_table.go +++ /dev/null @@ -1,33 +0,0 @@ -package main - -import ( - "io" - "time" - - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/table" - "github.com/crowdsecurity/crowdsec/pkg/database/ent" - "github.com/crowdsecurity/crowdsec/pkg/emoji" -) - -func getBouncersTable(out io.Writer, bouncers []*ent.Bouncer) { - t := newLightTable(out) - t.SetHeaders("Name", "IP Address", "Valid", "Last API pull", "Type", "Version", "Auth Type") - t.SetHeaderAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) - t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) - - for _, b := range bouncers { - revoked := emoji.CheckMark - if 
b.Revoked { - revoked = emoji.Prohibited - } - - lastPull := "" - if b.LastPull != nil { - lastPull = b.LastPull.Format(time.RFC3339) - } - - t.AddRow(b.Name, b.IPAddress, revoked, lastPull, b.Type, b.Version, b.AuthType) - } - - t.Render() -} diff --git a/cmd/crowdsec-cli/machines.go b/cmd/crowdsec-cli/machines.go index 746045d0eab..8796d3de9b8 100644 --- a/cmd/crowdsec-cli/machines.go +++ b/cmd/crowdsec-cli/machines.go @@ -6,6 +6,7 @@ import ( "encoding/json" "errors" "fmt" + "io" "math/big" "os" "slices" @@ -16,6 +17,7 @@ import ( "github.com/fatih/color" "github.com/go-openapi/strfmt" "github.com/google/uuid" + "github.com/jedib0t/go-pretty/v6/table" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "gopkg.in/yaml.v3" @@ -24,8 +26,10 @@ import ( "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/cwhub" "github.com/crowdsecurity/crowdsec/pkg/database" "github.com/crowdsecurity/crowdsec/pkg/database/ent" + "github.com/crowdsecurity/crowdsec/pkg/emoji" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -147,13 +151,126 @@ Note: This command requires database direct access, so is intended to be run on cmd.AddCommand(cli.newDeleteCmd()) cmd.AddCommand(cli.newValidateCmd()) cmd.AddCommand(cli.newPruneCmd()) + cmd.AddCommand(cli.newInspectCmd()) return cmd } -func (cli *cliMachines) list() error { - out := color.Output +func (*cliMachines) inspectHubHuman(out io.Writer, machine *ent.Machine) { + state := machine.Hubstate + + if len(state) == 0 { + fmt.Println("No hub items found for this machine") + return + } + + // group state rows by type for multiple tables + rowsByType := make(map[string][]table.Row) + + for itemType, items := range state { + for _, item := range items { + if _, ok := rowsByType[itemType]; !ok { + rowsByType[itemType] = make([]table.Row, 0) + } + + row := table.Row{item.Name, item.Status, item.Version} + rowsByType[itemType] = 
append(rowsByType[itemType], row) + } + } + + for itemType, rows := range rowsByType { + t := newTable(out).Writer + t.AppendHeader(table.Row{"Name", "Status", "Version"}) + t.SetTitle(itemType) + t.AppendRows(rows) + fmt.Fprintln(out, t.Render()) + } +} + +func (cli *cliMachines) listHuman(out io.Writer, machines ent.Machines) { + t := newLightTable(out).Writer + t.AppendHeader(table.Row{"Name", "IP Address", "Last Update", "Status", "Version", "OS", "Auth Type", "Last Heartbeat"}) + + for _, m := range machines { + validated := emoji.Prohibited + if m.IsValidated { + validated = emoji.CheckMark + } + + hb, active := getLastHeartbeat(m) + if !active { + hb = emoji.Warning + " " + hb + } + + t.AppendRow(table.Row{m.MachineId, m.IpAddress, m.UpdatedAt.Format(time.RFC3339), validated, m.Version, m.GetOSNameAndVersion(), m.AuthType, hb}) + } + + fmt.Fprintln(out, t.Render()) +} + +// machineInfo contains only the data we want for inspect/list: no hub status, scenarios, edges, etc. +type machineInfo struct { + CreatedAt time.Time `json:"created_at,omitempty"` + UpdatedAt time.Time `json:"updated_at,omitempty"` + LastPush *time.Time `json:"last_push,omitempty"` + LastHeartbeat *time.Time `json:"last_heartbeat,omitempty"` + MachineId string `json:"machineId,omitempty"` + IpAddress string `json:"ipAddress,omitempty"` + Version string `json:"version,omitempty"` + IsValidated bool `json:"isValidated,omitempty"` + AuthType string `json:"auth_type"` + OS string `json:"os,omitempty"` + Featureflags []string `json:"featureflags,omitempty"` + Datasources map[string]int64 `json:"datasources,omitempty"` +} + +func newMachineInfo(m *ent.Machine) machineInfo { + return machineInfo{ + CreatedAt: m.CreatedAt, + UpdatedAt: m.UpdatedAt, + LastPush: m.LastPush, + LastHeartbeat: m.LastHeartbeat, + MachineId: m.MachineId, + IpAddress: m.IpAddress, + Version: m.Version, + IsValidated: m.IsValidated, + AuthType: m.AuthType, + OS: m.GetOSNameAndVersion(), + Featureflags: 
m.GetFeatureFlagList(), + Datasources: m.Datasources, + } +} + +func (cli *cliMachines) listCSV(out io.Writer, machines ent.Machines) error { + csvwriter := csv.NewWriter(out) + + err := csvwriter.Write([]string{"machine_id", "ip_address", "updated_at", "validated", "version", "auth_type", "last_heartbeat", "os"}) + if err != nil { + return fmt.Errorf("failed to write header: %w", err) + } + + for _, m := range machines { + validated := "false" + if m.IsValidated { + validated = "true" + } + + hb := "-" + if m.LastHeartbeat != nil { + hb = m.LastHeartbeat.Format(time.RFC3339) + } + + if err := csvwriter.Write([]string{m.MachineId, m.IpAddress, m.UpdatedAt.Format(time.RFC3339), validated, m.Version, m.AuthType, hb, fmt.Sprintf("%s/%s", m.Osname, m.Osversion)}); err != nil { + return fmt.Errorf("failed to write raw output: %w", err) + } + } + + csvwriter.Flush() + + return nil +} +func (cli *cliMachines) list(out io.Writer) error { machines, err := cli.db.ListMachines() if err != nil { return fmt.Errorf("unable to list machines: %w", err) @@ -161,38 +278,23 @@ func (cli *cliMachines) list() error { switch cli.cfg().Cscli.Output { case "human": - getAgentsTable(out, machines) + cli.listHuman(out, machines) case "json": + info := make([]machineInfo, 0, len(machines)) + for _, m := range machines { + info = append(info, newMachineInfo(m)) + } + enc := json.NewEncoder(out) enc.SetIndent("", " ") - if err := enc.Encode(machines); err != nil { + if err := enc.Encode(info); err != nil { return errors.New("failed to marshal") } return nil case "raw": - csvwriter := csv.NewWriter(out) - - err := csvwriter.Write([]string{"machine_id", "ip_address", "updated_at", "validated", "version", "auth_type", "last_heartbeat"}) - if err != nil { - return fmt.Errorf("failed to write header: %w", err) - } - - for _, m := range machines { - validated := "false" - if m.IsValidated { - validated = "true" - } - - hb, _ := getLastHeartbeat(m) - - if err := csvwriter.Write([]string{m.MachineId, 
m.IpAddress, m.UpdatedAt.Format(time.RFC3339), validated, m.Version, m.AuthType, hb}); err != nil { - return fmt.Errorf("failed to write raw output: %w", err) - } - } - - csvwriter.Flush() + return cli.listCSV(out, machines) } return nil @@ -207,7 +309,7 @@ func (cli *cliMachines) newListCmd() *cobra.Command { Args: cobra.NoArgs, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { - return cli.list() + return cli.list(color.Output) }, } @@ -349,13 +451,14 @@ func (cli *cliMachines) add(args []string, machinePassword string, dumpFile stri return nil } -func (cli *cliMachines) deleteValid(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - // need to load config and db because PersistentPreRunE is not called for completions - +// validMachineID returns a list of machine IDs for command completion +func (cli *cliMachines) validMachineID(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { var err error cfg := cli.cfg() + // need to load config and db because PersistentPreRunE is not called for completions + if err = require.LAPI(cfg); err != nil { cobra.CompError("unable to list machines " + err.Error()) return nil, cobra.ShellCompDirectiveNoFileComp @@ -405,7 +508,7 @@ func (cli *cliMachines) newDeleteCmd() *cobra.Command { Args: cobra.MinimumNArgs(1), Aliases: []string{"remove"}, DisableAutoGenTag: true, - ValidArgsFunction: cli.deleteValid, + ValidArgsFunction: cli.validMachineID, RunE: func(_ *cobra.Command, args []string) error { return cli.delete(args) }, @@ -417,7 +520,7 @@ func (cli *cliMachines) newDeleteCmd() *cobra.Command { func (cli *cliMachines) prune(duration time.Duration, notValidOnly bool, force bool) error { if duration < 2*time.Minute && !notValidOnly { if yes, err := askYesNo( - "The duration you provided is less than 2 minutes. " + + "The duration you provided is less than 2 minutes. 
"+ "This can break installations if the machines are only temporarily disconnected. Continue?", false); err != nil { return err } else if !yes { @@ -442,11 +545,11 @@ func (cli *cliMachines) prune(duration time.Duration, notValidOnly bool, force b return nil } - getAgentsTable(color.Output, machines) + cli.listHuman(color.Output, machines) if !force { if yes, err := askYesNo( - "You are about to PERMANENTLY remove the above machines from the database. " + + "You are about to PERMANENTLY remove the above machines from the database. "+ "These will NOT be recoverable. Continue?", false); err != nil { return err } else if !yes { @@ -460,7 +563,7 @@ func (cli *cliMachines) prune(duration time.Duration, notValidOnly bool, force b return fmt.Errorf("unable to prune machines: %w", err) } - fmt.Fprintf(os.Stderr, "successfully delete %d machines\n", deleted) + fmt.Fprintf(os.Stderr, "successfully deleted %d machines\n", deleted) return nil } @@ -521,3 +624,136 @@ func (cli *cliMachines) newValidateCmd() *cobra.Command { return cmd } + +func (*cliMachines) inspectHuman(out io.Writer, machine *ent.Machine) { + t := newTable(out).Writer + + t.SetTitle("Machine: " + machine.MachineId) + + t.SetColumnConfigs([]table.ColumnConfig{ + {Number: 1, AutoMerge: true}, + }) + + t.AppendRows([]table.Row{ + {"IP Address", machine.IpAddress}, + {"Created At", machine.CreatedAt}, + {"Last Update", machine.UpdatedAt}, + {"Last Heartbeat", machine.LastHeartbeat}, + {"Validated?", machine.IsValidated}, + {"CrowdSec version", machine.Version}, + {"OS", machine.GetOSNameAndVersion()}, + {"Auth type", machine.AuthType}, + }) + + for dsName, dsCount := range machine.Datasources { + t.AppendRow(table.Row{"Datasources", fmt.Sprintf("%s: %d", dsName, dsCount)}) + } + + for _, ff := range machine.GetFeatureFlagList() { + t.AppendRow(table.Row{"Feature Flags", ff}) + } + + for _, coll := range machine.Hubstate[cwhub.COLLECTIONS] { + t.AppendRow(table.Row{"Collections", coll.Name}) + } + + 
fmt.Fprintln(out, t.Render()) +} + +func (cli *cliMachines) inspect(machine *ent.Machine) error { + out := color.Output + outputFormat := cli.cfg().Cscli.Output + + switch outputFormat { + case "human": + cli.inspectHuman(out, machine) + case "json": + enc := json.NewEncoder(out) + enc.SetIndent("", " ") + + if err := enc.Encode(newMachineInfo(machine)); err != nil { + return errors.New("failed to marshal") + } + + return nil + default: + return fmt.Errorf("output format '%s' not supported for this command", outputFormat) + } + + return nil +} + +func (cli *cliMachines) inspectHub(machine *ent.Machine) error { + out := color.Output + + switch cli.cfg().Cscli.Output { + case "human": + cli.inspectHubHuman(out, machine) + case "json": + enc := json.NewEncoder(out) + enc.SetIndent("", " ") + + if err := enc.Encode(machine.Hubstate); err != nil { + return errors.New("failed to marshal") + } + + return nil + case "raw": + csvwriter := csv.NewWriter(out) + + err := csvwriter.Write([]string{"type", "name", "status", "version"}) + if err != nil { + return fmt.Errorf("failed to write header: %w", err) + } + + rows := make([][]string, 0) + + for itemType, items := range machine.Hubstate { + for _, item := range items { + rows = append(rows, []string{itemType, item.Name, item.Status, item.Version}) + } + } + + for _, row := range rows { + if err := csvwriter.Write(row); err != nil { + return fmt.Errorf("failed to write raw output: %w", err) + } + } + + csvwriter.Flush() + } + + return nil +} + +func (cli *cliMachines) newInspectCmd() *cobra.Command { + var showHub bool + + cmd := &cobra.Command{ + Use: "inspect [machine_name]", + Short: "inspect a machine by name", + Example: `cscli machines inspect "machine1"`, + Args: cobra.ExactArgs(1), + DisableAutoGenTag: true, + ValidArgsFunction: cli.validMachineID, + RunE: func(_ *cobra.Command, args []string) error { + machineID := args[0] + machine, err := cli.db.QueryMachineByID(machineID) + if err != nil { + return 
fmt.Errorf("unable to read machine data '%s': %w", machineID, err) + } + + if showHub { + return cli.inspectHub(machine) + } + + return cli.inspect(machine) + }, + } + + flags := cmd.Flags() + + flags.BoolVarP(&showHub, "hub", "H", false, "show hub state") + + return cmd +} diff --git a/cmd/crowdsec-cli/machines_table.go b/cmd/crowdsec-cli/machines_table.go deleted file mode 100644 index 18e16bbde3a..00000000000 --- a/cmd/crowdsec-cli/machines_table.go +++ /dev/null @@ -1,33 +0,0 @@ -package main - -import ( - "io" - "time" - - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/table" - "github.com/crowdsecurity/crowdsec/pkg/database/ent" - "github.com/crowdsecurity/crowdsec/pkg/emoji" -) - -func getAgentsTable(out io.Writer, machines []*ent.Machine) { - t := newLightTable(out) - t.SetHeaders("Name", "IP Address", "Last Update", "Status", "Version", "Auth Type", "Last Heartbeat") - t.SetHeaderAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) - t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) - - for _, m := range machines { - validated := emoji.Prohibited - if m.IsValidated { - validated = emoji.CheckMark - } - - hb, active := getLastHeartbeat(m) - if !active { - hb = emoji.Warning + " " + hb - } - - t.AddRow(m.MachineId, m.IpAddress, m.UpdatedAt.Format(time.RFC3339), validated, m.Version, m.AuthType, hb) - } - - t.Render() -} diff --git a/cmd/crowdsec-cli/support.go b/cmd/crowdsec-cli/support.go index 061733ef8d3..1f98768f778 100644 --- a/cmd/crowdsec-cli/support.go +++ b/cmd/crowdsec-cli/support.go @@ -193,12 +193,9 @@ func (cli *cliSupport) dumpBouncers(zw *zip.Writer, db *database.Client) error { out := new(bytes.Buffer) - bouncers, err := db.ListBouncers() - if err != nil { - return fmt.Errorf("unable to list bouncers: %w", err) - } - - getBouncersTable(out, bouncers) + // call the "cscli bouncers 
list" command directly, skip any preRun + cm := cliBouncers{db: db, cfg: cli.cfg} + cm.list(out) stripped := stripAnsiString(out.String()) @@ -216,12 +213,9 @@ func (cli *cliSupport) dumpAgents(zw *zip.Writer, db *database.Client) error { out := new(bytes.Buffer) - machines, err := db.ListMachines() - if err != nil { - return fmt.Errorf("unable to list machines: %w", err) - } - - getAgentsTable(out, machines) + // call the "cscli machines list" command directly, skip any preRun + cm := cliMachines{db: db, cfg: cli.cfg} + cm.list(out) stripped := stripAnsiString(out.String()) @@ -617,6 +611,10 @@ cscli support dump -f /tmp/crowdsec-support.zip Args: cobra.NoArgs, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { + output := cli.cfg().Cscli.Output + if output != "human" { + return fmt.Errorf("output format %s not supported for this command", output) + } return cli.dump(cmd.Context(), outFile) }, } diff --git a/pkg/database/ent/helpers.go b/pkg/database/ent/helpers.go new file mode 100644 index 00000000000..c6cdbd7f32b --- /dev/null +++ b/pkg/database/ent/helpers.go @@ -0,0 +1,55 @@ +package ent + +import ( + "strings" +) + +func (m *Machine) GetOSNameAndVersion() string { + ret := m.Osname + if m.Osversion != "" { + if ret != "" { + ret += "/" + } + + ret += m.Osversion + } + + if ret == "" { + return "?" + } + + return ret +} + +func (b *Bouncer) GetOSNameAndVersion() string { + ret := b.Osname + if b.Osversion != "" { + if ret != "" { + ret += "/" + } + + ret += b.Osversion + } + + if ret == "" { + return "?" 
+ } + + return ret +} + +func (m *Machine) GetFeatureFlagList() []string { + if m.Featureflags == "" { + return nil + } + + return strings.Split(m.Featureflags, ",") +} + +func (b *Bouncer) GetFeatureFlagList() []string { + if b.Featureflags == "" { + return nil + } + + return strings.Split(b.Featureflags, ",") +} diff --git a/pkg/database/ent/schema/machine.go b/pkg/database/ent/schema/machine.go index 1566cf70b32..5b68f61b1a0 100644 --- a/pkg/database/ent/schema/machine.go +++ b/pkg/database/ent/schema/machine.go @@ -10,6 +10,7 @@ import ( // ItemState is defined here instead of using pkg/models/HubItem to avoid introducing a dependency type ItemState struct { + Name string `json:"name,omitempty"` Status string `json:"status,omitempty"` Version string `json:"version,omitempty"` } diff --git a/test/bats/30_machines.bats b/test/bats/30_machines.bats index 1af5e97dcb4..f8b63fb3173 100644 --- a/test/bats/30_machines.bats +++ b/test/bats/30_machines.bats @@ -62,7 +62,7 @@ teardown() { assert_output 1 } -@test "machines delete has autocompletion" { +@test "machines [delete|inspect] has autocompletion" { rune -0 cscli machines add -a -f /dev/null foo1 rune -0 cscli machines add -a -f /dev/null foo2 rune -0 cscli machines add -a -f /dev/null bar @@ -72,6 +72,11 @@ teardown() { assert_line --index 1 'foo2' refute_line 'bar' refute_line 'baz' + rune -0 cscli __complete machines inspect 'foo' + assert_line --index 0 'foo1' + assert_line --index 1 'foo2' + refute_line 'bar' + refute_line 'baz' } @test "heartbeat is initially null" { From 90e3299373c81e4b755becd475a1708f0fe424c9 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 4 Jul 2024 13:42:02 +0200 Subject: [PATCH 208/318] cscli refact: extract table wrapper to own package (#3105) * cscli refact: extract table wrapper to own package * lint --- cmd/crowdsec-cli/alerts.go | 14 ++- cmd/crowdsec-cli/alerts_table.go | 13 +- cmd/crowdsec-cli/bouncers.go | 5 +- cmd/crowdsec-cli/console.go 
| 2 +- cmd/crowdsec-cli/console_table.go | 10 +- .../{prettytable.go => cstable/cstable.go} | 46 +++++-- cmd/crowdsec-cli/decisions_table.go | 3 +- cmd/crowdsec-cli/hub.go | 2 +- cmd/crowdsec-cli/hubtest.go | 27 ++++- cmd/crowdsec-cli/hubtest_table.go | 44 +++---- cmd/crowdsec-cli/item_metrics.go | 10 +- cmd/crowdsec-cli/itemcli.go | 4 +- cmd/crowdsec-cli/items.go | 8 +- cmd/crowdsec-cli/machines.go | 11 +- cmd/crowdsec-cli/metrics.go | 12 +- cmd/crowdsec-cli/metrics_table.go | 113 +++++++++--------- cmd/crowdsec-cli/notifications.go | 2 +- cmd/crowdsec-cli/notifications_table.go | 12 +- cmd/crowdsec-cli/support.go | 4 +- cmd/crowdsec-cli/table/align.go | 12 -- cmd/crowdsec-cli/tables.go | 32 ----- cmd/crowdsec-cli/utils_table.go | 32 ++--- 22 files changed, 218 insertions(+), 200 deletions(-) rename cmd/crowdsec-cli/{prettytable.go => cstable/cstable.go} (77%) delete mode 100644 cmd/crowdsec-cli/table/align.go delete mode 100644 cmd/crowdsec-cli/tables.go diff --git a/cmd/crowdsec-cli/alerts.go b/cmd/crowdsec-cli/alerts.go index 0fe9852519c..0bb310739d9 100644 --- a/cmd/crowdsec-cli/alerts.go +++ b/cmd/crowdsec-cli/alerts.go @@ -21,6 +21,7 @@ import ( "github.com/crowdsecurity/go-cs-lib/maptools" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/apiclient" "github.com/crowdsecurity/crowdsec/pkg/cwversion" @@ -54,7 +55,8 @@ func DecisionsFromAlert(alert *models.Alert) string { } func (cli *cliAlerts) alertsToTable(alerts *models.GetAlertsResponse, printMachine bool) error { - switch cli.cfg().Cscli.Output { + cfg := cli.cfg() + switch cfg.Cscli.Output { case "raw": csvwriter := csv.NewWriter(os.Stdout) header := []string{"id", "scope", "value", "reason", "country", "as", "decisions", "created_at"} @@ -104,7 +106,7 @@ func (cli *cliAlerts) alertsToTable(alerts *models.GetAlertsResponse, printMachi return nil } - alertsTable(color.Output, alerts, 
printMachine) + alertsTable(color.Output, cfg.Cscli.Color, alerts, printMachine) } return nil @@ -138,7 +140,9 @@ func (cli *cliAlerts) displayOneAlert(alert *models.Alert, withDetail bool) erro return err } - alertDecisionsTable(color.Output, alert) + cfg := cli.cfg() + + alertDecisionsTable(color.Output, cfg.Cscli.Color, alert) if len(alert.Meta) > 0 { fmt.Printf("\n - Context :\n") @@ -146,7 +150,7 @@ func (cli *cliAlerts) displayOneAlert(alert *models.Alert, withDetail bool) erro return alert.Meta[i].Key < alert.Meta[j].Key }) - table := newTable(color.Output) + table := cstable.New(color.Output, cfg.Cscli.Color) table.SetRowLines(false) table.SetHeaders("Key", "Value") @@ -171,7 +175,7 @@ func (cli *cliAlerts) displayOneAlert(alert *models.Alert, withDetail bool) erro fmt.Printf("\n - Events :\n") for _, event := range alert.Events { - alertEventTable(color.Output, event) + alertEventTable(color.Output, cfg.Cscli.Color, event) } } diff --git a/cmd/crowdsec-cli/alerts_table.go b/cmd/crowdsec-cli/alerts_table.go index fbde4d2aaa9..29383457ced 100644 --- a/cmd/crowdsec-cli/alerts_table.go +++ b/cmd/crowdsec-cli/alerts_table.go @@ -9,11 +9,12 @@ import ( log "github.com/sirupsen/logrus" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" "github.com/crowdsecurity/crowdsec/pkg/models" ) -func alertsTable(out io.Writer, alerts *models.GetAlertsResponse, printMachine bool) { - t := newTable(out) +func alertsTable(out io.Writer, wantColor string, alerts *models.GetAlertsResponse, printMachine bool) { + t := cstable.New(out, wantColor) t.SetRowLines(false) header := []string{"ID", "value", "reason", "country", "as", "decisions", "created_at"} @@ -51,9 +52,9 @@ func alertsTable(out io.Writer, alerts *models.GetAlertsResponse, printMachine b t.Render() } -func alertDecisionsTable(out io.Writer, alert *models.Alert) { +func alertDecisionsTable(out io.Writer, wantColor string, alert *models.Alert) { foundActive := false - t := newTable(out) + t := 
cstable.New(out, wantColor) t.SetRowLines(false) t.SetHeaders("ID", "scope:value", "action", "expiration", "created_at") @@ -90,10 +91,10 @@ func alertDecisionsTable(out io.Writer, alert *models.Alert) { } } -func alertEventTable(out io.Writer, event *models.Event) { +func alertEventTable(out io.Writer, wantColor string, event *models.Event) { fmt.Fprintf(out, "\n- Date: %s\n", *event.Timestamp) - t := newTable(out) + t := cstable.New(out, wantColor) t.SetHeaders("Key", "Value") sort.Slice(event.Meta, func(i, j int) bool { return event.Meta[i].Key < event.Meta[j].Key diff --git a/cmd/crowdsec-cli/bouncers.go b/cmd/crowdsec-cli/bouncers.go index 0673473d72a..d304b1b7867 100644 --- a/cmd/crowdsec-cli/bouncers.go +++ b/cmd/crowdsec-cli/bouncers.go @@ -17,6 +17,7 @@ import ( log "github.com/sirupsen/logrus" "github.com/spf13/cobra" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" middlewares "github.com/crowdsecurity/crowdsec/pkg/apiserver/middlewares/v1" "github.com/crowdsecurity/crowdsec/pkg/database" @@ -90,7 +91,7 @@ Note: This command requires database direct access, so is intended to be run on } func (cli *cliBouncers) listHuman(out io.Writer, bouncers ent.Bouncers) { - t := newLightTable(out).Writer + t := cstable.NewLight(out, cli.cfg().Cscli.Color).Writer t.AppendHeader(table.Row{"Name", "IP Address", "Valid", "Last API pull", "Type", "Version", "Auth Type"}) for _, b := range bouncers { @@ -411,7 +412,7 @@ cscli bouncers prune -d 45m --force`, } func (cli *cliBouncers) inspectHuman(out io.Writer, bouncer *ent.Bouncer) { - t := newTable(out).Writer + t := cstable.NewLight(out, cli.cfg().Cscli.Color).Writer t.SetTitle("Bouncer: " + bouncer.Name) diff --git a/cmd/crowdsec-cli/console.go b/cmd/crowdsec-cli/console.go index 3c7df395b30..979c9f0ea60 100644 --- a/cmd/crowdsec-cli/console.go +++ b/cmd/crowdsec-cli/console.go @@ -276,7 +276,7 @@ func (cli *cliConsole) newStatusCmd() 
*cobra.Command { consoleCfg := cfg.API.Server.ConsoleConfig switch cfg.Cscli.Output { case "human": - cmdConsoleStatusTable(color.Output, *consoleCfg) + cmdConsoleStatusTable(color.Output, cfg.Cscli.Color, *consoleCfg) case "json": out := map[string](*bool){ csconfig.SEND_MANUAL_SCENARIOS: consoleCfg.ShareManualDecisions, diff --git a/cmd/crowdsec-cli/console_table.go b/cmd/crowdsec-cli/console_table.go index 4623f3bb62a..94976618573 100644 --- a/cmd/crowdsec-cli/console_table.go +++ b/cmd/crowdsec-cli/console_table.go @@ -3,17 +3,19 @@ package main import ( "io" - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/table" + "github.com/jedib0t/go-pretty/v6/text" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/emoji" ) -func cmdConsoleStatusTable(out io.Writer, consoleCfg csconfig.ConsoleConfig) { - t := newTable(out) +func cmdConsoleStatusTable(out io.Writer, wantColor string, consoleCfg csconfig.ConsoleConfig) { + t := cstable.New(out, wantColor) t.SetRowLines(false) t.SetHeaders("Option Name", "Activated", "Description") - t.SetHeaderAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft) + t.SetHeaderAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft) for _, option := range csconfig.CONSOLE_CONFIGS { activated := emoji.CrossMark diff --git a/cmd/crowdsec-cli/prettytable.go b/cmd/crowdsec-cli/cstable/cstable.go similarity index 77% rename from cmd/crowdsec-cli/prettytable.go rename to cmd/crowdsec-cli/cstable/cstable.go index f17472722f1..f7ddb604d84 100644 --- a/cmd/crowdsec-cli/prettytable.go +++ b/cmd/crowdsec-cli/cstable/cstable.go @@ -1,4 +1,4 @@ -package main +package cstable // transisional file to keep (minimal) backwards compatibility with the old table // we can migrate the code to the new dependency later, it can already use the Writer interface @@ -6,11 +6,36 @@ package main import ( "fmt" "io" + "os" 
"github.com/jedib0t/go-pretty/v6/table" "github.com/jedib0t/go-pretty/v6/text" + isatty "github.com/mattn/go-isatty" ) +func RenderTitle(out io.Writer, title string) { + if out == nil { + panic("renderTableTitle: out is nil") + } + + if title == "" { + return + } + + fmt.Fprintln(out, title) +} + +func shouldWeColorize(wantColor string) bool { + switch wantColor { + case "yes": + return true + case "no": + return false + default: + return isatty.IsTerminal(os.Stdout.Fd()) || isatty.IsCygwinTerminal(os.Stdout.Fd()) + } +} + type Table struct { Writer table.Writer output io.Writer @@ -18,7 +43,7 @@ type Table struct { alignHeader []text.Align } -func newTable(out io.Writer) *Table { +func New(out io.Writer, wantColor string) *Table { if out == nil { panic("newTable: out is nil") } @@ -26,14 +51,14 @@ func newTable(out io.Writer) *Table { t := table.NewWriter() // colorize output, use unicode box characters - fancy := shouldWeColorize() + fancy := shouldWeColorize(wantColor) - color := table.ColorOptions{} + colorOptions := table.ColorOptions{} if fancy { - color.Header = text.Colors{text.Italic} - color.Border = text.Colors{text.FgHiBlack} - color.Separator = text.Colors{text.FgHiBlack} + colorOptions.Header = text.Colors{text.Italic} + colorOptions.Border = text.Colors{text.FgHiBlack} + colorOptions.Separator = text.Colors{text.FgHiBlack} } // no upper/lower case transformations @@ -46,7 +71,7 @@ func newTable(out io.Writer) *Table { style := table.Style{ Box: box, - Color: color, + Color: colorOptions, Format: format, HTML: table.DefaultHTMLOptions, Options: table.OptionsDefault, @@ -63,8 +88,8 @@ func newTable(out io.Writer) *Table { } } -func newLightTable(output io.Writer) *Table { - t := newTable(output) +func NewLight(output io.Writer, wantColor string) *Table { + t := New(output, wantColor) s := t.Writer.Style() s.Box.Left = "" s.Box.LeftSeparator = "" @@ -100,6 +125,7 @@ func (t *Table) setColumnConfigs() { WidthMaxEnforcer: text.WrapSoft, }) } + 
t.Writer.SetColumnConfigs(configs) } diff --git a/cmd/crowdsec-cli/decisions_table.go b/cmd/crowdsec-cli/decisions_table.go index 10021e4dd4b..02952f93b85 100644 --- a/cmd/crowdsec-cli/decisions_table.go +++ b/cmd/crowdsec-cli/decisions_table.go @@ -5,11 +5,12 @@ import ( "io" "strconv" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" "github.com/crowdsecurity/crowdsec/pkg/models" ) func (cli *cliDecisions) decisionsTable(out io.Writer, alerts *models.GetAlertsResponse, printMachine bool) { - t := newTable(out) + t := cstable.New(out, cli.cfg().Cscli.Color) t.SetRowLines(false) header := []string{"ID", "Source", "Scope:Value", "Reason", "Action", "Country", "AS", "Events", "expiration", "Alert ID"} diff --git a/cmd/crowdsec-cli/hub.go b/cmd/crowdsec-cli/hub.go index 737b93d8da8..7e00eb64b33 100644 --- a/cmd/crowdsec-cli/hub.go +++ b/cmd/crowdsec-cli/hub.go @@ -72,7 +72,7 @@ func (cli *cliHub) list(all bool) error { } } - err = listItems(color.Output, cwhub.ItemTypes, items, true, cfg.Cscli.Output) + err = listItems(color.Output, cfg.Cscli.Color, cwhub.ItemTypes, items, true, cfg.Cscli.Output) if err != nil { return err } diff --git a/cmd/crowdsec-cli/hubtest.go b/cmd/crowdsec-cli/hubtest.go index 8796fa48a17..2a4635d39f1 100644 --- a/cmd/crowdsec-cli/hubtest.go +++ b/cmd/crowdsec-cli/hubtest.go @@ -371,7 +371,7 @@ func (cli *cliHubTest) NewRunCmd() *cobra.Command { switch cfg.Cscli.Output { case "human": - hubTestResultTable(color.Output, testResult) + hubTestResultTable(color.Output, cfg.Cscli.Color, testResult) case "json": jsonResult := make(map[string][]string, 0) jsonResult["success"] = make([]string, 0) @@ -480,7 +480,7 @@ func (cli *cliHubTest) NewListCmd() *cobra.Command { switch cfg.Cscli.Output { case "human": - hubTestListTable(color.Output, hubPtr.Tests) + hubTestListTable(color.Output, cfg.Cscli.Color, hubPtr.Tests) case "json": j, err := json.MarshalIndent(hubPtr.Tests, " ", " ") if err != nil { @@ -505,7 +505,9 @@ func (cli *cliHubTest) 
coverage(showScenarioCov bool, showParserCov bool, showAp if err := HubTest.LoadAllTests(); err != nil { return fmt.Errorf("unable to load all tests: %+v", err) } + var err error + scenarioCoverage := []hubtest.Coverage{} parserCoverage := []hubtest.Coverage{} appsecRuleCoverage := []hubtest.Coverage{} @@ -521,12 +523,15 @@ func (cli *cliHubTest) coverage(showScenarioCov bool, showParserCov bool, showAp if err != nil { return fmt.Errorf("while getting parser coverage: %w", err) } + parserTested := 0 + for _, test := range parserCoverage { if test.TestsCount > 0 { parserTested++ } } + parserCoveragePercent = int(math.Round((float64(parserTested) / float64(len(parserCoverage)) * 100))) } @@ -537,6 +542,7 @@ func (cli *cliHubTest) coverage(showScenarioCov bool, showParserCov bool, showAp } scenarioTested := 0 + for _, test := range scenarioCoverage { if test.TestsCount > 0 { scenarioTested++ @@ -553,11 +559,13 @@ func (cli *cliHubTest) coverage(showScenarioCov bool, showParserCov bool, showAp } appsecRuleTested := 0 + for _, test := range appsecRuleCoverage { if test.TestsCount > 0 { appsecRuleTested++ } } + appsecRuleCoveragePercent = int(math.Round((float64(appsecRuleTested) / float64(len(appsecRuleCoverage)) * 100))) } @@ -572,30 +580,34 @@ func (cli *cliHubTest) coverage(showScenarioCov bool, showParserCov bool, showAp case showAppsecCov: fmt.Printf("appsec_rules=%d%%", appsecRuleCoveragePercent) } + return nil } switch cfg.Cscli.Output { case "human": if showParserCov || showAll { - hubTestParserCoverageTable(color.Output, parserCoverage) + hubTestParserCoverageTable(color.Output, cfg.Cscli.Color, parserCoverage) } if showScenarioCov || showAll { - hubTestScenarioCoverageTable(color.Output, scenarioCoverage) + hubTestScenarioCoverageTable(color.Output, cfg.Cscli.Color, scenarioCoverage) } if showAppsecCov || showAll { - hubTestAppsecRuleCoverageTable(color.Output, appsecRuleCoverage) + hubTestAppsecRuleCoverageTable(color.Output, cfg.Cscli.Color, 
appsecRuleCoverage) } fmt.Println() + if showParserCov || showAll { fmt.Printf("PARSERS : %d%% of coverage\n", parserCoveragePercent) } + if showScenarioCov || showAll { fmt.Printf("SCENARIOS : %d%% of coverage\n", scenarioCoveragePercent) } + if showAppsecCov || showAll { fmt.Printf("APPSEC RULES : %d%% of coverage\n", appsecRuleCoveragePercent) } @@ -604,16 +616,21 @@ func (cli *cliHubTest) coverage(showScenarioCov bool, showParserCov bool, showAp if err != nil { return err } + fmt.Printf("%s", dump) + dump, err = json.MarshalIndent(scenarioCoverage, "", " ") if err != nil { return err } + fmt.Printf("%s", dump) + dump, err = json.MarshalIndent(appsecRuleCoverage, "", " ") if err != nil { return err } + fmt.Printf("%s", dump) default: return errors.New("only human/json output modes are supported") diff --git a/cmd/crowdsec-cli/hubtest_table.go b/cmd/crowdsec-cli/hubtest_table.go index 1b76e75263e..1fa0f990be2 100644 --- a/cmd/crowdsec-cli/hubtest_table.go +++ b/cmd/crowdsec-cli/hubtest_table.go @@ -4,16 +4,18 @@ import ( "fmt" "io" - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/table" + "github.com/jedib0t/go-pretty/v6/text" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" "github.com/crowdsecurity/crowdsec/pkg/emoji" "github.com/crowdsecurity/crowdsec/pkg/hubtest" ) -func hubTestResultTable(out io.Writer, testResult map[string]bool) { - t := newLightTable(out) +func hubTestResultTable(out io.Writer, wantColor string, testResult map[string]bool) { + t := cstable.NewLight(out, wantColor) t.SetHeaders("Test", "Result") - t.SetHeaderAlignment(table.AlignLeft) - t.SetAlignment(table.AlignLeft) + t.SetHeaderAlignment(text.AlignLeft) + t.SetAlignment(text.AlignLeft) for testName, success := range testResult { status := emoji.CheckMarkButton @@ -27,11 +29,11 @@ func hubTestResultTable(out io.Writer, testResult map[string]bool) { t.Render() } -func hubTestListTable(out io.Writer, tests []*hubtest.HubTestItem) { - t := newLightTable(out) +func 
hubTestListTable(out io.Writer, wantColor string, tests []*hubtest.HubTestItem) { + t := cstable.NewLight(out, wantColor) t.SetHeaders("Name", "Path") - t.SetHeaderAlignment(table.AlignLeft, table.AlignLeft) - t.SetAlignment(table.AlignLeft, table.AlignLeft) + t.SetHeaderAlignment(text.AlignLeft, text.AlignLeft) + t.SetAlignment(text.AlignLeft, text.AlignLeft) for _, test := range tests { t.AddRow(test.Name, test.Path) @@ -40,11 +42,11 @@ func hubTestListTable(out io.Writer, tests []*hubtest.HubTestItem) { t.Render() } -func hubTestParserCoverageTable(out io.Writer, coverage []hubtest.Coverage) { - t := newLightTable(out) +func hubTestParserCoverageTable(out io.Writer, wantColor string, coverage []hubtest.Coverage) { + t := cstable.NewLight(out, wantColor) t.SetHeaders("Parser", "Status", "Number of tests") - t.SetHeaderAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft) - t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft) + t.SetHeaderAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft) + t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft) parserTested := 0 @@ -61,11 +63,11 @@ func hubTestParserCoverageTable(out io.Writer, coverage []hubtest.Coverage) { t.Render() } -func hubTestAppsecRuleCoverageTable(out io.Writer, coverage []hubtest.Coverage) { - t := newLightTable(out) +func hubTestAppsecRuleCoverageTable(out io.Writer, wantColor string, coverage []hubtest.Coverage) { + t := cstable.NewLight(out, wantColor) t.SetHeaders("Appsec Rule", "Status", "Number of tests") - t.SetHeaderAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft) - t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft) + t.SetHeaderAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft) + t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft) parserTested := 0 @@ -82,11 +84,11 @@ func hubTestAppsecRuleCoverageTable(out io.Writer, coverage []hubtest.Coverage) t.Render() } -func hubTestScenarioCoverageTable(out io.Writer, 
coverage []hubtest.Coverage) { - t := newLightTable(out) +func hubTestScenarioCoverageTable(out io.Writer, wantColor string, coverage []hubtest.Coverage) { + t := cstable.NewLight(out, wantColor) t.SetHeaders("Scenario", "Status", "Number of tests") - t.SetHeaderAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft) - t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft) + t.SetHeaderAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft) + t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft) parserTested := 0 diff --git a/cmd/crowdsec-cli/item_metrics.go b/cmd/crowdsec-cli/item_metrics.go index 9459968790b..b4b8c3c26b5 100644 --- a/cmd/crowdsec-cli/item_metrics.go +++ b/cmd/crowdsec-cli/item_metrics.go @@ -18,23 +18,23 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -func ShowMetrics(prometheusURL string, hubItem *cwhub.Item) error { +func ShowMetrics(prometheusURL string, hubItem *cwhub.Item, wantColor string) error { switch hubItem.Type { case cwhub.PARSERS: metrics := GetParserMetric(prometheusURL, hubItem.Name) - parserMetricsTable(color.Output, hubItem.Name, metrics) + parserMetricsTable(color.Output, wantColor, hubItem.Name, metrics) case cwhub.SCENARIOS: metrics := GetScenarioMetric(prometheusURL, hubItem.Name) - scenarioMetricsTable(color.Output, hubItem.Name, metrics) + scenarioMetricsTable(color.Output, wantColor, hubItem.Name, metrics) case cwhub.COLLECTIONS: for _, sub := range hubItem.SubItems() { - if err := ShowMetrics(prometheusURL, sub); err != nil { + if err := ShowMetrics(prometheusURL, sub, wantColor); err != nil { return err } } case cwhub.APPSEC_RULES: metrics := GetAppsecRuleMetric(prometheusURL, hubItem.Name) - appsecMetricsTable(color.Output, hubItem.Name, metrics) + appsecMetricsTable(color.Output, wantColor, hubItem.Name, metrics) default: // no metrics for this item type } diff --git a/cmd/crowdsec-cli/itemcli.go b/cmd/crowdsec-cli/itemcli.go index 55396a10995..64c18ae89b1 100644 --- 
a/cmd/crowdsec-cli/itemcli.go +++ b/cmd/crowdsec-cli/itemcli.go @@ -381,7 +381,7 @@ func (cli cliItem) inspect(ctx context.Context, args []string, url string, diff continue } - if err = inspectItem(item, !noMetrics, cfg.Cscli.Output, cfg.Cscli.PrometheusUrl); err != nil { + if err = inspectItem(item, !noMetrics, cfg.Cscli.Output, cfg.Cscli.PrometheusUrl, cfg.Cscli.Color); err != nil { return err } @@ -442,7 +442,7 @@ func (cli cliItem) list(args []string, all bool) error { return err } - return listItems(color.Output, []string{cli.name}, items, false, cfg.Cscli.Output) + return listItems(color.Output, cfg.Cscli.Color, []string{cli.name}, items, false, cfg.Cscli.Output) } func (cli cliItem) newListCmd() *cobra.Command { diff --git a/cmd/crowdsec-cli/items.go b/cmd/crowdsec-cli/items.go index 9af432c32c1..b0c03922166 100644 --- a/cmd/crowdsec-cli/items.go +++ b/cmd/crowdsec-cli/items.go @@ -54,7 +54,7 @@ func selectItems(hub *cwhub.Hub, itemType string, args []string, installedOnly b return items, nil } -func listItems(out io.Writer, itemTypes []string, items map[string][]*cwhub.Item, omitIfEmpty bool, output string) error { +func listItems(out io.Writer, wantColor string, itemTypes []string, items map[string][]*cwhub.Item, omitIfEmpty bool, output string) error { switch output { case "human": nothingToDisplay := true @@ -64,7 +64,7 @@ func listItems(out io.Writer, itemTypes []string, items map[string][]*cwhub.Item continue } - listHubItemTable(out, "\n"+strings.ToUpper(itemType), items[itemType]) + listHubItemTable(out, wantColor, "\n"+strings.ToUpper(itemType), items[itemType]) nothingToDisplay = false } @@ -143,7 +143,7 @@ func listItems(out io.Writer, itemTypes []string, items map[string][]*cwhub.Item return nil } -func inspectItem(item *cwhub.Item, showMetrics bool, output string, prometheusURL string) error { +func inspectItem(item *cwhub.Item, showMetrics bool, output string, prometheusURL string, wantColor string) error { switch output { case "human", "raw": 
enc := yaml.NewEncoder(os.Stdout) @@ -174,7 +174,7 @@ func inspectItem(item *cwhub.Item, showMetrics bool, output string, prometheusUR if showMetrics { fmt.Printf("\nCurrent metrics: \n") - if err := ShowMetrics(prometheusURL, item); err != nil { + if err := ShowMetrics(prometheusURL, item, wantColor); err != nil { return err } } diff --git a/cmd/crowdsec-cli/machines.go b/cmd/crowdsec-cli/machines.go index 8796d3de9b8..2da5e20229c 100644 --- a/cmd/crowdsec-cli/machines.go +++ b/cmd/crowdsec-cli/machines.go @@ -24,6 +24,7 @@ import ( "github.com/crowdsecurity/machineid" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" @@ -156,7 +157,7 @@ Note: This command requires database direct access, so is intended to be run on return cmd } -func (*cliMachines) inspectHubHuman(out io.Writer, machine *ent.Machine) { +func (cli *cliMachines) inspectHubHuman(out io.Writer, machine *ent.Machine) { state := machine.Hubstate if len(state) == 0 { @@ -179,7 +180,7 @@ func (*cliMachines) inspectHubHuman(out io.Writer, machine *ent.Machine) { } for itemType, rows := range rowsByType { - t := newTable(out).Writer + t := cstable.New(out, cli.cfg().Cscli.Color).Writer t.AppendHeader(table.Row{"Name", "Status", "Version"}) t.SetTitle(itemType) t.AppendRows(rows) @@ -188,7 +189,7 @@ func (*cliMachines) inspectHubHuman(out io.Writer, machine *ent.Machine) { } func (cli *cliMachines) listHuman(out io.Writer, machines ent.Machines) { - t := newLightTable(out).Writer + t := cstable.NewLight(out, cli.cfg().Cscli.Color).Writer t.AppendHeader(table.Row{"Name", "IP Address", "Last Update", "Status", "Version", "OS", "Auth Type", "Last Heartbeat"}) for _, m := range machines { @@ -625,8 +626,8 @@ func (cli *cliMachines) newValidateCmd() *cobra.Command { return cmd } -func (*cliMachines) inspectHuman(out io.Writer, machine 
*ent.Machine) { - t := newTable(out).Writer +func (cli *cliMachines) inspectHuman(out io.Writer, machine *ent.Machine) { + t := cstable.New(out, cli.cfg().Cscli.Color).Writer t.SetTitle("Machine: " + machine.MachineId) diff --git a/cmd/crowdsec-cli/metrics.go b/cmd/crowdsec-cli/metrics.go index 7858a7a7b2b..a9457bb849e 100644 --- a/cmd/crowdsec-cli/metrics.go +++ b/cmd/crowdsec-cli/metrics.go @@ -19,6 +19,8 @@ import ( "github.com/crowdsecurity/go-cs-lib/maptools" "github.com/crowdsecurity/go-cs-lib/trace" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" ) type ( @@ -49,7 +51,7 @@ var ( ) type metricSection interface { - Table(out io.Writer, noUnit bool, showEmpty bool) + Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) Description() (string, string) } @@ -263,7 +265,7 @@ func NewCLIMetrics(cfg configGetter) *cliMetrics { } } -func (ms metricStore) Format(out io.Writer, sections []string, formatType string, noUnit bool) error { +func (ms metricStore) Format(out io.Writer, wantColor string, sections []string, formatType string, noUnit bool) error { // copy only the sections we want want := map[string]metricSection{} @@ -282,7 +284,7 @@ func (ms metricStore) Format(out io.Writer, sections []string, formatType string switch formatType { case "human": for _, section := range maptools.SortedKeys(want) { - want[section].Table(out, noUnit, showEmpty) + want[section].Table(out, wantColor, noUnit, showEmpty) } case "json": x, err := json.MarshalIndent(want, "", " ") @@ -331,7 +333,7 @@ func (cli *cliMetrics) show(sections []string, url string, noUnit bool) error { } } - return ms.Format(color.Output, sections, cfg.Cscli.Output, noUnit) + return ms.Format(color.Output, cfg.Cscli.Color, sections, cfg.Cscli.Output, noUnit) } func (cli *cliMetrics) NewCommand() *cobra.Command { @@ -449,7 +451,7 @@ func (cli *cliMetrics) list() error { switch cli.cfg().Cscli.Output { case "human": - t := newTable(color.Output) + t := cstable.New(color.Output, 
cli.cfg().Cscli.Color) t.SetRowLines(true) t.SetHeaders("Type", "Title", "Description") diff --git a/cmd/crowdsec-cli/metrics_table.go b/cmd/crowdsec-cli/metrics_table.go index f42d5bdaf91..38a4d3bbcc4 100644 --- a/cmd/crowdsec-cli/metrics_table.go +++ b/cmd/crowdsec-cli/metrics_table.go @@ -7,17 +7,18 @@ import ( "sort" "strconv" + "github.com/jedib0t/go-pretty/v6/text" log "github.com/sirupsen/logrus" "github.com/crowdsecurity/go-cs-lib/maptools" - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/table" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" ) // ErrNilTable means a nil pointer was passed instead of a table instance. This is a programming error. var ErrNilTable = errors.New("nil table") -func lapiMetricsToTable(t *Table, stats map[string]map[string]map[string]int) int { +func lapiMetricsToTable(t *cstable.Table, stats map[string]map[string]map[string]int) int { // stats: machine -> route -> method -> count // sort keys to keep consistent order when printing machineKeys := []string{} @@ -55,7 +56,7 @@ func lapiMetricsToTable(t *Table, stats map[string]map[string]map[string]int) in return numRows } -func wlMetricsToTable(t *Table, stats map[string]map[string]map[string]int, noUnit bool) (int, error) { +func wlMetricsToTable(t *cstable.Table, stats map[string]map[string]map[string]int, noUnit bool) (int, error) { if t == nil { return 0, ErrNilTable } @@ -93,7 +94,7 @@ func wlMetricsToTable(t *Table, stats map[string]map[string]map[string]int, noUn return numRows, nil } -func metricsToTable(t *Table, stats map[string]map[string]int, keys []string, noUnit bool) (int, error) { +func metricsToTable(t *cstable.Table, stats map[string]map[string]int, keys []string, noUnit bool) (int, error) { if t == nil { return 0, ErrNilTable } @@ -145,11 +146,11 @@ func (s statBucket) Process(bucket, metric string, val int) { s[bucket][metric] += val } -func (s statBucket) Table(out io.Writer, noUnit bool, showEmpty bool) { - t := newTable(out) +func (s 
statBucket) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { + t := cstable.New(out, wantColor) t.SetRowLines(false) t.SetHeaders("Scenario", "Current Count", "Overflows", "Instantiated", "Poured", "Expired") - t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) + t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) keys := []string{"curr_count", "overflow", "instantiation", "pour", "underflow"} @@ -157,7 +158,7 @@ func (s statBucket) Table(out io.Writer, noUnit bool, showEmpty bool) { log.Warningf("while collecting scenario stats: %s", err) } else if numRows > 0 || showEmpty { title, _ := s.Description() - renderTableTitle(out, "\n"+title+":") + cstable.RenderTitle(out, "\n"+title+":") t.Render() } } @@ -178,11 +179,11 @@ func (s statAcquis) Process(source, metric string, val int) { s[source][metric] += val } -func (s statAcquis) Table(out io.Writer, noUnit bool, showEmpty bool) { - t := newTable(out) +func (s statAcquis) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { + t := cstable.New(out, wantColor) t.SetRowLines(false) t.SetHeaders("Source", "Lines read", "Lines parsed", "Lines unparsed", "Lines poured to bucket", "Lines whitelisted") - t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) + t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) keys := []string{"reads", "parsed", "unparsed", "pour", "whitelisted"} @@ -190,7 +191,7 @@ func (s statAcquis) Table(out io.Writer, noUnit bool, showEmpty bool) { log.Warningf("while collecting acquis stats: %s", err) } else if numRows > 0 || showEmpty { title, _ := s.Description() - renderTableTitle(out, "\n"+title+":") + cstable.RenderTitle(out, "\n"+title+":") t.Render() } } @@ -208,11 +209,11 @@ func (s statAppsecEngine) Process(appsecEngine, metric string, val int) 
{ s[appsecEngine][metric] += val } -func (s statAppsecEngine) Table(out io.Writer, noUnit bool, showEmpty bool) { - t := newTable(out) +func (s statAppsecEngine) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { + t := cstable.New(out, wantColor) t.SetRowLines(false) t.SetHeaders("Appsec Engine", "Processed", "Blocked") - t.SetAlignment(table.AlignLeft, table.AlignLeft) + t.SetAlignment(text.AlignLeft, text.AlignLeft) keys := []string{"processed", "blocked"} @@ -220,7 +221,7 @@ func (s statAppsecEngine) Table(out io.Writer, noUnit bool, showEmpty bool) { log.Warningf("while collecting appsec stats: %s", err) } else if numRows > 0 || showEmpty { title, _ := s.Description() - renderTableTitle(out, "\n"+title+":") + cstable.RenderTitle(out, "\n"+title+":") t.Render() } } @@ -242,19 +243,19 @@ func (s statAppsecRule) Process(appsecEngine, appsecRule string, metric string, s[appsecEngine][appsecRule][metric] += val } -func (s statAppsecRule) Table(out io.Writer, noUnit bool, showEmpty bool) { +func (s statAppsecRule) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { for appsecEngine, appsecEngineRulesStats := range s { - t := newTable(out) + t := cstable.New(out, wantColor) t.SetRowLines(false) t.SetHeaders("Rule ID", "Triggered") - t.SetAlignment(table.AlignLeft, table.AlignLeft) + t.SetAlignment(text.AlignLeft, text.AlignLeft) keys := []string{"triggered"} if numRows, err := metricsToTable(t, appsecEngineRulesStats, keys, noUnit); err != nil { log.Warningf("while collecting appsec rules stats: %s", err) } else if numRows > 0 || showEmpty { - renderTableTitle(out, fmt.Sprintf("\nAppsec '%s' Rules Metrics:", appsecEngine)) + cstable.RenderTitle(out, fmt.Sprintf("\nAppsec '%s' Rules Metrics:", appsecEngine)) t.Render() } } @@ -277,17 +278,17 @@ func (s statWhitelist) Process(whitelist, reason, metric string, val int) { s[whitelist][reason][metric] += val } -func (s statWhitelist) Table(out io.Writer, noUnit bool, showEmpty bool) { 
- t := newTable(out) +func (s statWhitelist) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { + t := cstable.New(out, wantColor) t.SetRowLines(false) t.SetHeaders("Whitelist", "Reason", "Hits", "Whitelisted") - t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) + t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) if numRows, err := wlMetricsToTable(t, s, noUnit); err != nil { log.Warningf("while collecting parsers stats: %s", err) } else if numRows > 0 || showEmpty { title, _ := s.Description() - renderTableTitle(out, "\n"+title+":") + cstable.RenderTitle(out, "\n"+title+":") t.Render() } } @@ -307,11 +308,11 @@ func (s statParser) Process(parser, metric string, val int) { s[parser][metric] += val } -func (s statParser) Table(out io.Writer, noUnit bool, showEmpty bool) { - t := newTable(out) +func (s statParser) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { + t := cstable.New(out, wantColor) t.SetRowLines(false) t.SetHeaders("Parsers", "Hits", "Parsed", "Unparsed") - t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) + t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) keys := []string{"hits", "parsed", "unparsed"} @@ -319,7 +320,7 @@ func (s statParser) Table(out io.Writer, noUnit bool, showEmpty bool) { log.Warningf("while collecting parsers stats: %s", err) } else if numRows > 0 || showEmpty { title, _ := s.Description() - renderTableTitle(out, "\n"+title+":") + cstable.RenderTitle(out, "\n"+title+":") t.Render() } } @@ -339,11 +340,11 @@ func (s statStash) Process(name, mtype string, val int) { } } -func (s statStash) Table(out io.Writer, noUnit bool, showEmpty bool) { - t := newTable(out) +func (s statStash) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { + t := cstable.New(out, wantColor) t.SetRowLines(false) t.SetHeaders("Name", "Type", "Items") - 
t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft) + t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft) // unfortunately, we can't reuse metricsToTable as the structure is too different :/ numRows := 0 @@ -363,7 +364,7 @@ func (s statStash) Table(out io.Writer, noUnit bool, showEmpty bool) { if numRows > 0 || showEmpty { title, _ := s.Description() - renderTableTitle(out, "\n"+title+":") + cstable.RenderTitle(out, "\n"+title+":") t.Render() } } @@ -381,11 +382,11 @@ func (s statLapi) Process(route, method string, val int) { s[route][method] += val } -func (s statLapi) Table(out io.Writer, noUnit bool, showEmpty bool) { - t := newTable(out) +func (s statLapi) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { + t := cstable.New(out, wantColor) t.SetRowLines(false) t.SetHeaders("Route", "Method", "Hits") - t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft) + t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft) // unfortunately, we can't reuse metricsToTable as the structure is too different :/ numRows := 0 @@ -415,7 +416,7 @@ func (s statLapi) Table(out io.Writer, noUnit bool, showEmpty bool) { if numRows > 0 || showEmpty { title, _ := s.Description() - renderTableTitle(out, "\n"+title+":") + cstable.RenderTitle(out, "\n"+title+":") t.Render() } } @@ -437,17 +438,17 @@ func (s statLapiMachine) Process(machine, route, method string, val int) { s[machine][route][method] += val } -func (s statLapiMachine) Table(out io.Writer, noUnit bool, showEmpty bool) { - t := newTable(out) +func (s statLapiMachine) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { + t := cstable.New(out, wantColor) t.SetRowLines(false) t.SetHeaders("Machine", "Route", "Method", "Hits") - t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) + t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) numRows := lapiMetricsToTable(t, s) if numRows > 0 || 
showEmpty { title, _ := s.Description() - renderTableTitle(out, "\n"+title+":") + cstable.RenderTitle(out, "\n"+title+":") t.Render() } } @@ -469,17 +470,17 @@ func (s statLapiBouncer) Process(bouncer, route, method string, val int) { s[bouncer][route][method] += val } -func (s statLapiBouncer) Table(out io.Writer, noUnit bool, showEmpty bool) { - t := newTable(out) +func (s statLapiBouncer) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { + t := cstable.New(out, wantColor) t.SetRowLines(false) t.SetHeaders("Bouncer", "Route", "Method", "Hits") - t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) + t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) numRows := lapiMetricsToTable(t, s) if numRows > 0 || showEmpty { title, _ := s.Description() - renderTableTitle(out, "\n"+title+":") + cstable.RenderTitle(out, "\n"+title+":") t.Render() } } @@ -509,11 +510,11 @@ func (s statLapiDecision) Process(bouncer, fam string, val int) { s[bouncer] = x } -func (s statLapiDecision) Table(out io.Writer, noUnit bool, showEmpty bool) { - t := newTable(out) +func (s statLapiDecision) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { + t := cstable.New(out, wantColor) t.SetRowLines(false) t.SetHeaders("Bouncer", "Empty answers", "Non-empty answers") - t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft) + t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft) numRows := 0 @@ -529,7 +530,7 @@ func (s statLapiDecision) Table(out io.Writer, noUnit bool, showEmpty bool) { if numRows > 0 || showEmpty { title, _ := s.Description() - renderTableTitle(out, "\n"+title+":") + cstable.RenderTitle(out, "\n"+title+":") t.Render() } } @@ -552,11 +553,11 @@ func (s statDecision) Process(reason, origin, action string, val int) { s[reason][origin][action] += val } -func (s statDecision) Table(out io.Writer, noUnit bool, showEmpty bool) { - t := newTable(out) +func (s statDecision) 
Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { + t := cstable.New(out, wantColor) t.SetRowLines(false) t.SetHeaders("Reason", "Origin", "Action", "Count") - t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) + t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) numRows := 0 @@ -577,7 +578,7 @@ func (s statDecision) Table(out io.Writer, noUnit bool, showEmpty bool) { if numRows > 0 || showEmpty { title, _ := s.Description() - renderTableTitle(out, "\n"+title+":") + cstable.RenderTitle(out, "\n"+title+":") t.Render() } } @@ -591,11 +592,11 @@ func (s statAlert) Process(reason string, val int) { s[reason] += val } -func (s statAlert) Table(out io.Writer, noUnit bool, showEmpty bool) { - t := newTable(out) +func (s statAlert) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { + t := cstable.New(out, wantColor) t.SetRowLines(false) t.SetHeaders("Reason", "Count") - t.SetAlignment(table.AlignLeft, table.AlignLeft) + t.SetAlignment(text.AlignLeft, text.AlignLeft) numRows := 0 @@ -610,7 +611,7 @@ func (s statAlert) Table(out io.Writer, noUnit bool, showEmpty bool) { if numRows > 0 || showEmpty { title, _ := s.Description() - renderTableTitle(out, "\n"+title+":") + cstable.RenderTitle(out, "\n"+title+":") t.Render() } } diff --git a/cmd/crowdsec-cli/notifications.go b/cmd/crowdsec-cli/notifications.go index 768d6a1c47e..4d757fca44e 100644 --- a/cmd/crowdsec-cli/notifications.go +++ b/cmd/crowdsec-cli/notifications.go @@ -167,7 +167,7 @@ func (cli *cliNotifications) NewListCmd() *cobra.Command { } if cfg.Cscli.Output == "human" { - notificationListTable(color.Output, ncfgs) + notificationListTable(color.Output, cfg.Cscli.Color, ncfgs) } else if cfg.Cscli.Output == "json" { x, err := json.MarshalIndent(ncfgs, "", " ") if err != nil { diff --git a/cmd/crowdsec-cli/notifications_table.go b/cmd/crowdsec-cli/notifications_table.go index b96c8ca4783..2976797bd8a 100644 --- 
a/cmd/crowdsec-cli/notifications_table.go +++ b/cmd/crowdsec-cli/notifications_table.go @@ -5,15 +5,17 @@ import ( "sort" "strings" - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/table" + "github.com/jedib0t/go-pretty/v6/text" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" "github.com/crowdsecurity/crowdsec/pkg/emoji" ) -func notificationListTable(out io.Writer, ncfgs map[string]NotificationsCfg) { - t := newLightTable(out) +func notificationListTable(out io.Writer, wantColor string, ncfgs map[string]NotificationsCfg) { + t := cstable.NewLight(out, wantColor) t.SetHeaders("Active", "Name", "Type", "Profile name") - t.SetHeaderAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) - t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) + t.SetHeaderAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) + t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) keys := make([]string, 0, len(ncfgs)) for k := range ncfgs { diff --git a/cmd/crowdsec-cli/support.go b/cmd/crowdsec-cli/support.go index 1f98768f778..b026d31412e 100644 --- a/cmd/crowdsec-cli/support.go +++ b/cmd/crowdsec-cli/support.go @@ -94,7 +94,7 @@ func (cli *cliSupport) dumpMetrics(ctx context.Context, zw *zip.Writer) error { return err } - if err := ms.Format(humanMetrics, nil, "human", false); err != nil { + if err := ms.Format(humanMetrics, cfg.Cscli.Color, nil, "human", false); err != nil { return fmt.Errorf("could not format prometheus metrics: %w", err) } @@ -173,7 +173,7 @@ func (cli *cliSupport) dumpHubItems(zw *zip.Writer, hub *cwhub.Hub, itemType str return fmt.Errorf("could not collect %s list: %w", itemType, err) } - if err := listItems(out, []string{itemType}, items, false, "human"); err != nil { + if err := listItems(out, cli.cfg().Cscli.Color, []string{itemType}, items, false, "human"); err != nil { return fmt.Errorf("could not list %s: %w", itemType, err) } diff --git 
a/cmd/crowdsec-cli/table/align.go b/cmd/crowdsec-cli/table/align.go deleted file mode 100644 index e0582007c57..00000000000 --- a/cmd/crowdsec-cli/table/align.go +++ /dev/null @@ -1,12 +0,0 @@ -package table - -import ( - "github.com/jedib0t/go-pretty/v6/text" -) - -// temporary, backward compatibility only - -const ( - AlignLeft = text.AlignLeft - AlignRight = text.AlignRight -) diff --git a/cmd/crowdsec-cli/tables.go b/cmd/crowdsec-cli/tables.go deleted file mode 100644 index e6dba0c2644..00000000000 --- a/cmd/crowdsec-cli/tables.go +++ /dev/null @@ -1,32 +0,0 @@ -package main - -import ( - "fmt" - "io" - "os" - - isatty "github.com/mattn/go-isatty" -) - -func shouldWeColorize() bool { - switch csConfig.Cscli.Color { - case "yes": - return true - case "no": - return false - default: - return isatty.IsTerminal(os.Stdout.Fd()) || isatty.IsCygwinTerminal(os.Stdout.Fd()) - } -} - -func renderTableTitle(out io.Writer, title string) { - if out == nil { - panic("renderTableTitle: out is nil") - } - - if title == "" { - return - } - - fmt.Fprintln(out, title) -} diff --git a/cmd/crowdsec-cli/utils_table.go b/cmd/crowdsec-cli/utils_table.go index d7d26a65c12..c0043e47ee3 100644 --- a/cmd/crowdsec-cli/utils_table.go +++ b/cmd/crowdsec-cli/utils_table.go @@ -5,28 +5,30 @@ import ( "io" "strconv" - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/table" + "github.com/jedib0t/go-pretty/v6/text" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" "github.com/crowdsecurity/crowdsec/pkg/cwhub" "github.com/crowdsecurity/crowdsec/pkg/emoji" ) -func listHubItemTable(out io.Writer, title string, items []*cwhub.Item) { - t := newLightTable(out) +func listHubItemTable(out io.Writer, wantColor string, title string, items []*cwhub.Item) { + t := cstable.NewLight(out, wantColor) t.SetHeaders("Name", fmt.Sprintf("%v Status", emoji.Package), "Version", "Local Path") - t.SetHeaderAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) - 
t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) + t.SetHeaderAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) + t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) for _, item := range items { status := fmt.Sprintf("%v %s", item.State.Emoji(), item.State.Text()) t.AddRow(item.Name, status, item.State.LocalVersion, item.State.LocalPath) } - renderTableTitle(out, title) + cstable.RenderTitle(out, title) t.Render() } -func appsecMetricsTable(out io.Writer, itemName string, metrics map[string]int) { - t := newTable(out) +func appsecMetricsTable(out io.Writer, wantColor string, itemName string, metrics map[string]int) { + t := cstable.NewLight(out, wantColor) t.SetHeaders("Inband Hits", "Outband Hits") t.AddRow( @@ -34,16 +36,16 @@ func appsecMetricsTable(out io.Writer, itemName string, metrics map[string]int) strconv.Itoa(metrics["outband_hits"]), ) - renderTableTitle(out, fmt.Sprintf("\n - (AppSec Rule) %s:", itemName)) + cstable.RenderTitle(out, fmt.Sprintf("\n - (AppSec Rule) %s:", itemName)) t.Render() } -func scenarioMetricsTable(out io.Writer, itemName string, metrics map[string]int) { +func scenarioMetricsTable(out io.Writer, wantColor string, itemName string, metrics map[string]int) { if metrics["instantiation"] == 0 { return } - t := newTable(out) + t := cstable.New(out, wantColor) t.SetHeaders("Current Count", "Overflows", "Instantiated", "Poured", "Expired") t.AddRow( @@ -54,12 +56,12 @@ func scenarioMetricsTable(out io.Writer, itemName string, metrics map[string]int strconv.Itoa(metrics["underflow"]), ) - renderTableTitle(out, fmt.Sprintf("\n - (Scenario) %s:", itemName)) + cstable.RenderTitle(out, fmt.Sprintf("\n - (Scenario) %s:", itemName)) t.Render() } -func parserMetricsTable(out io.Writer, itemName string, metrics map[string]map[string]int) { - t := newTable(out) +func parserMetricsTable(out io.Writer, wantColor string, itemName string, metrics 
map[string]map[string]int) { + t := cstable.New(out, wantColor) t.SetHeaders("Parsers", "Hits", "Parsed", "Unparsed") // don't show table if no hits @@ -79,7 +81,7 @@ func parserMetricsTable(out io.Writer, itemName string, metrics map[string]map[s } if showTable { - renderTableTitle(out, fmt.Sprintf("\n - (Parser) %s:", itemName)) + cstable.RenderTitle(out, fmt.Sprintf("\n - (Parser) %s:", itemName)) t.Render() } } From 7cc33b585433b5f94437821b732fc09511a7176a Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 4 Jul 2024 14:43:35 +0200 Subject: [PATCH 209/318] cscli refact: extract metrics to own package (#3106) --- cmd/crowdsec-cli/item_metrics.go | 36 ----------------- cmd/crowdsec-cli/main.go | 4 +- cmd/crowdsec-cli/{ => metrics}/metrics.go | 7 +++- .../{ => metrics}/metrics_table.go | 2 +- cmd/crowdsec-cli/metrics/number.go | 40 +++++++++++++++++++ cmd/crowdsec-cli/support.go | 3 +- 6 files changed, 51 insertions(+), 41 deletions(-) rename cmd/crowdsec-cli/{ => metrics}/metrics.go (98%) rename cmd/crowdsec-cli/{ => metrics}/metrics_table.go (99%) create mode 100644 cmd/crowdsec-cli/metrics/number.go diff --git a/cmd/crowdsec-cli/item_metrics.go b/cmd/crowdsec-cli/item_metrics.go index b4b8c3c26b5..f00ae08b00b 100644 --- a/cmd/crowdsec-cli/item_metrics.go +++ b/cmd/crowdsec-cli/item_metrics.go @@ -1,8 +1,6 @@ package main import ( - "fmt" - "math" "net/http" "strconv" "strings" @@ -291,37 +289,3 @@ func GetPrometheusMetric(url string) []*prom2json.Family { return result } - -type unit struct { - value int64 - symbol string -} - -var ranges = []unit{ - {value: 1e18, symbol: "E"}, - {value: 1e15, symbol: "P"}, - {value: 1e12, symbol: "T"}, - {value: 1e9, symbol: "G"}, - {value: 1e6, symbol: "M"}, - {value: 1e3, symbol: "k"}, - {value: 1, symbol: ""}, -} - -func formatNumber(num int) string { - goodUnit := unit{} - - for _, u := range ranges { - if int64(num) >= u.value { - goodUnit = u - break - } - } - - if goodUnit.value == 
1 { - return fmt.Sprintf("%d%s", num, goodUnit.symbol) - } - - res := math.Round(float64(num)/float64(goodUnit.value)*100) / 100 - - return fmt.Sprintf("%.2f%s", res, goodUnit.symbol) -} diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index a712960810a..bd9d8988132 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -14,6 +14,8 @@ import ( "github.com/crowdsecurity/go-cs-lib/trace" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/metrics" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/fflag" ) @@ -250,7 +252,7 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewCLIVersion().NewCommand()) cmd.AddCommand(NewCLIConfig(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIHub(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLIMetrics(cli.cfg).NewCommand()) + cmd.AddCommand(metrics.NewCLI(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIDashboard(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIDecisions(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIAlerts(cli.cfg).NewCommand()) diff --git a/cmd/crowdsec-cli/metrics.go b/cmd/crowdsec-cli/metrics/metrics.go similarity index 98% rename from cmd/crowdsec-cli/metrics.go rename to cmd/crowdsec-cli/metrics/metrics.go index a9457bb849e..967b57de707 100644 --- a/cmd/crowdsec-cli/metrics.go +++ b/cmd/crowdsec-cli/metrics/metrics.go @@ -1,4 +1,4 @@ -package main +package metrics import ( "encoding/json" @@ -21,6 +21,7 @@ import ( "github.com/crowdsecurity/go-cs-lib/trace" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" ) type ( @@ -255,11 +256,13 @@ func (ms metricStore) Fetch(url string) error { return nil } +type configGetter func() *csconfig.Config + type cliMetrics struct { cfg configGetter } -func NewCLIMetrics(cfg configGetter) *cliMetrics { +func NewCLI(cfg configGetter) *cliMetrics { return &cliMetrics{ cfg: cfg, } diff --git 
a/cmd/crowdsec-cli/metrics_table.go b/cmd/crowdsec-cli/metrics/metrics_table.go similarity index 99% rename from cmd/crowdsec-cli/metrics_table.go rename to cmd/crowdsec-cli/metrics/metrics_table.go index 38a4d3bbcc4..4e423722f3e 100644 --- a/cmd/crowdsec-cli/metrics_table.go +++ b/cmd/crowdsec-cli/metrics/metrics_table.go @@ -1,4 +1,4 @@ -package main +package metrics import ( "errors" diff --git a/cmd/crowdsec-cli/metrics/number.go b/cmd/crowdsec-cli/metrics/number.go new file mode 100644 index 00000000000..a60d3db2a9b --- /dev/null +++ b/cmd/crowdsec-cli/metrics/number.go @@ -0,0 +1,40 @@ +package metrics + +import ( + "fmt" + "math" +) + +type unit struct { + value int64 + symbol string +} + +var ranges = []unit{ + {value: 1e18, symbol: "E"}, + {value: 1e15, symbol: "P"}, + {value: 1e12, symbol: "T"}, + {value: 1e9, symbol: "G"}, + {value: 1e6, symbol: "M"}, + {value: 1e3, symbol: "k"}, + {value: 1, symbol: ""}, +} + +func formatNumber(num int) string { + goodUnit := unit{} + + for _, u := range ranges { + if int64(num) >= u.value { + goodUnit = u + break + } + } + + if goodUnit.value == 1 { + return fmt.Sprintf("%d%s", num, goodUnit.symbol) + } + + res := math.Round(float64(num)/float64(goodUnit.value)*100) / 100 + + return fmt.Sprintf("%.2f%s", res, goodUnit.symbol) +} diff --git a/cmd/crowdsec-cli/support.go b/cmd/crowdsec-cli/support.go index b026d31412e..1b33ef38ada 100644 --- a/cmd/crowdsec-cli/support.go +++ b/cmd/crowdsec-cli/support.go @@ -22,6 +22,7 @@ import ( "github.com/crowdsecurity/go-cs-lib/trace" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/metrics" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" @@ -88,7 +89,7 @@ func (cli *cliSupport) dumpMetrics(ctx context.Context, zw *zip.Writer) error { humanMetrics := new(bytes.Buffer) - ms := NewMetricStore() + ms := metrics.NewMetricStore() if err := ms.Fetch(cfg.Cscli.PrometheusUrl); 
err != nil { return err From ea2098f6e6e9a62608fde690234db9f307c5dbce Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 4 Jul 2024 15:55:47 +0200 Subject: [PATCH 210/318] refact cscli metrics: split stat types to own files (#3107) --- cmd/crowdsec-cli/metrics/list.go | 78 +++ cmd/crowdsec-cli/metrics/metrics.go | 444 ------------- cmd/crowdsec-cli/metrics/metrics_table.go | 617 ------------------- cmd/crowdsec-cli/metrics/show.go | 104 ++++ cmd/crowdsec-cli/metrics/statacquis.go | 45 ++ cmd/crowdsec-cli/metrics/statalert.go | 45 ++ cmd/crowdsec-cli/metrics/statappsecengine.go | 42 ++ cmd/crowdsec-cli/metrics/statappsecrule.go | 48 ++ cmd/crowdsec-cli/metrics/statbucket.go | 43 ++ cmd/crowdsec-cli/metrics/statdecision.go | 60 ++ cmd/crowdsec-cli/metrics/statlapi.go | 67 ++ cmd/crowdsec-cli/metrics/statlapibouncer.go | 43 ++ cmd/crowdsec-cli/metrics/statlapidecision.go | 65 ++ cmd/crowdsec-cli/metrics/statlapimachine.go | 43 ++ cmd/crowdsec-cli/metrics/statparser.go | 44 ++ cmd/crowdsec-cli/metrics/statstash.go | 61 ++ cmd/crowdsec-cli/metrics/statwhitelist.go | 44 ++ cmd/crowdsec-cli/metrics/store.go | 264 ++++++++ cmd/crowdsec-cli/metrics/table.go | 130 ++++ 19 files changed, 1226 insertions(+), 1061 deletions(-) create mode 100644 cmd/crowdsec-cli/metrics/list.go delete mode 100644 cmd/crowdsec-cli/metrics/metrics_table.go create mode 100644 cmd/crowdsec-cli/metrics/show.go create mode 100644 cmd/crowdsec-cli/metrics/statacquis.go create mode 100644 cmd/crowdsec-cli/metrics/statalert.go create mode 100644 cmd/crowdsec-cli/metrics/statappsecengine.go create mode 100644 cmd/crowdsec-cli/metrics/statappsecrule.go create mode 100644 cmd/crowdsec-cli/metrics/statbucket.go create mode 100644 cmd/crowdsec-cli/metrics/statdecision.go create mode 100644 cmd/crowdsec-cli/metrics/statlapi.go create mode 100644 cmd/crowdsec-cli/metrics/statlapibouncer.go create mode 100644 cmd/crowdsec-cli/metrics/statlapidecision.go create mode 100644 
cmd/crowdsec-cli/metrics/statlapimachine.go create mode 100644 cmd/crowdsec-cli/metrics/statparser.go create mode 100644 cmd/crowdsec-cli/metrics/statstash.go create mode 100644 cmd/crowdsec-cli/metrics/statwhitelist.go create mode 100644 cmd/crowdsec-cli/metrics/store.go create mode 100644 cmd/crowdsec-cli/metrics/table.go diff --git a/cmd/crowdsec-cli/metrics/list.go b/cmd/crowdsec-cli/metrics/list.go new file mode 100644 index 00000000000..ba68aa4b64d --- /dev/null +++ b/cmd/crowdsec-cli/metrics/list.go @@ -0,0 +1,78 @@ +package metrics + +import ( + "encoding/json" + "fmt" + + "github.com/fatih/color" + "github.com/spf13/cobra" + "gopkg.in/yaml.v3" + + "github.com/crowdsecurity/go-cs-lib/maptools" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" +) + +func (cli *cliMetrics) list() error { + type metricType struct { + Type string `json:"type" yaml:"type"` + Title string `json:"title" yaml:"title"` + Description string `json:"description" yaml:"description"` + } + + var allMetrics []metricType + + ms := NewMetricStore() + for _, section := range maptools.SortedKeys(ms) { + title, description := ms[section].Description() + allMetrics = append(allMetrics, metricType{ + Type: section, + Title: title, + Description: description, + }) + } + + switch cli.cfg().Cscli.Output { + case "human": + t := cstable.New(color.Output, cli.cfg().Cscli.Color) + t.SetRowLines(true) + t.SetHeaders("Type", "Title", "Description") + + for _, metric := range allMetrics { + t.AddRow(metric.Type, metric.Title, metric.Description) + } + + t.Render() + case "json": + x, err := json.MarshalIndent(allMetrics, "", " ") + if err != nil { + return fmt.Errorf("failed to marshal metric types: %w", err) + } + + fmt.Println(string(x)) + case "raw": + x, err := yaml.Marshal(allMetrics) + if err != nil { + return fmt.Errorf("failed to marshal metric types: %w", err) + } + + fmt.Println(string(x)) + } + + return nil +} + +func (cli *cliMetrics) newListCmd() *cobra.Command { + cmd := 
&cobra.Command{ + Use: "list", + Short: "List available types of metrics.", + Long: `List available types of metrics.`, + Args: cobra.ExactArgs(0), + DisableAutoGenTag: true, + RunE: func(_ *cobra.Command, _ []string) error { + return cli.list() + }, + } + + return cmd +} diff --git a/cmd/crowdsec-cli/metrics/metrics.go b/cmd/crowdsec-cli/metrics/metrics.go index 967b57de707..52d623dc37e 100644 --- a/cmd/crowdsec-cli/metrics/metrics.go +++ b/cmd/crowdsec-cli/metrics/metrics.go @@ -1,261 +1,11 @@ package metrics import ( - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "strconv" - "strings" - "time" - - "github.com/fatih/color" - dto "github.com/prometheus/client_model/go" - "github.com/prometheus/prom2json" - log "github.com/sirupsen/logrus" "github.com/spf13/cobra" - "gopkg.in/yaml.v3" - - "github.com/crowdsecurity/go-cs-lib/maptools" - "github.com/crowdsecurity/go-cs-lib/trace" - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" "github.com/crowdsecurity/crowdsec/pkg/csconfig" ) -type ( - statAcquis map[string]map[string]int - statParser map[string]map[string]int - statBucket map[string]map[string]int - statWhitelist map[string]map[string]map[string]int - statLapi map[string]map[string]int - statLapiMachine map[string]map[string]map[string]int - statLapiBouncer map[string]map[string]map[string]int - statLapiDecision map[string]struct { - NonEmpty int - Empty int - } - statDecision map[string]map[string]map[string]int - statAppsecEngine map[string]map[string]int - statAppsecRule map[string]map[string]map[string]int - statAlert map[string]int - statStash map[string]struct { - Type string - Count int - } -) - -var ( - ErrMissingConfig = errors.New("prometheus section missing, can't show metrics") - ErrMetricsDisabled = errors.New("prometheus is not enabled, can't show metrics") -) - -type metricSection interface { - Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) - Description() (string, string) -} - -type metricStore 
map[string]metricSection - -func NewMetricStore() metricStore { - return metricStore{ - "acquisition": statAcquis{}, - "scenarios": statBucket{}, - "parsers": statParser{}, - "lapi": statLapi{}, - "lapi-machine": statLapiMachine{}, - "lapi-bouncer": statLapiBouncer{}, - "lapi-decisions": statLapiDecision{}, - "decisions": statDecision{}, - "alerts": statAlert{}, - "stash": statStash{}, - "appsec-engine": statAppsecEngine{}, - "appsec-rule": statAppsecRule{}, - "whitelists": statWhitelist{}, - } -} - -func (ms metricStore) Fetch(url string) error { - mfChan := make(chan *dto.MetricFamily, 1024) - errChan := make(chan error, 1) - - // Start with the DefaultTransport for sane defaults. - transport := http.DefaultTransport.(*http.Transport).Clone() - // Conservatively disable HTTP keep-alives as this program will only - // ever need a single HTTP request. - transport.DisableKeepAlives = true - // Timeout early if the server doesn't even return the headers. - transport.ResponseHeaderTimeout = time.Minute - go func() { - defer trace.CatchPanic("crowdsec/ShowPrometheus") - - err := prom2json.FetchMetricFamilies(url, mfChan, transport) - if err != nil { - errChan <- fmt.Errorf("failed to fetch metrics: %w", err) - return - } - errChan <- nil - }() - - result := []*prom2json.Family{} - for mf := range mfChan { - result = append(result, prom2json.NewFamily(mf)) - } - - if err := <-errChan; err != nil { - return err - } - - log.Debugf("Finished reading metrics output, %d entries", len(result)) - /*walk*/ - - mAcquis := ms["acquisition"].(statAcquis) - mParser := ms["parsers"].(statParser) - mBucket := ms["scenarios"].(statBucket) - mLapi := ms["lapi"].(statLapi) - mLapiMachine := ms["lapi-machine"].(statLapiMachine) - mLapiBouncer := ms["lapi-bouncer"].(statLapiBouncer) - mLapiDecision := ms["lapi-decisions"].(statLapiDecision) - mDecision := ms["decisions"].(statDecision) - mAppsecEngine := ms["appsec-engine"].(statAppsecEngine) - mAppsecRule := 
ms["appsec-rule"].(statAppsecRule) - mAlert := ms["alerts"].(statAlert) - mStash := ms["stash"].(statStash) - mWhitelist := ms["whitelists"].(statWhitelist) - - for idx, fam := range result { - if !strings.HasPrefix(fam.Name, "cs_") { - continue - } - - log.Tracef("round %d", idx) - - for _, m := range fam.Metrics { - metric, ok := m.(prom2json.Metric) - if !ok { - log.Debugf("failed to convert metric to prom2json.Metric") - continue - } - - name, ok := metric.Labels["name"] - if !ok { - log.Debugf("no name in Metric %v", metric.Labels) - } - - source, ok := metric.Labels["source"] - if !ok { - log.Debugf("no source in Metric %v for %s", metric.Labels, fam.Name) - } else { - if srctype, ok := metric.Labels["type"]; ok { - source = srctype + ":" + source - } - } - - value := m.(prom2json.Metric).Value - machine := metric.Labels["machine"] - bouncer := metric.Labels["bouncer"] - - route := metric.Labels["route"] - method := metric.Labels["method"] - - reason := metric.Labels["reason"] - origin := metric.Labels["origin"] - action := metric.Labels["action"] - - appsecEngine := metric.Labels["appsec_engine"] - appsecRule := metric.Labels["rule_name"] - - mtype := metric.Labels["type"] - - fval, err := strconv.ParseFloat(value, 32) - if err != nil { - log.Errorf("Unexpected int value %s : %s", value, err) - } - - ival := int(fval) - - switch fam.Name { - // - // buckets - // - case "cs_bucket_created_total": - mBucket.Process(name, "instantiation", ival) - case "cs_buckets": - mBucket.Process(name, "curr_count", ival) - case "cs_bucket_overflowed_total": - mBucket.Process(name, "overflow", ival) - case "cs_bucket_poured_total": - mBucket.Process(name, "pour", ival) - mAcquis.Process(source, "pour", ival) - case "cs_bucket_underflowed_total": - mBucket.Process(name, "underflow", ival) - // - // parsers - // - case "cs_parser_hits_total": - mAcquis.Process(source, "reads", ival) - case "cs_parser_hits_ok_total": - mAcquis.Process(source, "parsed", ival) - case 
"cs_parser_hits_ko_total": - mAcquis.Process(source, "unparsed", ival) - case "cs_node_hits_total": - mParser.Process(name, "hits", ival) - case "cs_node_hits_ok_total": - mParser.Process(name, "parsed", ival) - case "cs_node_hits_ko_total": - mParser.Process(name, "unparsed", ival) - // - // whitelists - // - case "cs_node_wl_hits_total": - mWhitelist.Process(name, reason, "hits", ival) - case "cs_node_wl_hits_ok_total": - mWhitelist.Process(name, reason, "whitelisted", ival) - // track as well whitelisted lines at acquis level - mAcquis.Process(source, "whitelisted", ival) - // - // lapi - // - case "cs_lapi_route_requests_total": - mLapi.Process(route, method, ival) - case "cs_lapi_machine_requests_total": - mLapiMachine.Process(machine, route, method, ival) - case "cs_lapi_bouncer_requests_total": - mLapiBouncer.Process(bouncer, route, method, ival) - case "cs_lapi_decisions_ko_total", "cs_lapi_decisions_ok_total": - mLapiDecision.Process(bouncer, fam.Name, ival) - // - // decisions - // - case "cs_active_decisions": - mDecision.Process(reason, origin, action, ival) - case "cs_alerts": - mAlert.Process(reason, ival) - // - // stash - // - case "cs_cache_size": - mStash.Process(name, mtype, ival) - // - // appsec - // - case "cs_appsec_reqs_total": - mAppsecEngine.Process(appsecEngine, "processed", ival) - case "cs_appsec_block_total": - mAppsecEngine.Process(appsecEngine, "blocked", ival) - case "cs_appsec_rule_hits": - mAppsecRule.Process(appsecEngine, appsecRule, "triggered", ival) - default: - log.Debugf("unknown: %+v", fam.Name) - continue - } - } - } - - return nil -} - type configGetter func() *csconfig.Config type cliMetrics struct { @@ -268,77 +18,6 @@ func NewCLI(cfg configGetter) *cliMetrics { } } -func (ms metricStore) Format(out io.Writer, wantColor string, sections []string, formatType string, noUnit bool) error { - // copy only the sections we want - want := map[string]metricSection{} - - // if explicitly asking for sections, we want to show empty 
tables - showEmpty := len(sections) > 0 - - // if no sections are specified, we want all of them - if len(sections) == 0 { - sections = maptools.SortedKeys(ms) - } - - for _, section := range sections { - want[section] = ms[section] - } - - switch formatType { - case "human": - for _, section := range maptools.SortedKeys(want) { - want[section].Table(out, wantColor, noUnit, showEmpty) - } - case "json": - x, err := json.MarshalIndent(want, "", " ") - if err != nil { - return fmt.Errorf("failed to marshal metrics: %w", err) - } - out.Write(x) - case "raw": - x, err := yaml.Marshal(want) - if err != nil { - return fmt.Errorf("failed to marshal metrics: %w", err) - } - out.Write(x) - default: - return fmt.Errorf("unknown format type %s", formatType) - } - - return nil -} - -func (cli *cliMetrics) show(sections []string, url string, noUnit bool) error { - cfg := cli.cfg() - - if url != "" { - cfg.Cscli.PrometheusUrl = url - } - - if cfg.Prometheus == nil { - return ErrMissingConfig - } - - if !cfg.Prometheus.Enabled { - return ErrMetricsDisabled - } - - ms := NewMetricStore() - - if err := ms.Fetch(cfg.Cscli.PrometheusUrl); err != nil { - return err - } - - // any section that we don't have in the store is an error - for _, section := range sections { - if _, ok := ms[section]; !ok { - return fmt.Errorf("unknown metrics type: %s", section) - } - } - - return ms.Format(color.Output, cfg.Cscli.Color, sections, cfg.Cscli.Output, noUnit) -} - func (cli *cliMetrics) NewCommand() *cobra.Command { var ( url string @@ -373,126 +52,3 @@ cscli metrics list`, return cmd } - -// expandAlias returns a list of sections. The input can be a list of sections or alias. 
-func (cli *cliMetrics) expandAlias(args []string) []string { - ret := []string{} - - for _, section := range args { - switch section { - case "engine": - ret = append(ret, "acquisition", "parsers", "scenarios", "stash", "whitelists") - case "lapi": - ret = append(ret, "alerts", "decisions", "lapi", "lapi-bouncer", "lapi-decisions", "lapi-machine") - case "appsec": - ret = append(ret, "appsec-engine", "appsec-rule") - default: - ret = append(ret, section) - } - } - - return ret -} - -func (cli *cliMetrics) newShowCmd() *cobra.Command { - var ( - url string - noUnit bool - ) - - cmd := &cobra.Command{ - Use: "show [type]...", - Short: "Display all or part of the available metrics.", - Long: `Fetch metrics from a Local API server and display them, optionally filtering on specific types.`, - Example: `# Show all Metrics, skip empty tables -cscli metrics show - -# Use an alias: "engine", "lapi" or "appsec" to show a group of metrics -cscli metrics show engine - -# Show some specific metrics, show empty tables, connect to a different url -cscli metrics show acquisition parsers scenarios stash --url http://lapi.local:6060/metrics - -# To list available metric types, use "cscli metrics list" -cscli metrics list; cscli metrics list -o json - -# Show metrics in json format -cscli metrics show acquisition parsers scenarios stash -o json`, - // Positional args are optional - DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, args []string) error { - args = cli.expandAlias(args) - return cli.show(args, url, noUnit) - }, - } - - flags := cmd.Flags() - flags.StringVarP(&url, "url", "u", "", "Metrics url (http://:/metrics)") - flags.BoolVar(&noUnit, "no-unit", false, "Show the real number instead of formatted with units") - - return cmd -} - -func (cli *cliMetrics) list() error { - type metricType struct { - Type string `json:"type" yaml:"type"` - Title string `json:"title" yaml:"title"` - Description string `json:"description" yaml:"description"` - } - - var allMetrics 
[]metricType - - ms := NewMetricStore() - for _, section := range maptools.SortedKeys(ms) { - title, description := ms[section].Description() - allMetrics = append(allMetrics, metricType{ - Type: section, - Title: title, - Description: description, - }) - } - - switch cli.cfg().Cscli.Output { - case "human": - t := cstable.New(color.Output, cli.cfg().Cscli.Color) - t.SetRowLines(true) - t.SetHeaders("Type", "Title", "Description") - - for _, metric := range allMetrics { - t.AddRow(metric.Type, metric.Title, metric.Description) - } - - t.Render() - case "json": - x, err := json.MarshalIndent(allMetrics, "", " ") - if err != nil { - return fmt.Errorf("failed to marshal metric types: %w", err) - } - - fmt.Println(string(x)) - case "raw": - x, err := yaml.Marshal(allMetrics) - if err != nil { - return fmt.Errorf("failed to marshal metric types: %w", err) - } - - fmt.Println(string(x)) - } - - return nil -} - -func (cli *cliMetrics) newListCmd() *cobra.Command { - cmd := &cobra.Command{ - Use: "list", - Short: "List available types of metrics.", - Long: `List available types of metrics.`, - Args: cobra.ExactArgs(0), - DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, _ []string) error { - return cli.list() - }, - } - - return cmd -} diff --git a/cmd/crowdsec-cli/metrics/metrics_table.go b/cmd/crowdsec-cli/metrics/metrics_table.go deleted file mode 100644 index 4e423722f3e..00000000000 --- a/cmd/crowdsec-cli/metrics/metrics_table.go +++ /dev/null @@ -1,617 +0,0 @@ -package metrics - -import ( - "errors" - "fmt" - "io" - "sort" - "strconv" - - "github.com/jedib0t/go-pretty/v6/text" - log "github.com/sirupsen/logrus" - - "github.com/crowdsecurity/go-cs-lib/maptools" - - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" -) - -// ErrNilTable means a nil pointer was passed instead of a table instance. This is a programming error. 
-var ErrNilTable = errors.New("nil table") - -func lapiMetricsToTable(t *cstable.Table, stats map[string]map[string]map[string]int) int { - // stats: machine -> route -> method -> count - // sort keys to keep consistent order when printing - machineKeys := []string{} - for k := range stats { - machineKeys = append(machineKeys, k) - } - - sort.Strings(machineKeys) - - numRows := 0 - - for _, machine := range machineKeys { - // oneRow: route -> method -> count - machineRow := stats[machine] - for routeName, route := range machineRow { - for methodName, count := range route { - row := []string{ - machine, - routeName, - methodName, - } - if count != 0 { - row = append(row, strconv.Itoa(count)) - } else { - row = append(row, "-") - } - - t.AddRow(row...) - - numRows++ - } - } - } - - return numRows -} - -func wlMetricsToTable(t *cstable.Table, stats map[string]map[string]map[string]int, noUnit bool) (int, error) { - if t == nil { - return 0, ErrNilTable - } - - numRows := 0 - - for _, name := range maptools.SortedKeys(stats) { - for _, reason := range maptools.SortedKeys(stats[name]) { - row := []string{ - name, - reason, - "-", - "-", - } - - for _, action := range maptools.SortedKeys(stats[name][reason]) { - value := stats[name][reason][action] - - switch action { - case "whitelisted": - row[3] = strconv.Itoa(value) - case "hits": - row[2] = strconv.Itoa(value) - default: - log.Debugf("unexpected counter '%s' for whitelists = %d", action, value) - } - } - - t.AddRow(row...) 
- - numRows++ - } - } - - return numRows, nil -} - -func metricsToTable(t *cstable.Table, stats map[string]map[string]int, keys []string, noUnit bool) (int, error) { - if t == nil { - return 0, ErrNilTable - } - - numRows := 0 - - for _, alabel := range maptools.SortedKeys(stats) { - astats, ok := stats[alabel] - if !ok { - continue - } - - row := []string{ - alabel, - } - - for _, sl := range keys { - if v, ok := astats[sl]; ok && v != 0 { - numberToShow := strconv.Itoa(v) - if !noUnit { - numberToShow = formatNumber(v) - } - - row = append(row, numberToShow) - } else { - row = append(row, "-") - } - } - - t.AddRow(row...) - - numRows++ - } - - return numRows, nil -} - -func (s statBucket) Description() (string, string) { - return "Scenario Metrics", - `Measure events in different scenarios. Current count is the number of buckets during metrics collection. ` + - `Overflows are past event-producing buckets, while Expired are the ones that didn’t receive enough events to Overflow.` -} - -func (s statBucket) Process(bucket, metric string, val int) { - if _, ok := s[bucket]; !ok { - s[bucket] = make(map[string]int) - } - - s[bucket][metric] += val -} - -func (s statBucket) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { - t := cstable.New(out, wantColor) - t.SetRowLines(false) - t.SetHeaders("Scenario", "Current Count", "Overflows", "Instantiated", "Poured", "Expired") - t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) - - keys := []string{"curr_count", "overflow", "instantiation", "pour", "underflow"} - - if numRows, err := metricsToTable(t, s, keys, noUnit); err != nil { - log.Warningf("while collecting scenario stats: %s", err) - } else if numRows > 0 || showEmpty { - title, _ := s.Description() - cstable.RenderTitle(out, "\n"+title+":") - t.Render() - } -} - -func (s statAcquis) Description() (string, string) { - return "Acquisition Metrics", - `Measures the lines read, parsed, and 
unparsed per datasource. ` + - `Zero read lines indicate a misconfigured or inactive datasource. ` + - `Zero parsed lines mean the parser(s) failed. ` + - `Non-zero parsed lines are fine as crowdsec selects relevant lines.` -} - -func (s statAcquis) Process(source, metric string, val int) { - if _, ok := s[source]; !ok { - s[source] = make(map[string]int) - } - - s[source][metric] += val -} - -func (s statAcquis) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { - t := cstable.New(out, wantColor) - t.SetRowLines(false) - t.SetHeaders("Source", "Lines read", "Lines parsed", "Lines unparsed", "Lines poured to bucket", "Lines whitelisted") - t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) - - keys := []string{"reads", "parsed", "unparsed", "pour", "whitelisted"} - - if numRows, err := metricsToTable(t, s, keys, noUnit); err != nil { - log.Warningf("while collecting acquis stats: %s", err) - } else if numRows > 0 || showEmpty { - title, _ := s.Description() - cstable.RenderTitle(out, "\n"+title+":") - t.Render() - } -} - -func (s statAppsecEngine) Description() (string, string) { - return "Appsec Metrics", - `Measures the number of parsed and blocked requests by the AppSec Component.` -} - -func (s statAppsecEngine) Process(appsecEngine, metric string, val int) { - if _, ok := s[appsecEngine]; !ok { - s[appsecEngine] = make(map[string]int) - } - - s[appsecEngine][metric] += val -} - -func (s statAppsecEngine) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { - t := cstable.New(out, wantColor) - t.SetRowLines(false) - t.SetHeaders("Appsec Engine", "Processed", "Blocked") - t.SetAlignment(text.AlignLeft, text.AlignLeft) - - keys := []string{"processed", "blocked"} - - if numRows, err := metricsToTable(t, s, keys, noUnit); err != nil { - log.Warningf("while collecting appsec stats: %s", err) - } else if numRows > 0 || showEmpty { - title, _ := s.Description() - cstable.RenderTitle(out, 
"\n"+title+":") - t.Render() - } -} - -func (s statAppsecRule) Description() (string, string) { - return "Appsec Rule Metrics", - `Provides “per AppSec Component” information about the number of matches for loaded AppSec Rules.` -} - -func (s statAppsecRule) Process(appsecEngine, appsecRule string, metric string, val int) { - if _, ok := s[appsecEngine]; !ok { - s[appsecEngine] = make(map[string]map[string]int) - } - - if _, ok := s[appsecEngine][appsecRule]; !ok { - s[appsecEngine][appsecRule] = make(map[string]int) - } - - s[appsecEngine][appsecRule][metric] += val -} - -func (s statAppsecRule) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { - for appsecEngine, appsecEngineRulesStats := range s { - t := cstable.New(out, wantColor) - t.SetRowLines(false) - t.SetHeaders("Rule ID", "Triggered") - t.SetAlignment(text.AlignLeft, text.AlignLeft) - - keys := []string{"triggered"} - - if numRows, err := metricsToTable(t, appsecEngineRulesStats, keys, noUnit); err != nil { - log.Warningf("while collecting appsec rules stats: %s", err) - } else if numRows > 0 || showEmpty { - cstable.RenderTitle(out, fmt.Sprintf("\nAppsec '%s' Rules Metrics:", appsecEngine)) - t.Render() - } - } -} - -func (s statWhitelist) Description() (string, string) { - return "Whitelist Metrics", - `Tracks the number of events processed and possibly whitelisted by each parser whitelist.` -} - -func (s statWhitelist) Process(whitelist, reason, metric string, val int) { - if _, ok := s[whitelist]; !ok { - s[whitelist] = make(map[string]map[string]int) - } - - if _, ok := s[whitelist][reason]; !ok { - s[whitelist][reason] = make(map[string]int) - } - - s[whitelist][reason][metric] += val -} - -func (s statWhitelist) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { - t := cstable.New(out, wantColor) - t.SetRowLines(false) - t.SetHeaders("Whitelist", "Reason", "Hits", "Whitelisted") - t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) 
- - if numRows, err := wlMetricsToTable(t, s, noUnit); err != nil { - log.Warningf("while collecting parsers stats: %s", err) - } else if numRows > 0 || showEmpty { - title, _ := s.Description() - cstable.RenderTitle(out, "\n"+title+":") - t.Render() - } -} - -func (s statParser) Description() (string, string) { - return "Parser Metrics", - `Tracks the number of events processed by each parser and indicates success of failure. ` + - `Zero parsed lines means the parer(s) failed. ` + - `Non-zero unparsed lines are fine as crowdsec select relevant lines.` -} - -func (s statParser) Process(parser, metric string, val int) { - if _, ok := s[parser]; !ok { - s[parser] = make(map[string]int) - } - - s[parser][metric] += val -} - -func (s statParser) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { - t := cstable.New(out, wantColor) - t.SetRowLines(false) - t.SetHeaders("Parsers", "Hits", "Parsed", "Unparsed") - t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) - - keys := []string{"hits", "parsed", "unparsed"} - - if numRows, err := metricsToTable(t, s, keys, noUnit); err != nil { - log.Warningf("while collecting parsers stats: %s", err) - } else if numRows > 0 || showEmpty { - title, _ := s.Description() - cstable.RenderTitle(out, "\n"+title+":") - t.Render() - } -} - -func (s statStash) Description() (string, string) { - return "Parser Stash Metrics", - `Tracks the status of stashes that might be created by various parsers and scenarios.` -} - -func (s statStash) Process(name, mtype string, val int) { - s[name] = struct { - Type string - Count int - }{ - Type: mtype, - Count: val, - } -} - -func (s statStash) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { - t := cstable.New(out, wantColor) - t.SetRowLines(false) - t.SetHeaders("Name", "Type", "Items") - t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft) - - // unfortunately, we can't reuse metricsToTable as the structure is too different 
:/ - numRows := 0 - - for _, alabel := range maptools.SortedKeys(s) { - astats := s[alabel] - - row := []string{ - alabel, - astats.Type, - strconv.Itoa(astats.Count), - } - t.AddRow(row...) - - numRows++ - } - - if numRows > 0 || showEmpty { - title, _ := s.Description() - cstable.RenderTitle(out, "\n"+title+":") - t.Render() - } -} - -func (s statLapi) Description() (string, string) { - return "Local API Metrics", - `Monitors the requests made to local API routes.` -} - -func (s statLapi) Process(route, method string, val int) { - if _, ok := s[route]; !ok { - s[route] = make(map[string]int) - } - - s[route][method] += val -} - -func (s statLapi) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { - t := cstable.New(out, wantColor) - t.SetRowLines(false) - t.SetHeaders("Route", "Method", "Hits") - t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft) - - // unfortunately, we can't reuse metricsToTable as the structure is too different :/ - numRows := 0 - - for _, alabel := range maptools.SortedKeys(s) { - astats := s[alabel] - - subKeys := []string{} - for skey := range astats { - subKeys = append(subKeys, skey) - } - - sort.Strings(subKeys) - - for _, sl := range subKeys { - row := []string{ - alabel, - sl, - strconv.Itoa(astats[sl]), - } - - t.AddRow(row...) 
- - numRows++ - } - } - - if numRows > 0 || showEmpty { - title, _ := s.Description() - cstable.RenderTitle(out, "\n"+title+":") - t.Render() - } -} - -func (s statLapiMachine) Description() (string, string) { - return "Local API Machines Metrics", - `Tracks the number of calls to the local API from each registered machine.` -} - -func (s statLapiMachine) Process(machine, route, method string, val int) { - if _, ok := s[machine]; !ok { - s[machine] = make(map[string]map[string]int) - } - - if _, ok := s[machine][route]; !ok { - s[machine][route] = make(map[string]int) - } - - s[machine][route][method] += val -} - -func (s statLapiMachine) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { - t := cstable.New(out, wantColor) - t.SetRowLines(false) - t.SetHeaders("Machine", "Route", "Method", "Hits") - t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) - - numRows := lapiMetricsToTable(t, s) - - if numRows > 0 || showEmpty { - title, _ := s.Description() - cstable.RenderTitle(out, "\n"+title+":") - t.Render() - } -} - -func (s statLapiBouncer) Description() (string, string) { - return "Local API Bouncers Metrics", - `Tracks total hits to remediation component related API routes.` -} - -func (s statLapiBouncer) Process(bouncer, route, method string, val int) { - if _, ok := s[bouncer]; !ok { - s[bouncer] = make(map[string]map[string]int) - } - - if _, ok := s[bouncer][route]; !ok { - s[bouncer][route] = make(map[string]int) - } - - s[bouncer][route][method] += val -} - -func (s statLapiBouncer) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { - t := cstable.New(out, wantColor) - t.SetRowLines(false) - t.SetHeaders("Bouncer", "Route", "Method", "Hits") - t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) - - numRows := lapiMetricsToTable(t, s) - - if numRows > 0 || showEmpty { - title, _ := s.Description() - cstable.RenderTitle(out, "\n"+title+":") - t.Render() - } -} - -func 
(s statLapiDecision) Description() (string, string) { - return "Local API Bouncers Decisions", - `Tracks the number of empty/non-empty answers from LAPI to bouncers that are working in "live" mode.` -} - -func (s statLapiDecision) Process(bouncer, fam string, val int) { - if _, ok := s[bouncer]; !ok { - s[bouncer] = struct { - NonEmpty int - Empty int - }{} - } - - x := s[bouncer] - - switch fam { - case "cs_lapi_decisions_ko_total": - x.Empty += val - case "cs_lapi_decisions_ok_total": - x.NonEmpty += val - } - - s[bouncer] = x -} - -func (s statLapiDecision) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { - t := cstable.New(out, wantColor) - t.SetRowLines(false) - t.SetHeaders("Bouncer", "Empty answers", "Non-empty answers") - t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft) - - numRows := 0 - - for bouncer, hits := range s { - t.AddRow( - bouncer, - strconv.Itoa(hits.Empty), - strconv.Itoa(hits.NonEmpty), - ) - - numRows++ - } - - if numRows > 0 || showEmpty { - title, _ := s.Description() - cstable.RenderTitle(out, "\n"+title+":") - t.Render() - } -} - -func (s statDecision) Description() (string, string) { - return "Local API Decisions", - `Provides information about all currently active decisions. 
` + - `Includes both local (crowdsec) and global decisions (CAPI), and lists subscriptions (lists).` -} - -func (s statDecision) Process(reason, origin, action string, val int) { - if _, ok := s[reason]; !ok { - s[reason] = make(map[string]map[string]int) - } - - if _, ok := s[reason][origin]; !ok { - s[reason][origin] = make(map[string]int) - } - - s[reason][origin][action] += val -} - -func (s statDecision) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { - t := cstable.New(out, wantColor) - t.SetRowLines(false) - t.SetHeaders("Reason", "Origin", "Action", "Count") - t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) - - numRows := 0 - - for reason, origins := range s { - for origin, actions := range origins { - for action, hits := range actions { - t.AddRow( - reason, - origin, - action, - strconv.Itoa(hits), - ) - - numRows++ - } - } - } - - if numRows > 0 || showEmpty { - title, _ := s.Description() - cstable.RenderTitle(out, "\n"+title+":") - t.Render() - } -} - -func (s statAlert) Description() (string, string) { - return "Local API Alerts", - `Tracks the total number of past and present alerts for the installed scenarios.` -} - -func (s statAlert) Process(reason string, val int) { - s[reason] += val -} - -func (s statAlert) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { - t := cstable.New(out, wantColor) - t.SetRowLines(false) - t.SetHeaders("Reason", "Count") - t.SetAlignment(text.AlignLeft, text.AlignLeft) - - numRows := 0 - - for scenario, hits := range s { - t.AddRow( - scenario, - strconv.Itoa(hits), - ) - - numRows++ - } - - if numRows > 0 || showEmpty { - title, _ := s.Description() - cstable.RenderTitle(out, "\n"+title+":") - t.Render() - } -} diff --git a/cmd/crowdsec-cli/metrics/show.go b/cmd/crowdsec-cli/metrics/show.go new file mode 100644 index 00000000000..46603034f24 --- /dev/null +++ b/cmd/crowdsec-cli/metrics/show.go @@ -0,0 +1,104 @@ +package metrics + +import ( + 
"errors" + "fmt" + + "github.com/fatih/color" + "github.com/spf13/cobra" +) + +var ( + ErrMissingConfig = errors.New("prometheus section missing, can't show metrics") + ErrMetricsDisabled = errors.New("prometheus is not enabled, can't show metrics") +) + +func (cli *cliMetrics) show(sections []string, url string, noUnit bool) error { + cfg := cli.cfg() + + if url != "" { + cfg.Cscli.PrometheusUrl = url + } + + if cfg.Prometheus == nil { + return ErrMissingConfig + } + + if !cfg.Prometheus.Enabled { + return ErrMetricsDisabled + } + + ms := NewMetricStore() + + if err := ms.Fetch(cfg.Cscli.PrometheusUrl); err != nil { + return err + } + + // any section that we don't have in the store is an error + for _, section := range sections { + if _, ok := ms[section]; !ok { + return fmt.Errorf("unknown metrics type: %s", section) + } + } + + return ms.Format(color.Output, cfg.Cscli.Color, sections, cfg.Cscli.Output, noUnit) +} + +// expandAlias returns a list of sections. The input can be a list of sections or alias. 
+func expandAlias(args []string) []string { + ret := []string{} + + for _, section := range args { + switch section { + case "engine": + ret = append(ret, "acquisition", "parsers", "scenarios", "stash", "whitelists") + case "lapi": + ret = append(ret, "alerts", "decisions", "lapi", "lapi-bouncer", "lapi-decisions", "lapi-machine") + case "appsec": + ret = append(ret, "appsec-engine", "appsec-rule") + default: + ret = append(ret, section) + } + } + + return ret +} + +func (cli *cliMetrics) newShowCmd() *cobra.Command { + var ( + url string + noUnit bool + ) + + cmd := &cobra.Command{ + Use: "show [type]...", + Short: "Display all or part of the available metrics.", + Long: `Fetch metrics from a Local API server and display them, optionally filtering on specific types.`, + Example: `# Show all Metrics, skip empty tables +cscli metrics show + +# Use an alias: "engine", "lapi" or "appsec" to show a group of metrics +cscli metrics show engine + +# Show some specific metrics, show empty tables, connect to a different url +cscli metrics show acquisition parsers scenarios stash --url http://lapi.local:6060/metrics + +# To list available metric types, use "cscli metrics list" +cscli metrics list; cscli metrics list -o json + +# Show metrics in json format +cscli metrics show acquisition parsers scenarios stash -o json`, + // Positional args are optional + DisableAutoGenTag: true, + RunE: func(_ *cobra.Command, args []string) error { + args = expandAlias(args) + return cli.show(args, url, noUnit) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&url, "url", "u", "", "Metrics url (http://:/metrics)") + flags.BoolVar(&noUnit, "no-unit", false, "Show the real number instead of formatted with units") + + return cmd +} diff --git a/cmd/crowdsec-cli/metrics/statacquis.go b/cmd/crowdsec-cli/metrics/statacquis.go new file mode 100644 index 00000000000..4a8cf0f8934 --- /dev/null +++ b/cmd/crowdsec-cli/metrics/statacquis.go @@ -0,0 +1,45 @@ +package metrics + +import ( + "io" + + 
"github.com/jedib0t/go-pretty/v6/text" + log "github.com/sirupsen/logrus" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" +) + +type statAcquis map[string]map[string]int + +func (s statAcquis) Description() (string, string) { + return "Acquisition Metrics", + `Measures the lines read, parsed, and unparsed per datasource. ` + + `Zero read lines indicate a misconfigured or inactive datasource. ` + + `Zero parsed lines mean the parser(s) failed. ` + + `Non-zero parsed lines are fine as crowdsec selects relevant lines.` +} + +func (s statAcquis) Process(source, metric string, val int) { + if _, ok := s[source]; !ok { + s[source] = make(map[string]int) + } + + s[source][metric] += val +} + +func (s statAcquis) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { + t := cstable.New(out, wantColor) + t.SetRowLines(false) + t.SetHeaders("Source", "Lines read", "Lines parsed", "Lines unparsed", "Lines poured to bucket", "Lines whitelisted") + t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) + + keys := []string{"reads", "parsed", "unparsed", "pour", "whitelisted"} + + if numRows, err := metricsToTable(t, s, keys, noUnit); err != nil { + log.Warningf("while collecting acquis stats: %s", err) + } else if numRows > 0 || showEmpty { + title, _ := s.Description() + cstable.RenderTitle(out, "\n"+title+":") + t.Render() + } +} diff --git a/cmd/crowdsec-cli/metrics/statalert.go b/cmd/crowdsec-cli/metrics/statalert.go new file mode 100644 index 00000000000..c8055910a3a --- /dev/null +++ b/cmd/crowdsec-cli/metrics/statalert.go @@ -0,0 +1,45 @@ +package metrics + +import ( + "io" + "strconv" + + "github.com/jedib0t/go-pretty/v6/text" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" +) + +type statAlert map[string]int + +func (s statAlert) Description() (string, string) { + return "Local API Alerts", + `Tracks the total number of past and present alerts for the installed scenarios.` +} + 
+func (s statAlert) Process(reason string, val int) { + s[reason] += val +} + +func (s statAlert) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { + t := cstable.New(out, wantColor) + t.SetRowLines(false) + t.SetHeaders("Reason", "Count") + t.SetAlignment(text.AlignLeft, text.AlignLeft) + + numRows := 0 + + for scenario, hits := range s { + t.AddRow( + scenario, + strconv.Itoa(hits), + ) + + numRows++ + } + + if numRows > 0 || showEmpty { + title, _ := s.Description() + cstable.RenderTitle(out, "\n"+title+":") + t.Render() + } +} diff --git a/cmd/crowdsec-cli/metrics/statappsecengine.go b/cmd/crowdsec-cli/metrics/statappsecengine.go new file mode 100644 index 00000000000..18ad03ef03f --- /dev/null +++ b/cmd/crowdsec-cli/metrics/statappsecengine.go @@ -0,0 +1,42 @@ +package metrics + +import ( + "io" + + "github.com/jedib0t/go-pretty/v6/text" + log "github.com/sirupsen/logrus" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" +) + +type statAppsecEngine map[string]map[string]int + +func (s statAppsecEngine) Description() (string, string) { + return "Appsec Metrics", + `Measures the number of parsed and blocked requests by the AppSec Component.` +} + +func (s statAppsecEngine) Process(appsecEngine, metric string, val int) { + if _, ok := s[appsecEngine]; !ok { + s[appsecEngine] = make(map[string]int) + } + + s[appsecEngine][metric] += val +} + +func (s statAppsecEngine) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { + t := cstable.New(out, wantColor) + t.SetRowLines(false) + t.SetHeaders("Appsec Engine", "Processed", "Blocked") + t.SetAlignment(text.AlignLeft, text.AlignLeft) + + keys := []string{"processed", "blocked"} + + if numRows, err := metricsToTable(t, s, keys, noUnit); err != nil { + log.Warningf("while collecting appsec stats: %s", err) + } else if numRows > 0 || showEmpty { + title, _ := s.Description() + cstable.RenderTitle(out, "\n"+title+":") + t.Render() + } +} diff --git 
a/cmd/crowdsec-cli/metrics/statappsecrule.go b/cmd/crowdsec-cli/metrics/statappsecrule.go new file mode 100644 index 00000000000..17ec28de99e --- /dev/null +++ b/cmd/crowdsec-cli/metrics/statappsecrule.go @@ -0,0 +1,48 @@ +package metrics + +import ( + "fmt" + "io" + + "github.com/jedib0t/go-pretty/v6/text" + log "github.com/sirupsen/logrus" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" +) + +type statAppsecRule map[string]map[string]map[string]int + +func (s statAppsecRule) Description() (string, string) { + return "Appsec Rule Metrics", + `Provides “per AppSec Component” information about the number of matches for loaded AppSec Rules.` +} + +func (s statAppsecRule) Process(appsecEngine, appsecRule string, metric string, val int) { + if _, ok := s[appsecEngine]; !ok { + s[appsecEngine] = make(map[string]map[string]int) + } + + if _, ok := s[appsecEngine][appsecRule]; !ok { + s[appsecEngine][appsecRule] = make(map[string]int) + } + + s[appsecEngine][appsecRule][metric] += val +} + +func (s statAppsecRule) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { + for appsecEngine, appsecEngineRulesStats := range s { + t := cstable.New(out, wantColor) + t.SetRowLines(false) + t.SetHeaders("Rule ID", "Triggered") + t.SetAlignment(text.AlignLeft, text.AlignLeft) + + keys := []string{"triggered"} + + if numRows, err := metricsToTable(t, appsecEngineRulesStats, keys, noUnit); err != nil { + log.Warningf("while collecting appsec rules stats: %s", err) + } else if numRows > 0 || showEmpty { + cstable.RenderTitle(out, fmt.Sprintf("\nAppsec '%s' Rules Metrics:", appsecEngine)) + t.Render() + } + } +} diff --git a/cmd/crowdsec-cli/metrics/statbucket.go b/cmd/crowdsec-cli/metrics/statbucket.go new file mode 100644 index 00000000000..62ca4dee71d --- /dev/null +++ b/cmd/crowdsec-cli/metrics/statbucket.go @@ -0,0 +1,43 @@ +package metrics + +import ( + "io" + + "github.com/jedib0t/go-pretty/v6/text" + log "github.com/sirupsen/logrus" + + 
"github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" +) + +type statBucket map[string]map[string]int + +func (s statBucket) Description() (string, string) { + return "Scenario Metrics", + `Measure events in different scenarios. Current count is the number of buckets during metrics collection. ` + + `Overflows are past event-producing buckets, while Expired are the ones that didn’t receive enough events to Overflow.` +} + +func (s statBucket) Process(bucket, metric string, val int) { + if _, ok := s[bucket]; !ok { + s[bucket] = make(map[string]int) + } + + s[bucket][metric] += val +} + +func (s statBucket) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { + t := cstable.New(out, wantColor) + t.SetRowLines(false) + t.SetHeaders("Scenario", "Current Count", "Overflows", "Instantiated", "Poured", "Expired") + t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) + + keys := []string{"curr_count", "overflow", "instantiation", "pour", "underflow"} + + if numRows, err := metricsToTable(t, s, keys, noUnit); err != nil { + log.Warningf("while collecting scenario stats: %s", err) + } else if numRows > 0 || showEmpty { + title, _ := s.Description() + cstable.RenderTitle(out, "\n"+title+":") + t.Render() + } +} diff --git a/cmd/crowdsec-cli/metrics/statdecision.go b/cmd/crowdsec-cli/metrics/statdecision.go new file mode 100644 index 00000000000..b1474d95f76 --- /dev/null +++ b/cmd/crowdsec-cli/metrics/statdecision.go @@ -0,0 +1,60 @@ +package metrics + +import ( + "io" + "strconv" + + "github.com/jedib0t/go-pretty/v6/text" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" +) + +type statDecision map[string]map[string]map[string]int + +func (s statDecision) Description() (string, string) { + return "Local API Decisions", + `Provides information about all currently active decisions. 
` + + `Includes both local (crowdsec) and global decisions (CAPI), and lists subscriptions (lists).` +} + +func (s statDecision) Process(reason, origin, action string, val int) { + if _, ok := s[reason]; !ok { + s[reason] = make(map[string]map[string]int) + } + + if _, ok := s[reason][origin]; !ok { + s[reason][origin] = make(map[string]int) + } + + s[reason][origin][action] += val +} + +func (s statDecision) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { + t := cstable.New(out, wantColor) + t.SetRowLines(false) + t.SetHeaders("Reason", "Origin", "Action", "Count") + t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) + + numRows := 0 + + for reason, origins := range s { + for origin, actions := range origins { + for action, hits := range actions { + t.AddRow( + reason, + origin, + action, + strconv.Itoa(hits), + ) + + numRows++ + } + } + } + + if numRows > 0 || showEmpty { + title, _ := s.Description() + cstable.RenderTitle(out, "\n"+title+":") + t.Render() + } +} diff --git a/cmd/crowdsec-cli/metrics/statlapi.go b/cmd/crowdsec-cli/metrics/statlapi.go new file mode 100644 index 00000000000..f8a737e5c44 --- /dev/null +++ b/cmd/crowdsec-cli/metrics/statlapi.go @@ -0,0 +1,67 @@ +package metrics + +import ( + "io" + "sort" + "strconv" + + "github.com/jedib0t/go-pretty/v6/text" + + "github.com/crowdsecurity/go-cs-lib/maptools" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" +) + +type statLapi map[string]map[string]int + +func (s statLapi) Description() (string, string) { + return "Local API Metrics", + `Monitors the requests made to local API routes.` +} + +func (s statLapi) Process(route, method string, val int) { + if _, ok := s[route]; !ok { + s[route] = make(map[string]int) + } + + s[route][method] += val +} + +func (s statLapi) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { + t := cstable.New(out, wantColor) + t.SetRowLines(false) + t.SetHeaders("Route", "Method", "Hits") + 
t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft) + + // unfortunately, we can't reuse metricsToTable as the structure is too different :/ + numRows := 0 + + for _, alabel := range maptools.SortedKeys(s) { + astats := s[alabel] + + subKeys := []string{} + for skey := range astats { + subKeys = append(subKeys, skey) + } + + sort.Strings(subKeys) + + for _, sl := range subKeys { + row := []string{ + alabel, + sl, + strconv.Itoa(astats[sl]), + } + + t.AddRow(row...) + + numRows++ + } + } + + if numRows > 0 || showEmpty { + title, _ := s.Description() + cstable.RenderTitle(out, "\n"+title+":") + t.Render() + } +} diff --git a/cmd/crowdsec-cli/metrics/statlapibouncer.go b/cmd/crowdsec-cli/metrics/statlapibouncer.go new file mode 100644 index 00000000000..e7483c6294d --- /dev/null +++ b/cmd/crowdsec-cli/metrics/statlapibouncer.go @@ -0,0 +1,43 @@ +package metrics + +import ( + "io" + + "github.com/jedib0t/go-pretty/v6/text" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" +) + +type statLapiBouncer map[string]map[string]map[string]int + +func (s statLapiBouncer) Description() (string, string) { + return "Local API Bouncers Metrics", + `Tracks total hits to remediation component related API routes.` +} + +func (s statLapiBouncer) Process(bouncer, route, method string, val int) { + if _, ok := s[bouncer]; !ok { + s[bouncer] = make(map[string]map[string]int) + } + + if _, ok := s[bouncer][route]; !ok { + s[bouncer][route] = make(map[string]int) + } + + s[bouncer][route][method] += val +} + +func (s statLapiBouncer) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { + t := cstable.New(out, wantColor) + t.SetRowLines(false) + t.SetHeaders("Bouncer", "Route", "Method", "Hits") + t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) + + numRows := lapiMetricsToTable(t, s) + + if numRows > 0 || showEmpty { + title, _ := s.Description() + cstable.RenderTitle(out, "\n"+title+":") + t.Render() + } +} diff 
--git a/cmd/crowdsec-cli/metrics/statlapidecision.go b/cmd/crowdsec-cli/metrics/statlapidecision.go new file mode 100644 index 00000000000..97e17fe8a49 --- /dev/null +++ b/cmd/crowdsec-cli/metrics/statlapidecision.go @@ -0,0 +1,65 @@ +package metrics + +import ( + "io" + "strconv" + + "github.com/jedib0t/go-pretty/v6/text" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" +) + +type statLapiDecision map[string]struct { + NonEmpty int + Empty int +} + +func (s statLapiDecision) Description() (string, string) { + return "Local API Bouncers Decisions", + `Tracks the number of empty/non-empty answers from LAPI to bouncers that are working in "live" mode.` +} + +func (s statLapiDecision) Process(bouncer, fam string, val int) { + if _, ok := s[bouncer]; !ok { + s[bouncer] = struct { + NonEmpty int + Empty int + }{} + } + + x := s[bouncer] + + switch fam { + case "cs_lapi_decisions_ko_total": + x.Empty += val + case "cs_lapi_decisions_ok_total": + x.NonEmpty += val + } + + s[bouncer] = x +} + +func (s statLapiDecision) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { + t := cstable.New(out, wantColor) + t.SetRowLines(false) + t.SetHeaders("Bouncer", "Empty answers", "Non-empty answers") + t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft) + + numRows := 0 + + for bouncer, hits := range s { + t.AddRow( + bouncer, + strconv.Itoa(hits.Empty), + strconv.Itoa(hits.NonEmpty), + ) + + numRows++ + } + + if numRows > 0 || showEmpty { + title, _ := s.Description() + cstable.RenderTitle(out, "\n"+title+":") + t.Render() + } +} diff --git a/cmd/crowdsec-cli/metrics/statlapimachine.go b/cmd/crowdsec-cli/metrics/statlapimachine.go new file mode 100644 index 00000000000..6b9d9da207e --- /dev/null +++ b/cmd/crowdsec-cli/metrics/statlapimachine.go @@ -0,0 +1,43 @@ +package metrics + +import ( + "io" + + "github.com/jedib0t/go-pretty/v6/text" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" +) + +type statLapiMachine 
map[string]map[string]map[string]int + +func (s statLapiMachine) Description() (string, string) { + return "Local API Machines Metrics", + `Tracks the number of calls to the local API from each registered machine.` +} + +func (s statLapiMachine) Process(machine, route, method string, val int) { + if _, ok := s[machine]; !ok { + s[machine] = make(map[string]map[string]int) + } + + if _, ok := s[machine][route]; !ok { + s[machine][route] = make(map[string]int) + } + + s[machine][route][method] += val +} + +func (s statLapiMachine) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { + t := cstable.New(out, wantColor) + t.SetRowLines(false) + t.SetHeaders("Machine", "Route", "Method", "Hits") + t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) + + numRows := lapiMetricsToTable(t, s) + + if numRows > 0 || showEmpty { + title, _ := s.Description() + cstable.RenderTitle(out, "\n"+title+":") + t.Render() + } +} diff --git a/cmd/crowdsec-cli/metrics/statparser.go b/cmd/crowdsec-cli/metrics/statparser.go new file mode 100644 index 00000000000..0ed7cedbe4b --- /dev/null +++ b/cmd/crowdsec-cli/metrics/statparser.go @@ -0,0 +1,44 @@ +package metrics + +import ( + "io" + + "github.com/jedib0t/go-pretty/v6/text" + log "github.com/sirupsen/logrus" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" +) + +type statParser map[string]map[string]int + +func (s statParser) Description() (string, string) { + return "Parser Metrics", + `Tracks the number of events processed by each parser and indicates success of failure. ` + + `Zero parsed lines means the parer(s) failed. 
` + + `Non-zero unparsed lines are fine as crowdsec select relevant lines.` +} + +func (s statParser) Process(parser, metric string, val int) { + if _, ok := s[parser]; !ok { + s[parser] = make(map[string]int) + } + + s[parser][metric] += val +} + +func (s statParser) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { + t := cstable.New(out, wantColor) + t.SetRowLines(false) + t.SetHeaders("Parsers", "Hits", "Parsed", "Unparsed") + t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) + + keys := []string{"hits", "parsed", "unparsed"} + + if numRows, err := metricsToTable(t, s, keys, noUnit); err != nil { + log.Warningf("while collecting parsers stats: %s", err) + } else if numRows > 0 || showEmpty { + title, _ := s.Description() + cstable.RenderTitle(out, "\n"+title+":") + t.Render() + } +} diff --git a/cmd/crowdsec-cli/metrics/statstash.go b/cmd/crowdsec-cli/metrics/statstash.go new file mode 100644 index 00000000000..79c14b04fd6 --- /dev/null +++ b/cmd/crowdsec-cli/metrics/statstash.go @@ -0,0 +1,61 @@ +package metrics + +import ( + "io" + "strconv" + + "github.com/jedib0t/go-pretty/v6/text" + + "github.com/crowdsecurity/go-cs-lib/maptools" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" +) + +type statStash map[string]struct { + Type string + Count int +} + +func (s statStash) Description() (string, string) { + return "Parser Stash Metrics", + `Tracks the status of stashes that might be created by various parsers and scenarios.` +} + +func (s statStash) Process(name, mtype string, val int) { + s[name] = struct { + Type string + Count int + }{ + Type: mtype, + Count: val, + } +} + +func (s statStash) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { + t := cstable.New(out, wantColor) + t.SetRowLines(false) + t.SetHeaders("Name", "Type", "Items") + t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft) + + // unfortunately, we can't reuse metricsToTable as the structure is 
too different :/ + numRows := 0 + + for _, alabel := range maptools.SortedKeys(s) { + astats := s[alabel] + + row := []string{ + alabel, + astats.Type, + strconv.Itoa(astats.Count), + } + t.AddRow(row...) + + numRows++ + } + + if numRows > 0 || showEmpty { + title, _ := s.Description() + cstable.RenderTitle(out, "\n"+title+":") + t.Render() + } +} diff --git a/cmd/crowdsec-cli/metrics/statwhitelist.go b/cmd/crowdsec-cli/metrics/statwhitelist.go new file mode 100644 index 00000000000..89a016d22b0 --- /dev/null +++ b/cmd/crowdsec-cli/metrics/statwhitelist.go @@ -0,0 +1,44 @@ +package metrics + +import ( + "io" + + "github.com/jedib0t/go-pretty/v6/text" + log "github.com/sirupsen/logrus" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" +) + +type statWhitelist map[string]map[string]map[string]int + +func (s statWhitelist) Description() (string, string) { + return "Whitelist Metrics", + `Tracks the number of events processed and possibly whitelisted by each parser whitelist.` +} + +func (s statWhitelist) Process(whitelist, reason, metric string, val int) { + if _, ok := s[whitelist]; !ok { + s[whitelist] = make(map[string]map[string]int) + } + + if _, ok := s[whitelist][reason]; !ok { + s[whitelist][reason] = make(map[string]int) + } + + s[whitelist][reason][metric] += val +} + +func (s statWhitelist) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { + t := cstable.New(out, wantColor) + t.SetRowLines(false) + t.SetHeaders("Whitelist", "Reason", "Hits", "Whitelisted") + t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) + + if numRows, err := wlMetricsToTable(t, s, noUnit); err != nil { + log.Warningf("while collecting parsers stats: %s", err) + } else if numRows > 0 || showEmpty { + title, _ := s.Description() + cstable.RenderTitle(out, "\n"+title+":") + t.Render() + } +} diff --git a/cmd/crowdsec-cli/metrics/store.go b/cmd/crowdsec-cli/metrics/store.go new file mode 100644 index 00000000000..48926488c07 
--- /dev/null +++ b/cmd/crowdsec-cli/metrics/store.go @@ -0,0 +1,264 @@ +package metrics + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "strconv" + "strings" + "time" + + dto "github.com/prometheus/client_model/go" + "github.com/prometheus/prom2json" + log "github.com/sirupsen/logrus" + "gopkg.in/yaml.v3" + + "github.com/crowdsecurity/go-cs-lib/maptools" + "github.com/crowdsecurity/go-cs-lib/trace" +) + +type metricSection interface { + Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) + Description() (string, string) +} + +type metricStore map[string]metricSection + +func NewMetricStore() metricStore { + return metricStore{ + "acquisition": statAcquis{}, + "scenarios": statBucket{}, + "parsers": statParser{}, + "lapi": statLapi{}, + "lapi-machine": statLapiMachine{}, + "lapi-bouncer": statLapiBouncer{}, + "lapi-decisions": statLapiDecision{}, + "decisions": statDecision{}, + "alerts": statAlert{}, + "stash": statStash{}, + "appsec-engine": statAppsecEngine{}, + "appsec-rule": statAppsecRule{}, + "whitelists": statWhitelist{}, + } +} + +func (ms metricStore) Fetch(url string) error { + mfChan := make(chan *dto.MetricFamily, 1024) + errChan := make(chan error, 1) + + // Start with the DefaultTransport for sane defaults. + transport := http.DefaultTransport.(*http.Transport).Clone() + // Conservatively disable HTTP keep-alives as this program will only + // ever need a single HTTP request. + transport.DisableKeepAlives = true + // Timeout early if the server doesn't even return the headers. 
+ transport.ResponseHeaderTimeout = time.Minute + go func() { + defer trace.CatchPanic("crowdsec/ShowPrometheus") + + err := prom2json.FetchMetricFamilies(url, mfChan, transport) + if err != nil { + errChan <- fmt.Errorf("failed to fetch metrics: %w", err) + return + } + errChan <- nil + }() + + result := []*prom2json.Family{} + for mf := range mfChan { + result = append(result, prom2json.NewFamily(mf)) + } + + if err := <-errChan; err != nil { + return err + } + + log.Debugf("Finished reading metrics output, %d entries", len(result)) + /*walk*/ + + mAcquis := ms["acquisition"].(statAcquis) + mParser := ms["parsers"].(statParser) + mBucket := ms["scenarios"].(statBucket) + mLapi := ms["lapi"].(statLapi) + mLapiMachine := ms["lapi-machine"].(statLapiMachine) + mLapiBouncer := ms["lapi-bouncer"].(statLapiBouncer) + mLapiDecision := ms["lapi-decisions"].(statLapiDecision) + mDecision := ms["decisions"].(statDecision) + mAppsecEngine := ms["appsec-engine"].(statAppsecEngine) + mAppsecRule := ms["appsec-rule"].(statAppsecRule) + mAlert := ms["alerts"].(statAlert) + mStash := ms["stash"].(statStash) + mWhitelist := ms["whitelists"].(statWhitelist) + + for idx, fam := range result { + if !strings.HasPrefix(fam.Name, "cs_") { + continue + } + + log.Tracef("round %d", idx) + + for _, m := range fam.Metrics { + metric, ok := m.(prom2json.Metric) + if !ok { + log.Debugf("failed to convert metric to prom2json.Metric") + continue + } + + name, ok := metric.Labels["name"] + if !ok { + log.Debugf("no name in Metric %v", metric.Labels) + } + + source, ok := metric.Labels["source"] + if !ok { + log.Debugf("no source in Metric %v for %s", metric.Labels, fam.Name) + } else { + if srctype, ok := metric.Labels["type"]; ok { + source = srctype + ":" + source + } + } + + value := m.(prom2json.Metric).Value + machine := metric.Labels["machine"] + bouncer := metric.Labels["bouncer"] + + route := metric.Labels["route"] + method := metric.Labels["method"] + + reason := 
metric.Labels["reason"] + origin := metric.Labels["origin"] + action := metric.Labels["action"] + + appsecEngine := metric.Labels["appsec_engine"] + appsecRule := metric.Labels["rule_name"] + + mtype := metric.Labels["type"] + + fval, err := strconv.ParseFloat(value, 32) + if err != nil { + log.Errorf("Unexpected int value %s : %s", value, err) + } + + ival := int(fval) + + switch fam.Name { + // + // buckets + // + case "cs_bucket_created_total": + mBucket.Process(name, "instantiation", ival) + case "cs_buckets": + mBucket.Process(name, "curr_count", ival) + case "cs_bucket_overflowed_total": + mBucket.Process(name, "overflow", ival) + case "cs_bucket_poured_total": + mBucket.Process(name, "pour", ival) + mAcquis.Process(source, "pour", ival) + case "cs_bucket_underflowed_total": + mBucket.Process(name, "underflow", ival) + // + // parsers + // + case "cs_parser_hits_total": + mAcquis.Process(source, "reads", ival) + case "cs_parser_hits_ok_total": + mAcquis.Process(source, "parsed", ival) + case "cs_parser_hits_ko_total": + mAcquis.Process(source, "unparsed", ival) + case "cs_node_hits_total": + mParser.Process(name, "hits", ival) + case "cs_node_hits_ok_total": + mParser.Process(name, "parsed", ival) + case "cs_node_hits_ko_total": + mParser.Process(name, "unparsed", ival) + // + // whitelists + // + case "cs_node_wl_hits_total": + mWhitelist.Process(name, reason, "hits", ival) + case "cs_node_wl_hits_ok_total": + mWhitelist.Process(name, reason, "whitelisted", ival) + // track as well whitelisted lines at acquis level + mAcquis.Process(source, "whitelisted", ival) + // + // lapi + // + case "cs_lapi_route_requests_total": + mLapi.Process(route, method, ival) + case "cs_lapi_machine_requests_total": + mLapiMachine.Process(machine, route, method, ival) + case "cs_lapi_bouncer_requests_total": + mLapiBouncer.Process(bouncer, route, method, ival) + case "cs_lapi_decisions_ko_total", "cs_lapi_decisions_ok_total": + mLapiDecision.Process(bouncer, fam.Name, ival) + // 
+ // decisions + // + case "cs_active_decisions": + mDecision.Process(reason, origin, action, ival) + case "cs_alerts": + mAlert.Process(reason, ival) + // + // stash + // + case "cs_cache_size": + mStash.Process(name, mtype, ival) + // + // appsec + // + case "cs_appsec_reqs_total": + mAppsecEngine.Process(appsecEngine, "processed", ival) + case "cs_appsec_block_total": + mAppsecEngine.Process(appsecEngine, "blocked", ival) + case "cs_appsec_rule_hits": + mAppsecRule.Process(appsecEngine, appsecRule, "triggered", ival) + default: + log.Debugf("unknown: %+v", fam.Name) + continue + } + } + } + + return nil +} + +func (ms metricStore) Format(out io.Writer, wantColor string, sections []string, formatType string, noUnit bool) error { + // copy only the sections we want + want := map[string]metricSection{} + + // if explicitly asking for sections, we want to show empty tables + showEmpty := len(sections) > 0 + + // if no sections are specified, we want all of them + if len(sections) == 0 { + sections = maptools.SortedKeys(ms) + } + + for _, section := range sections { + want[section] = ms[section] + } + + switch formatType { + case "human": + for _, section := range maptools.SortedKeys(want) { + want[section].Table(out, wantColor, noUnit, showEmpty) + } + case "json": + x, err := json.MarshalIndent(want, "", " ") + if err != nil { + return fmt.Errorf("failed to marshal metrics: %w", err) + } + out.Write(x) + case "raw": + x, err := yaml.Marshal(want) + if err != nil { + return fmt.Errorf("failed to marshal metrics: %w", err) + } + out.Write(x) + default: + return fmt.Errorf("unknown format type %s", formatType) + } + + return nil +} diff --git a/cmd/crowdsec-cli/metrics/table.go b/cmd/crowdsec-cli/metrics/table.go new file mode 100644 index 00000000000..f51e905ba71 --- /dev/null +++ b/cmd/crowdsec-cli/metrics/table.go @@ -0,0 +1,130 @@ +package metrics + +import ( + "errors" + "sort" + "strconv" + + log "github.com/sirupsen/logrus" + + 
"github.com/crowdsecurity/go-cs-lib/maptools" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" +) + +// ErrNilTable means a nil pointer was passed instead of a table instance. This is a programming error. +var ErrNilTable = errors.New("nil table") + +func lapiMetricsToTable(t *cstable.Table, stats map[string]map[string]map[string]int) int { + // stats: machine -> route -> method -> count + // sort keys to keep consistent order when printing + machineKeys := []string{} + for k := range stats { + machineKeys = append(machineKeys, k) + } + + sort.Strings(machineKeys) + + numRows := 0 + + for _, machine := range machineKeys { + // oneRow: route -> method -> count + machineRow := stats[machine] + for routeName, route := range machineRow { + for methodName, count := range route { + row := []string{ + machine, + routeName, + methodName, + } + if count != 0 { + row = append(row, strconv.Itoa(count)) + } else { + row = append(row, "-") + } + + t.AddRow(row...) + + numRows++ + } + } + } + + return numRows +} + +func wlMetricsToTable(t *cstable.Table, stats map[string]map[string]map[string]int, noUnit bool) (int, error) { + if t == nil { + return 0, ErrNilTable + } + + numRows := 0 + + for _, name := range maptools.SortedKeys(stats) { + for _, reason := range maptools.SortedKeys(stats[name]) { + row := []string{ + name, + reason, + "-", + "-", + } + + for _, action := range maptools.SortedKeys(stats[name][reason]) { + value := stats[name][reason][action] + + switch action { + case "whitelisted": + row[3] = strconv.Itoa(value) + case "hits": + row[2] = strconv.Itoa(value) + default: + log.Debugf("unexpected counter '%s' for whitelists = %d", action, value) + } + } + + t.AddRow(row...) 
+ + numRows++ + } + } + + return numRows, nil +} + +func metricsToTable(t *cstable.Table, stats map[string]map[string]int, keys []string, noUnit bool) (int, error) { + if t == nil { + return 0, ErrNilTable + } + + numRows := 0 + + for _, alabel := range maptools.SortedKeys(stats) { + astats, ok := stats[alabel] + if !ok { + continue + } + + row := []string{ + alabel, + } + + for _, sl := range keys { + if v, ok := astats[sl]; ok && v != 0 { + numberToShow := strconv.Itoa(v) + if !noUnit { + numberToShow = formatNumber(v) + } + + row = append(row, numberToShow) + } else { + row = append(row, "-") + } + } + + t.AddRow(row...) + + numRows++ + } + + return numRows, nil +} From 96a6eec1fbb4d6e49f73fa21d20c2cf7877efb67 Mon Sep 17 00:00:00 2001 From: Laurence Jones Date: Fri, 5 Jul 2024 16:04:33 +0100 Subject: [PATCH 211/318] enhance: Add default_range_remediation (#3109) --- config/profiles.yaml | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/config/profiles.yaml b/config/profiles.yaml index 9d81c9298a2..c4982acd978 100644 --- a/config/profiles.yaml +++ b/config/profiles.yaml @@ -12,3 +12,18 @@ decisions: # - http_default # Set the required http parameters in /etc/crowdsec/notifications/http.yaml before enabling this. # - email_default # Set the required email parameters in /etc/crowdsec/notifications/email.yaml before enabling this. on_success: break +--- +name: default_range_remediation +#debug: true +filters: + - Alert.Remediation == true && Alert.GetScope() == "Range" +decisions: + - type: ban + duration: 4h +#duration_expr: Sprintf('%dh', (GetDecisionsCount(Alert.GetValue()) + 1) * 4) +# notifications: +# - slack_default # Set the webhook in /etc/crowdsec/notifications/slack.yaml before enabling this. +# - splunk_default # Set the splunk url and token in /etc/crowdsec/notifications/splunk.yaml before enabling this. +# - http_default # Set the required http parameters in /etc/crowdsec/notifications/http.yaml before enabling this. 
+# - email_default # Set the required email parameters in /etc/crowdsec/notifications/email.yaml before enabling this. +on_success: break From aa0c389d2b9681a4a068337a89846d8bb404bbf0 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 9 Jul 2024 14:49:55 +0200 Subject: [PATCH 212/318] make: remove redundant go version check (#3118) It's already done by the toolchain with go.mod --- Makefile | 19 +++++++------------ mk/check_go_version.ps1 | 19 ------------------- mk/goversion.mk | 36 ------------------------------------ 3 files changed, 7 insertions(+), 67 deletions(-) delete mode 100644 mk/check_go_version.ps1 delete mode 100644 mk/goversion.mk diff --git a/Makefile b/Makefile index 324be04683d..97eb7ba0eea 100644 --- a/Makefile +++ b/Makefile @@ -25,10 +25,6 @@ BUILD_STATIC ?= 0 # List of plugins to build PLUGINS ?= $(patsubst ./cmd/notification-%,%,$(wildcard ./cmd/notification-*)) -# Can be overriden, if you can deal with the consequences -BUILD_REQUIRE_GO_MAJOR ?= 1 -BUILD_REQUIRE_GO_MINOR ?= 21 - #-------------------------------------- GO = go @@ -128,10 +124,10 @@ endif #-------------------------------------- .PHONY: build -build: pre-build goversion crowdsec cscli plugins ## Build crowdsec, cscli and plugins +build: build-info crowdsec cscli plugins ## Build crowdsec, cscli and plugins -.PHONY: pre-build -pre-build: ## Sanity checks and build information +.PHONY: build-info +build-info: ## Print build information $(info Building $(BUILD_VERSION) ($(BUILD_TAG)) $(BUILD_TYPE) for $(GOOS)/$(GOARCH)) ifneq (,$(RE2_FAIL)) @@ -195,11 +191,11 @@ clean: clean-debian clean-rpm testclean ## Remove build artifacts ) .PHONY: cscli -cscli: goversion ## Build cscli +cscli: ## Build cscli @$(MAKE) -C $(CSCLI_FOLDER) build $(MAKE_FLAGS) .PHONY: crowdsec -crowdsec: goversion ## Build crowdsec +crowdsec: ## Build crowdsec @$(MAKE) -C $(CROWDSEC_FOLDER) build $(MAKE_FLAGS) .PHONY: generate @@ -223,11 +219,11 @@ testenv: @echo 'NOTE: You 
need to run "make localstack" in a separate shell, "make localstack-stop" to terminate it' .PHONY: test -test: testenv goversion ## Run unit tests with localstack +test: testenv ## Run unit tests with localstack $(GOTEST) $(LD_OPTS) ./... .PHONY: go-acc -go-acc: testenv goversion ## Run unit tests with localstack + coverage +go-acc: testenv ## Run unit tests with localstack + coverage go-acc ./... -o coverage.out --ignore database,notifications,protobufs,cwversion,cstest,models -- $(LD_OPTS) check_docker: @@ -305,5 +301,4 @@ else include test/bats.mk endif -include mk/goversion.mk include mk/help.mk diff --git a/mk/check_go_version.ps1 b/mk/check_go_version.ps1 deleted file mode 100644 index 6060cb22751..00000000000 --- a/mk/check_go_version.ps1 +++ /dev/null @@ -1,19 +0,0 @@ -##This must be called with $(MINIMUM_SUPPORTED_GO_MAJOR_VERSION) $(MINIMUM_SUPPORTED_GO_MINOR_VERSION) in this order -$min_major=$args[0] -$min_minor=$args[1] -$goversion = (go env GOVERSION).replace("go","").split(".") -$goversion_major=$goversion[0] -$goversion_minor=$goversion[1] -$err_msg="Golang version $goversion_major.$goversion_minor is not supported, please use least $min_major.$min_minor" - -if ( $goversion_major -gt $min_major ) { - exit 0; -} -elseif ($goversion_major -lt $min_major) { - Write-Output $err_msg; - exit 1; -} -elseif ($goversion_minor -lt $min_minor) { - Write-Output $(GO_VERSION_VALIDATION_ERR_MSG); - exit 1; -} diff --git a/mk/goversion.mk b/mk/goversion.mk deleted file mode 100644 index 73e9a72e232..00000000000 --- a/mk/goversion.mk +++ /dev/null @@ -1,36 +0,0 @@ - -BUILD_GOVERSION = $(subst go,,$(shell $(GO) env GOVERSION)) - -go_major_minor = $(subst ., ,$(BUILD_GOVERSION)) -GO_MAJOR_VERSION = $(word 1, $(go_major_minor)) -GO_MINOR_VERSION = $(word 2, $(go_major_minor)) - -GO_VERSION_VALIDATION_ERR_MSG = Golang version ($(BUILD_GOVERSION)) is not supported, please use at least $(BUILD_REQUIRE_GO_MAJOR).$(BUILD_REQUIRE_GO_MINOR) - - -.PHONY: goversion -goversion: 
$(if $(findstring devel,$(shell $(GO) env GOVERSION)),goversion_devel,goversion_check) - - -.PHONY: goversion_devel -goversion_devel: - $(warning WARNING: You are using a development version of Golang ($(BUILD_GOVERSION)) which is not supported. For production environments, use a stable version (at least $(BUILD_REQUIRE_GO_MAJOR).$(BUILD_REQUIRE_GO_MINOR))) - $(info ) - - -.PHONY: goversion_check -goversion_check: -ifneq ($(OS), Windows_NT) - @if [ $(GO_MAJOR_VERSION) -gt $(BUILD_REQUIRE_GO_MAJOR) ]; then \ - exit 0; \ - elif [ $(GO_MAJOR_VERSION) -lt $(BUILD_REQUIRE_GO_MAJOR) ]; then \ - echo '$(GO_VERSION_VALIDATION_ERR_MSG)';\ - exit 1; \ - elif [ $(GO_MINOR_VERSION) -lt $(BUILD_REQUIRE_GO_MINOR) ] ; then \ - echo '$(GO_VERSION_VALIDATION_ERR_MSG)';\ - exit 1; \ - fi -else - # This needs Set-ExecutionPolicy -Scope CurrentUser Unrestricted - @$(CURDIR)/mk/check_go_version.ps1 $(BUILD_REQUIRE_GO_MAJOR) $(BUILD_REQUIRE_GO_MINOR) -endif From aab59b628794e933e7313d0cd25f5f746b0b2530 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 9 Jul 2024 14:51:31 +0200 Subject: [PATCH 213/318] typos (#3104) * typos * lint --- .golangci.yml | 8 -------- cmd/crowdsec-cli/metrics/statacquis.go | 2 +- cmd/crowdsec-cli/metrics/statparser.go | 2 +- cmd/crowdsec-cli/notifications.go | 2 +- 4 files changed, 3 insertions(+), 11 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 855c73f9af3..2ac39793731 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -407,18 +407,10 @@ issues: - errorlint text: "type switch on error will fail on wrapped errors. Use errors.As to check for specific errors" - - linters: - - errorlint - text: "type assertion on error will fail on wrapped errors. Use errors.Is to check for specific errors" - - linters: - errorlint text: "comparing with .* will fail on wrapped errors. Use errors.Is to check for a specific error" - - linters: - - errorlint - text: "switch on an error will fail on wrapped errors. 
Use errors.Is to check for specific errors" - - linters: - nosprintfhostport text: "host:port in url should be constructed with net.JoinHostPort and not directly with fmt.Sprintf" diff --git a/cmd/crowdsec-cli/metrics/statacquis.go b/cmd/crowdsec-cli/metrics/statacquis.go index 4a8cf0f8934..c004f64f17a 100644 --- a/cmd/crowdsec-cli/metrics/statacquis.go +++ b/cmd/crowdsec-cli/metrics/statacquis.go @@ -15,7 +15,7 @@ func (s statAcquis) Description() (string, string) { return "Acquisition Metrics", `Measures the lines read, parsed, and unparsed per datasource. ` + `Zero read lines indicate a misconfigured or inactive datasource. ` + - `Zero parsed lines mean the parser(s) failed. ` + + `Zero parsed lines means the parser(s) failed. ` + `Non-zero parsed lines are fine as crowdsec selects relevant lines.` } diff --git a/cmd/crowdsec-cli/metrics/statparser.go b/cmd/crowdsec-cli/metrics/statparser.go index 0ed7cedbe4b..d8d651f269f 100644 --- a/cmd/crowdsec-cli/metrics/statparser.go +++ b/cmd/crowdsec-cli/metrics/statparser.go @@ -14,7 +14,7 @@ type statParser map[string]map[string]int func (s statParser) Description() (string, string) { return "Parser Metrics", `Tracks the number of events processed by each parser and indicates success of failure. ` + - `Zero parsed lines means the parer(s) failed. ` + + `Zero parsed lines means the parser(s) failed. 
` + `Non-zero unparsed lines are fine as crowdsec select relevant lines.` } diff --git a/cmd/crowdsec-cli/notifications.go b/cmd/crowdsec-cli/notifications.go index 4d757fca44e..8c6b6631b33 100644 --- a/cmd/crowdsec-cli/notifications.go +++ b/cmd/crowdsec-cli/notifications.go @@ -112,7 +112,7 @@ func (cli *cliNotifications) getPluginConfigs() (map[string]csplugin.PluginConfi func (cli *cliNotifications) getProfilesConfigs() (map[string]NotificationsCfg, error) { cfg := cli.cfg() - // A bit of a tricky stuf now: reconcile profiles and notification plugins + // A bit of a tricky stuff now: reconcile profiles and notification plugins pcfgs, err := cli.getPluginConfigs() if err != nil { return nil, err From 31195ddf36288a4091039dc162c87cf469791ec8 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 9 Jul 2024 15:30:38 +0200 Subject: [PATCH 214/318] update vagrant config for opensuse (#3119) --- .../experimental/{opensuse-15.4 => opensuse-15.6}/Vagrantfile | 3 ++- test/ansible/vagrant/experimental/opensuse-15.6/bootstrap | 3 +++ 2 files changed, 5 insertions(+), 1 deletion(-) rename test/ansible/vagrant/experimental/{opensuse-15.4 => opensuse-15.6}/Vagrantfile (84%) create mode 100644 test/ansible/vagrant/experimental/opensuse-15.6/bootstrap diff --git a/test/ansible/vagrant/experimental/opensuse-15.4/Vagrantfile b/test/ansible/vagrant/experimental/opensuse-15.6/Vagrantfile similarity index 84% rename from test/ansible/vagrant/experimental/opensuse-15.4/Vagrantfile rename to test/ansible/vagrant/experimental/opensuse-15.6/Vagrantfile index 4a3ec307c4f..f2dc70816c9 100644 --- a/test/ansible/vagrant/experimental/opensuse-15.4/Vagrantfile +++ b/test/ansible/vagrant/experimental/opensuse-15.6/Vagrantfile @@ -1,7 +1,8 @@ # frozen_string_literal: true Vagrant.configure('2') do |config| - config.vm.box = 'opensuse/Leap-15.4.x86_64' + config.vm.box = 'opensuse/Leap-15.6.x86_64' + config.vm.box_version = "15.6.13.280" config.vm.define 
'crowdsec' config.vm.provision 'shell', path: 'bootstrap' diff --git a/test/ansible/vagrant/experimental/opensuse-15.6/bootstrap b/test/ansible/vagrant/experimental/opensuse-15.6/bootstrap new file mode 100644 index 00000000000..a43165d1828 --- /dev/null +++ b/test/ansible/vagrant/experimental/opensuse-15.6/bootstrap @@ -0,0 +1,3 @@ +#!/bin/sh + +zypper install -y kitty-terminfo From 6f5e970f502f06185401e8182c9976f11d5b47df Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 10 Jul 2024 12:57:16 +0200 Subject: [PATCH 215/318] func tests: update curl wrapper (#3121) --- test/bats/00_wait_for.bats | 1 - test/bats/01_crowdsec_lapi.bats | 1 - test/bats/01_cscli.bats | 1 - test/bats/07_setup.bats | 1 - test/bats/10_bouncers.bats | 2 +- test/bats/11_bouncers_tls.bats | 16 ++++++++------ test/bats/20_hub_items.bats | 4 ++-- test/bats/20_hub_scenarios.bats | 2 +- test/bats/30_machines_tls.bats | 1 - test/bats/90_decisions.bats | 1 - test/bats/97_ipv4_single.bats | 12 +++++------ test/bats/97_ipv6_single.bats | 16 +++++++------- test/bats/98_ipv4_range.bats | 16 +++++++------- test/bats/98_ipv6_range.bats | 24 ++++++++++----------- test/bats/99_lapi-stream-mode-scenario.bats | 24 ++++++++++----------- test/bats/99_lapi-stream-mode-scopes.bats | 8 +++---- test/bats/99_lapi-stream-mode.bats | 8 +++---- test/lib/setup_file.sh | 16 ++++++++------ 18 files changed, 76 insertions(+), 78 deletions(-) diff --git a/test/bats/00_wait_for.bats b/test/bats/00_wait_for.bats index ffc6802d9bc..94c65033bb4 100644 --- a/test/bats/00_wait_for.bats +++ b/test/bats/00_wait_for.bats @@ -68,4 +68,3 @@ setup() { 2 EOT } - diff --git a/test/bats/01_crowdsec_lapi.bats b/test/bats/01_crowdsec_lapi.bats index 0f6c41cc53d..21e1d7a093e 100644 --- a/test/bats/01_crowdsec_lapi.bats +++ b/test/bats/01_crowdsec_lapi.bats @@ -48,4 +48,3 @@ teardown() { rune -0 config_set 'del(.api.server.listen_socket) | .api.server.listen_uri="127.0.0.1:0"' rune -0 wait-for --err 
"CrowdSec Local API listening on 127.0.0.1:" "$CROWDSEC" -no-cs } - diff --git a/test/bats/01_cscli.bats b/test/bats/01_cscli.bats index a0878ad0e12..792274cc4f4 100644 --- a/test/bats/01_cscli.bats +++ b/test/bats/01_cscli.bats @@ -130,7 +130,6 @@ teardown() { EOT } - @test "cscli - required configuration paths" { config=$(cat "$CONFIG_YAML") configdir=$(config_get '.config_paths.config_dir') diff --git a/test/bats/07_setup.bats b/test/bats/07_setup.bats index 9e3f5533728..2106d3ab6b2 100644 --- a/test/bats/07_setup.bats +++ b/test/bats/07_setup.bats @@ -822,4 +822,3 @@ update-notifier-motd.timer enabled enabled assert_output "while unmarshaling setup file: yaml: line 2: could not find expected ':'" assert_stderr --partial "invalid setup file" } - diff --git a/test/bats/10_bouncers.bats b/test/bats/10_bouncers.bats index 9e795e584b4..b6efbd06650 100644 --- a/test/bats/10_bouncers.bats +++ b/test/bats/10_bouncers.bats @@ -68,7 +68,7 @@ teardown() { assert_output --regexp 'ciTestBouncer.*api-key.*' # the first connection sets last_pull and ip address - rune -0 lapi-get '/v1/decisions' + rune -0 curl-with-key '/v1/decisions' rune -0 cscli bouncers list -o json rune -0 jq -r '.[] | .ip_address' <(output) assert_output 127.0.0.1 diff --git a/test/bats/11_bouncers_tls.bats b/test/bats/11_bouncers_tls.bats index 765e93ebee2..849b3a5b35c 100644 --- a/test/bats/11_bouncers_tls.bats +++ b/test/bats/11_bouncers_tls.bats @@ -146,12 +146,13 @@ teardown() { } @test "simulate a bouncer request with a valid cert" { - rune -0 curl -f -s \ + rune -0 curl --fail-with-body -sS \ --cert "$tmpdir/leaf.pem" \ --key "$tmpdir/leaf-key.pem" \ --cacert "$tmpdir/bundle.pem" \ https://localhost:8080/v1/decisions\?ip=42.42.42.42 assert_output "null" + refute_stderr rune -0 cscli bouncers list -o json rune -0 jq '. 
| length' <(output) assert_output '1' @@ -162,22 +163,24 @@ teardown() { } @test "simulate a bouncer request with an invalid cert" { - rune -77 curl -f -s \ + rune -77 curl --fail-with-body -sS \ --cert "$tmpdir/leaf_invalid.pem" \ --key "$tmpdir/leaf_invalid-key.pem" \ --cacert "$tmpdir/root-key.pem" \ https://localhost:8080/v1/decisions\?ip=42.42.42.42 + assert_stderr --partial 'error setting certificate file' rune -0 cscli bouncers list -o json assert_output "[]" } @test "simulate a bouncer request with an invalid OU" { - rune -0 curl -s \ + rune -22 curl --fail-with-body -sS \ --cert "$tmpdir/leaf_bad_ou.pem" \ --key "$tmpdir/leaf_bad_ou-key.pem" \ --cacert "$tmpdir/bundle.pem" \ https://localhost:8080/v1/decisions\?ip=42.42.42.42 - assert_json '{message:"access forbidden"}' + assert_json '{message: "access forbidden"}' + assert_stderr --partial 'error: 403' rune -0 cscli bouncers list -o json assert_output "[]" } @@ -187,13 +190,14 @@ teardown() { # we connect twice to test the cache too for cert in "leaf_rev1" "leaf_rev2" "leaf_rev1" "leaf_rev2"; do truncate_log - rune -0 curl -s \ + rune -22 curl --fail-with-body -sS \ --cert "$tmpdir/$cert.pem" \ --key "$tmpdir/$cert-key.pem" \ --cacert "$tmpdir/bundle.pem" \ https://localhost:8080/v1/decisions\?ip=42.42.42.42 assert_log --partial "certificate revoked by CRL" - assert_output --partial "access forbidden" + assert_json '{message: "access forbidden"}' + assert_stderr --partial "error: 403" rune -0 cscli bouncers list -o json assert_output "[]" done diff --git a/test/bats/20_hub_items.bats b/test/bats/20_hub_items.bats index c6dbafc0911..1846b3e424c 100644 --- a/test/bats/20_hub_items.bats +++ b/test/bats/20_hub_items.bats @@ -46,7 +46,7 @@ teardown() { '. 
* {collections:{"crowdsecurity/sshd":{"versions":{"1.2":{"digest":$DIGEST, "deprecated": false}, "1.10": {"digest":$DIGEST, "deprecated": false}}}}}' \ ) echo "$new_hub" >"$INDEX_PATH" - + rune -0 cscli collections install crowdsecurity/sshd truncate -s 0 "$CONFIG_DIR/collections/sshd.yaml" @@ -78,7 +78,7 @@ teardown() { '. * {collections:{"crowdsecurity/sshd":{"versions":{"1.2.3.4":{"digest":"foo", "deprecated": false}}}}}' \ ) echo "$new_hub" >"$INDEX_PATH" - + rune -0 cscli collections install crowdsecurity/sshd rune -1 cscli collections inspect crowdsecurity/sshd --no-metrics -o json # XXX: we are on the verbose side here... diff --git a/test/bats/20_hub_scenarios.bats b/test/bats/20_hub_scenarios.bats index 3089e244cf1..4e4b28e7703 100644 --- a/test/bats/20_hub_scenarios.bats +++ b/test/bats/20_hub_scenarios.bats @@ -96,7 +96,7 @@ teardown() { # non-existent rune -1 cscli scenario install foo/bar assert_stderr --partial "can't find 'foo/bar' in scenarios" - + # not installed rune -0 cscli scenarios list crowdsecurity/ssh-bf assert_output --regexp 'crowdsecurity/ssh-bf.*disabled' diff --git a/test/bats/30_machines_tls.bats b/test/bats/30_machines_tls.bats index ef2915e3880..ef02d1b57c3 100644 --- a/test/bats/30_machines_tls.bats +++ b/test/bats/30_machines_tls.bats @@ -3,7 +3,6 @@ set -u - # root: root CA # inter: intermediate CA # inter_rev: intermediate CA revoked by root (CRL3) diff --git a/test/bats/90_decisions.bats b/test/bats/90_decisions.bats index be6e905356e..c7ed214ffc9 100644 --- a/test/bats/90_decisions.bats +++ b/test/bats/90_decisions.bats @@ -179,7 +179,6 @@ teardown() { # disarding only some invalid decisions - rune -0 cscli alerts delete --all truncate -s 0 "$LOGFILE" diff --git a/test/bats/97_ipv4_single.bats b/test/bats/97_ipv4_single.bats index f02d9ebd503..b709930e2e5 100644 --- a/test/bats/97_ipv4_single.bats +++ b/test/bats/97_ipv4_single.bats @@ -30,7 +30,7 @@ setup() { } @test "API - first decisions list: must be empty" { - rune -0 
lapi-get '/v1/decisions' + rune -0 curl-with-key '/v1/decisions' assert_output 'null' } @@ -46,7 +46,7 @@ setup() { } @test "API - all decisions" { - rune -0 lapi-get '/v1/decisions' + rune -0 curl-with-key '/v1/decisions' rune -0 jq -c '[ . | length, .[0].value ]' <(output) assert_output '[1,"1.2.3.4"]' } @@ -60,7 +60,7 @@ setup() { } @test "API - decision for 1.2.3.4" { - rune -0 lapi-get '/v1/decisions?ip=1.2.3.4' + rune -0 curl-with-key '/v1/decisions?ip=1.2.3.4' rune -0 jq -r '.[0].value' <(output) assert_output '1.2.3.4' } @@ -71,7 +71,7 @@ setup() { } @test "API - decision for 1.2.3.5" { - rune -0 lapi-get '/v1/decisions?ip=1.2.3.5' + rune -0 curl-with-key '/v1/decisions?ip=1.2.3.5' assert_output 'null' } @@ -83,7 +83,7 @@ setup() { } @test "API - decision for 1.2.3.0/24" { - rune -0 lapi-get '/v1/decisions?range=1.2.3.0/24' + rune -0 curl-with-key '/v1/decisions?range=1.2.3.0/24' assert_output 'null' } @@ -94,7 +94,7 @@ setup() { } @test "API - decisions where IP in 1.2.3.0/24" { - rune -0 lapi-get '/v1/decisions?range=1.2.3.0/24&contains=false' + rune -0 curl-with-key '/v1/decisions?range=1.2.3.0/24&contains=false' rune -0 jq -r '.[0].value' <(output) assert_output '1.2.3.4' } diff --git a/test/bats/97_ipv6_single.bats b/test/bats/97_ipv6_single.bats index 3cb1cedd0c2..c7aea030f9c 100644 --- a/test/bats/97_ipv6_single.bats +++ b/test/bats/97_ipv6_single.bats @@ -41,7 +41,7 @@ setup() { } @test "API - all decisions" { - rune -0 lapi-get "/v1/decisions" + rune -0 curl-with-key "/v1/decisions" rune -0 jq -r '.[].value' <(output) assert_output '1111:2222:3333:4444:5555:6666:7777:8888' } @@ -53,7 +53,7 @@ setup() { } @test "API - decisions for ip 1111:2222:3333:4444:5555:6666:7777:888" { - rune -0 lapi-get '/v1/decisions?ip=1111:2222:3333:4444:5555:6666:7777:8888' + rune -0 curl-with-key '/v1/decisions?ip=1111:2222:3333:4444:5555:6666:7777:8888' rune -0 jq -r '.[].value' <(output) assert_output '1111:2222:3333:4444:5555:6666:7777:8888' } @@ -64,7 +64,7 @@ 
setup() { } @test "API - decisions for ip 1211:2222:3333:4444:5555:6666:7777:888" { - rune -0 lapi-get '/v1/decisions?ip=1211:2222:3333:4444:5555:6666:7777:8888' + rune -0 curl-with-key '/v1/decisions?ip=1211:2222:3333:4444:5555:6666:7777:8888' assert_output 'null' } @@ -74,7 +74,7 @@ setup() { } @test "API - decisions for ip 1111:2222:3333:4444:5555:6666:7777:8887" { - rune -0 lapi-get '/v1/decisions?ip=1111:2222:3333:4444:5555:6666:7777:8887' + rune -0 curl-with-key '/v1/decisions?ip=1111:2222:3333:4444:5555:6666:7777:8887' assert_output 'null' } @@ -84,7 +84,7 @@ setup() { } @test "API - decisions for range 1111:2222:3333:4444:5555:6666:7777:8888/48" { - rune -0 lapi-get '/v1/decisions?range=1111:2222:3333:4444:5555:6666:7777:8888/48' + rune -0 curl-with-key '/v1/decisions?range=1111:2222:3333:4444:5555:6666:7777:8888/48' assert_output 'null' } @@ -95,7 +95,7 @@ setup() { } @test "API - decisions for ip/range in 1111:2222:3333:4444:5555:6666:7777:8888/48" { - rune -0 lapi-get '/v1/decisions?range=1111:2222:3333:4444:5555:6666:7777:8888/48&&contains=false' + rune -0 curl-with-key '/v1/decisions?range=1111:2222:3333:4444:5555:6666:7777:8888/48&&contains=false' rune -0 jq -r '.[].value' <(output) assert_output '1111:2222:3333:4444:5555:6666:7777:8888' } @@ -106,7 +106,7 @@ setup() { } @test "API - decisions for range 1111:2222:3333:4444:5555:6666:7777:8888/64" { - rune -0 lapi-get '/v1/decisions?range=1111:2222:3333:4444:5555:6666:7777:8888/64' + rune -0 curl-with-key '/v1/decisions?range=1111:2222:3333:4444:5555:6666:7777:8888/64' assert_output 'null' } @@ -117,7 +117,7 @@ setup() { } @test "API - decisions for ip/range in 1111:2222:3333:4444:5555:6666:7777:8888/64" { - rune -0 lapi-get '/v1/decisions?range=1111:2222:3333:4444:5555:6666:7777:8888/64&&contains=false' + rune -0 curl-with-key '/v1/decisions?range=1111:2222:3333:4444:5555:6666:7777:8888/64&&contains=false' rune -0 jq -r '.[].value' <(output) assert_output '1111:2222:3333:4444:5555:6666:7777:8888' } 
diff --git a/test/bats/98_ipv4_range.bats b/test/bats/98_ipv4_range.bats index a58f144b885..c85e40267f3 100644 --- a/test/bats/98_ipv4_range.bats +++ b/test/bats/98_ipv4_range.bats @@ -41,7 +41,7 @@ setup() { } @test "API - all decisions" { - rune -0 lapi-get '/v1/decisions' + rune -0 curl-with-key '/v1/decisions' rune -0 jq -r '.[0].value' <(output) assert_output '4.4.4.0/24' } @@ -55,7 +55,7 @@ setup() { } @test "API - decisions for ip 4.4.4." { - rune -0 lapi-get '/v1/decisions?ip=4.4.4.3' + rune -0 curl-with-key '/v1/decisions?ip=4.4.4.3' rune -0 jq -r '.[0].value' <(output) assert_output '4.4.4.0/24' } @@ -66,7 +66,7 @@ setup() { } @test "API - decisions for ip contained in 4.4.4." { - rune -0 lapi-get '/v1/decisions?ip=4.4.4.4&contains=false' + rune -0 curl-with-key '/v1/decisions?ip=4.4.4.4&contains=false' assert_output 'null' } @@ -76,7 +76,7 @@ setup() { } @test "API - decisions for ip 5.4.4." { - rune -0 lapi-get '/v1/decisions?ip=5.4.4.3' + rune -0 curl-with-key '/v1/decisions?ip=5.4.4.3' assert_output 'null' } @@ -86,7 +86,7 @@ setup() { } @test "API - decisions for range 4.4.0.0/1" { - rune -0 lapi-get '/v1/decisions?range=4.4.0.0/16' + rune -0 curl-with-key '/v1/decisions?range=4.4.0.0/16' assert_output 'null' } @@ -97,7 +97,7 @@ setup() { } @test "API - decisions for ip/range in 4.4.0.0/1" { - rune -0 lapi-get '/v1/decisions?range=4.4.0.0/16&contains=false' + rune -0 curl-with-key '/v1/decisions?range=4.4.0.0/16&contains=false' rune -0 jq -r '.[0].value' <(output) assert_output '4.4.4.0/24' } @@ -111,7 +111,7 @@ setup() { } @test "API - decisions for range 4.4.4.2/2" { - rune -0 lapi-get '/v1/decisions?range=4.4.4.2/28' + rune -0 curl-with-key '/v1/decisions?range=4.4.4.2/28' rune -0 jq -r '.[].value' <(output) assert_output '4.4.4.0/24' } @@ -122,6 +122,6 @@ setup() { } @test "API - decisions for range 4.4.3.2/2" { - rune -0 lapi-get '/v1/decisions?range=4.4.3.2/28' + rune -0 curl-with-key '/v1/decisions?range=4.4.3.2/28' assert_output 'null' } diff 
--git a/test/bats/98_ipv6_range.bats b/test/bats/98_ipv6_range.bats index 065f32b74ba..531122a5533 100644 --- a/test/bats/98_ipv6_range.bats +++ b/test/bats/98_ipv6_range.bats @@ -41,7 +41,7 @@ setup() { } @test "API - all decisions (2)" { - rune -0 lapi-get '/v1/decisions' + rune -0 curl-with-key '/v1/decisions' rune -0 jq -r '.[].value' <(output) assert_output 'aaaa:2222:3333:4444::/64' } @@ -55,7 +55,7 @@ setup() { } @test "API - decisions for ip aaaa:2222:3333:4444:5555:6666:7777:8888" { - rune -0 lapi-get '/v1/decisions?ip=aaaa:2222:3333:4444:5555:6666:7777:8888' + rune -0 curl-with-key '/v1/decisions?ip=aaaa:2222:3333:4444:5555:6666:7777:8888' rune -0 jq -r '.[].value' <(output) assert_output 'aaaa:2222:3333:4444::/64' } @@ -66,7 +66,7 @@ setup() { } @test "API - decisions for ip aaaa:2222:3333:4445:5555:6666:7777:8888" { - rune -0 lapi-get '/v1/decisions?ip=aaaa:2222:3333:4445:5555:6666:7777:8888' + rune -0 curl-with-key '/v1/decisions?ip=aaaa:2222:3333:4445:5555:6666:7777:8888' assert_output 'null' } @@ -76,7 +76,7 @@ setup() { } @test "API - decisions for ip aaa1:2222:3333:4444:5555:6666:7777:8887" { - rune -0 lapi-get '/v1/decisions?ip=aaa1:2222:3333:4444:5555:6666:7777:8887' + rune -0 curl-with-key '/v1/decisions?ip=aaa1:2222:3333:4444:5555:6666:7777:8887' assert_output 'null' } @@ -89,7 +89,7 @@ setup() { } @test "API - decisions for range aaaa:2222:3333:4444:5555::/80" { - rune -0 lapi-get '/v1/decisions?range=aaaa:2222:3333:4444:5555::/80' + rune -0 curl-with-key '/v1/decisions?range=aaaa:2222:3333:4444:5555::/80' rune -0 jq -r '.[].value' <(output) assert_output 'aaaa:2222:3333:4444::/64' } @@ -101,7 +101,7 @@ setup() { } @test "API - decisions for range aaaa:2222:3333:4441:5555::/80" { - rune -0 lapi-get '/v1/decisions?range=aaaa:2222:3333:4441:5555::/80' + rune -0 curl-with-key '/v1/decisions?range=aaaa:2222:3333:4441:5555::/80' assert_output 'null' } @@ -111,7 +111,7 @@ setup() { } @test "API - decisions for range aaa1:2222:3333:4444:5555::/80" { 
- rune -0 lapi-get '/v1/decisions?range=aaa1:2222:3333:4444:5555::/80' + rune -0 curl-with-key '/v1/decisions?range=aaa1:2222:3333:4444:5555::/80' assert_output 'null' } @@ -123,7 +123,7 @@ setup() { } @test "API - decisions for range aaaa:2222:3333:4444:5555:6666:7777:8888/48" { - rune -0 lapi-get '/v1/decisions?range=aaaa:2222:3333:4444:5555:6666:7777:8888/48' + rune -0 curl-with-key '/v1/decisions?range=aaaa:2222:3333:4444:5555:6666:7777:8888/48' assert_output 'null' } @@ -134,7 +134,7 @@ setup() { } @test "API - decisions for ip/range in aaaa:2222:3333:4444:5555:6666:7777:8888/48" { - rune -0 lapi-get '/v1/decisions?range=aaaa:2222:3333:4444:5555:6666:7777:8888/48&contains=false' + rune -0 curl-with-key '/v1/decisions?range=aaaa:2222:3333:4444:5555:6666:7777:8888/48&contains=false' rune -0 jq -r '.[].value' <(output) assert_output 'aaaa:2222:3333:4444::/64' } @@ -145,7 +145,7 @@ setup() { } @test "API - decisions for ip/range in aaaa:2222:3333:4445:5555:6666:7777:8888/48" { - rune -0 lapi-get '/v1/decisions?range=aaaa:2222:3333:4445:5555:6666:7777:8888/48' + rune -0 curl-with-key '/v1/decisions?range=aaaa:2222:3333:4445:5555:6666:7777:8888/48' assert_output 'null' } @@ -163,7 +163,7 @@ setup() { } @test "API - decisions for ip in bbbb:db8:0000:0000:0000:6fff:ffff:ffff" { - rune -0 lapi-get '/v1/decisions?ip=bbbb:db8:0000:0000:0000:6fff:ffff:ffff' + rune -0 curl-with-key '/v1/decisions?ip=bbbb:db8:0000:0000:0000:6fff:ffff:ffff' rune -0 jq -r '.[].value' <(output) assert_output 'bbbb:db8::/81' } @@ -174,7 +174,7 @@ setup() { } @test "API - decisions for ip in bbbb:db8:0000:0000:0000:8fff:ffff:ffff" { - rune -0 lapi-get '/v1/decisions?ip=bbbb:db8:0000:0000:0000:8fff:ffff:ffff' + rune -0 curl-with-key '/v1/decisions?ip=bbbb:db8:0000:0000:0000:8fff:ffff:ffff' assert_output 'null' } diff --git a/test/bats/99_lapi-stream-mode-scenario.bats b/test/bats/99_lapi-stream-mode-scenario.bats index 1cd44c1ae80..32c346061d1 100644 --- 
a/test/bats/99_lapi-stream-mode-scenario.bats +++ b/test/bats/99_lapi-stream-mode-scenario.bats @@ -26,7 +26,6 @@ output_new_decisions() { jq -c '.new | map(select(.origin!="CAPI")) | .[] | del(.id) | (.. | .duration?) |= capture("(?[[:digit:]]+h[[:digit:]]+m)").d' <(output) | sort } - @test "adding decisions with different duration, scenario, origin" { # origin: test rune -0 cscli decisions add -i 127.0.0.1 -d 1h -R crowdsecurity/test @@ -55,7 +54,7 @@ output_new_decisions() { } @test "test startup" { - rune -0 lapi-get "/v1/decisions/stream?startup=true" + rune -0 curl-with-key "/v1/decisions/stream?startup=true" rune -0 output_new_decisions assert_output - <<-EOT {"duration":"2h59m","origin":"test","scenario":"crowdsecurity/test","scope":"Ip","type":"ban","value":"127.0.0.2"} @@ -64,7 +63,7 @@ output_new_decisions() { } @test "test startup with scenarios containing" { - rune -0 lapi-get "/v1/decisions/stream?startup=true&scenarios_containing=ssh_bf" + rune -0 curl-with-key "/v1/decisions/stream?startup=true&scenarios_containing=ssh_bf" rune -0 output_new_decisions assert_output - <<-EOT {"duration":"2h59m","origin":"another_origin","scenario":"crowdsecurity/ssh_bf","scope":"Ip","type":"ban","value":"127.0.0.1"} @@ -73,7 +72,7 @@ output_new_decisions() { } @test "test startup with multiple scenarios containing" { - rune -0 lapi-get "/v1/decisions/stream?startup=true&scenarios_containing=ssh_bf,test" + rune -0 curl-with-key "/v1/decisions/stream?startup=true&scenarios_containing=ssh_bf,test" rune -0 output_new_decisions assert_output - <<-EOT {"duration":"2h59m","origin":"another_origin","scenario":"crowdsecurity/ssh_bf","scope":"Ip","type":"ban","value":"127.0.0.1"} @@ -82,12 +81,12 @@ output_new_decisions() { } @test "test startup with unknown scenarios containing" { - rune -0 lapi-get "/v1/decisions/stream?startup=true&scenarios_containing=unknown" + rune -0 curl-with-key "/v1/decisions/stream?startup=true&scenarios_containing=unknown" assert_output 
'{"deleted":null,"new":null}' } @test "test startup with scenarios containing and not containing" { - rune -0 lapi-get "/v1/decisions/stream?startup=true&scenarios_containing=test&scenarios_not_containing=ssh_bf" + rune -0 curl-with-key "/v1/decisions/stream?startup=true&scenarios_containing=test&scenarios_not_containing=ssh_bf" rune -0 output_new_decisions assert_output - <<-EOT {"duration":"2h59m","origin":"test","scenario":"crowdsecurity/test","scope":"Ip","type":"ban","value":"127.0.0.2"} @@ -96,7 +95,7 @@ output_new_decisions() { } @test "test startup with scenarios containing and not containing 2" { - rune -0 lapi-get "/v1/decisions/stream?startup=true&scenarios_containing=longest&scenarios_not_containing=ssh_bf,test" + rune -0 curl-with-key "/v1/decisions/stream?startup=true&scenarios_containing=longest&scenarios_not_containing=ssh_bf,test" rune -0 output_new_decisions assert_output - <<-EOT {"duration":"4h59m","origin":"test","scenario":"crowdsecurity/longest","scope":"Ip","type":"ban","value":"127.0.0.1"} @@ -104,7 +103,7 @@ output_new_decisions() { } @test "test startup with scenarios not containing" { - rune -0 lapi-get "/v1/decisions/stream?startup=true&scenarios_not_containing=ssh_bf" + rune -0 curl-with-key "/v1/decisions/stream?startup=true&scenarios_not_containing=ssh_bf" rune -0 output_new_decisions assert_output - <<-EOT {"duration":"2h59m","origin":"test","scenario":"crowdsecurity/test","scope":"Ip","type":"ban","value":"127.0.0.2"} @@ -113,7 +112,7 @@ output_new_decisions() { } @test "test startup with multiple scenarios not containing" { - rune -0 lapi-get "/v1/decisions/stream?startup=true&scenarios_not_containing=ssh_bf,test" + rune -0 curl-with-key "/v1/decisions/stream?startup=true&scenarios_not_containing=ssh_bf,test" rune -0 output_new_decisions assert_output - <<-EOT {"duration":"4h59m","origin":"test","scenario":"crowdsecurity/longest","scope":"Ip","type":"ban","value":"127.0.0.1"} @@ -121,7 +120,7 @@ output_new_decisions() { } @test 
"test startup with origins parameter" { - rune -0 lapi-get "/v1/decisions/stream?startup=true&origins=another_origin" + rune -0 curl-with-key "/v1/decisions/stream?startup=true&origins=another_origin" rune -0 output_new_decisions assert_output - <<-EOT {"duration":"1h59m","origin":"another_origin","scenario":"crowdsecurity/test","scope":"Ip","type":"ban","value":"127.0.0.2"} @@ -130,7 +129,7 @@ output_new_decisions() { } @test "test startup with multiple origins parameter" { - rune -0 lapi-get "/v1/decisions/stream?startup=true&origins=another_origin,test" + rune -0 curl-with-key "/v1/decisions/stream?startup=true&origins=another_origin,test" rune -0 output_new_decisions assert_output - <<-EOT {"duration":"2h59m","origin":"test","scenario":"crowdsecurity/test","scope":"Ip","type":"ban","value":"127.0.0.2"} @@ -139,7 +138,7 @@ output_new_decisions() { } @test "test startup with unknown origins" { - rune -0 lapi-get "/v1/decisions/stream?startup=true&origins=unknown" + rune -0 curl-with-key "/v1/decisions/stream?startup=true&origins=unknown" assert_output '{"deleted":null,"new":null}' } @@ -223,4 +222,3 @@ output_new_decisions() { # NewChecks: []DecisionCheck{}, # }, #} - diff --git a/test/bats/99_lapi-stream-mode-scopes.bats b/test/bats/99_lapi-stream-mode-scopes.bats index a9ed494e69c..67badebea0e 100644 --- a/test/bats/99_lapi-stream-mode-scopes.bats +++ b/test/bats/99_lapi-stream-mode-scopes.bats @@ -29,28 +29,28 @@ setup() { } @test "stream start (implicit ip scope)" { - rune -0 lapi-get "/v1/decisions/stream?startup=true" + rune -0 curl-with-key "/v1/decisions/stream?startup=true" rune -0 jq -r '.new' <(output) assert_output --partial '1.2.3.6' refute_output --partial 'toto' } @test "stream start (explicit ip scope)" { - rune -0 lapi-get "/v1/decisions/stream?startup=true&scopes=ip" + rune -0 curl-with-key "/v1/decisions/stream?startup=true&scopes=ip" rune -0 jq -r '.new' <(output) assert_output --partial '1.2.3.6' refute_output --partial 'toto' } @test "stream 
start (user scope)" { - rune -0 lapi-get "/v1/decisions/stream?startup=true&scopes=user" + rune -0 curl-with-key "/v1/decisions/stream?startup=true&scopes=user" rune -0 jq -r '.new' <(output) refute_output --partial '1.2.3.6' assert_output --partial 'toto' } @test "stream start (user+ip scope)" { - rune -0 lapi-get "/v1/decisions/stream?startup=true&scopes=user,ip" + rune -0 curl-with-key "/v1/decisions/stream?startup=true&scopes=user,ip" rune -0 jq -r '.new' <(output) assert_output --partial '1.2.3.6' assert_output --partial 'toto' diff --git a/test/bats/99_lapi-stream-mode.bats b/test/bats/99_lapi-stream-mode.bats index c683a6150f4..b3ee8a434ff 100644 --- a/test/bats/99_lapi-stream-mode.bats +++ b/test/bats/99_lapi-stream-mode.bats @@ -31,7 +31,7 @@ setup() { } @test "stream start" { - rune -0 lapi-get "/v1/decisions/stream?startup=true" + rune -0 curl-with-key "/v1/decisions/stream?startup=true" if is_db_mysql; then sleep 3; fi rune -0 jq -r '.new' <(output) assert_output --partial '1111:2222:3333:4444:5555:6666:7777:8888' @@ -42,7 +42,7 @@ setup() { @test "stream cont (add)" { rune -0 cscli decisions add -i '1.2.3.5' if is_db_mysql; then sleep 3; fi - rune -0 lapi-get "/v1/decisions/stream" + rune -0 curl-with-key "/v1/decisions/stream" rune -0 jq -r '.new' <(output) assert_output --partial '1.2.3.5' } @@ -50,13 +50,13 @@ setup() { @test "stream cont (del)" { rune -0 cscli decisions delete -i '1.2.3.4' if is_db_mysql; then sleep 3; fi - rune -0 lapi-get "/v1/decisions/stream" + rune -0 curl-with-key "/v1/decisions/stream" rune -0 jq -r '.deleted' <(output) assert_output --partial '1.2.3.4' } @test "stream restart" { - rune -0 lapi-get "/v1/decisions/stream?startup=true" + rune -0 curl-with-key "/v1/decisions/stream?startup=true" api_out=${output} rune -0 jq -r '.deleted' <(output) assert_output --partial '1.2.3.4' diff --git a/test/lib/setup_file.sh b/test/lib/setup_file.sh index ac651c68c4f..65c600d1769 100755 --- a/test/lib/setup_file.sh +++ 
b/test/lib/setup_file.sh @@ -283,15 +283,17 @@ rune() { export -f rune # call the lapi through unix socket with an API_KEY (authenticates as a bouncer) -lapi-get() { - [[ -z "$1" ]] && { fail "lapi-get: missing path"; } - [[ -z "$API_KEY" ]] && { fail "lapi-get: missing API_KEY"; } +# after $1, pass throught extra arguments to curl +curl-with-key() { + [[ -z "$1" ]] && { fail "${FUNCNAME[0]}: missing path"; } + local path=$1 + shift + [[ -z "$API_KEY" ]] && { fail "${FUNCNAME[0]}: missing API_KEY"; } local socket socket=$(config_get '.api.server.listen_socket') - [[ -z "$socket" ]] && { fail "lapi-get: missing .api.server.listen_socket"; } + [[ -z "$socket" ]] && { fail "${FUNCNAME[0]}: missing .api.server.listen_socket"; } # curl needs a fake hostname when using a unix socket - curl -s -f -H "X-Api-Key: $API_KEY" --unix-socket "$socket" "http://lapi$1" + curl -sS --fail-with-body -H "X-Api-Key: $API_KEY" --unix-socket "$socket" "http://lapi$path" "$@" } -export -f lapi-get - +export -f curl-with-key From 1b0104186aea4981556d86708b364f59884f9723 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 11 Jul 2024 12:19:10 +0200 Subject: [PATCH 216/318] cscli machines/bouncers: dry helper code and move to cscli (#3123) --- cmd/crowdsec-cli/bouncers.go | 90 +++++++++++++++++++++++++----------- cmd/crowdsec-cli/machines.go | 10 ++-- pkg/database/ent/helpers.go | 58 ++++++----------------- 3 files changed, 81 insertions(+), 77 deletions(-) diff --git a/cmd/crowdsec-cli/bouncers.go b/cmd/crowdsec-cli/bouncers.go index d304b1b7867..f7237b8216c 100644 --- a/cmd/crowdsec-cli/bouncers.go +++ b/cmd/crowdsec-cli/bouncers.go @@ -27,6 +27,40 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) +type featureflagProvider interface { + GetFeatureflags() string +} + +type osProvider interface { + GetOsname() string + GetOsversion() string +} + +func getOSNameAndVersion(o osProvider) string { + ret := o.GetOsname() + if o.GetOsversion() != "" 
{ + if ret != "" { + ret += "/" + } + + ret += o.GetOsversion() + } + + if ret == "" { + return "?" + } + + return ret +} + +func getFeatureFlagList(o featureflagProvider) []string { + if o.GetFeatureflags() == "" { + return nil + } + + return strings.Split(o.GetFeatureflags(), ",") +} + func askYesNo(message string, defaultAnswer bool) (bool, error) { var answer bool @@ -113,32 +147,32 @@ func (cli *cliBouncers) listHuman(out io.Writer, bouncers ent.Bouncers) { // bouncerInfo contains only the data we want for inspect/list type bouncerInfo struct { - CreatedAt time.Time `json:"created_at"` - UpdatedAt time.Time `json:"updated_at"` - Name string `json:"name"` - Revoked bool `json:"revoked"` - IPAddress string `json:"ip_address"` - Type string `json:"type"` - Version string `json:"version"` - LastPull *time.Time `json:"last_pull"` - AuthType string `json:"auth_type"` - OS string `json:"os,omitempty"` - Featureflags []string `json:"featureflags,omitempty"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` + Name string `json:"name"` + Revoked bool `json:"revoked"` + IPAddress string `json:"ip_address"` + Type string `json:"type"` + Version string `json:"version"` + LastPull *time.Time `json:"last_pull"` + AuthType string `json:"auth_type"` + OS string `json:"os,omitempty"` + Featureflags []string `json:"featureflags,omitempty"` } func newBouncerInfo(b *ent.Bouncer) bouncerInfo { return bouncerInfo{ - CreatedAt: b.CreatedAt, - UpdatedAt: b.UpdatedAt, - Name: b.Name, - Revoked: b.Revoked, - IPAddress: b.IPAddress, - Type: b.Type, - Version: b.Version, - LastPull: b.LastPull, - AuthType: b.AuthType, - OS: b.GetOSNameAndVersion(), - Featureflags: b.GetFeatureFlagList(), + CreatedAt: b.CreatedAt, + UpdatedAt: b.UpdatedAt, + Name: b.Name, + Revoked: b.Revoked, + IPAddress: b.IPAddress, + Type: b.Type, + Version: b.Version, + LastPull: b.LastPull, + AuthType: b.AuthType, + OS: getOSNameAndVersion(b), + Featureflags: getFeatureFlagList(b), } 
} @@ -166,10 +200,10 @@ func (cli *cliBouncers) listCSV(out io.Writer, bouncers ent.Bouncers) error { } csvwriter.Flush() + return nil } - func (cli *cliBouncers) list(out io.Writer) error { bouncers, err := cli.db.ListBouncers() if err != nil { @@ -342,7 +376,7 @@ func (cli *cliBouncers) newDeleteCmd() *cobra.Command { func (cli *cliBouncers) prune(duration time.Duration, force bool) error { if duration < 2*time.Minute { if yes, err := askYesNo( - "The duration you provided is less than 2 minutes. " + + "The duration you provided is less than 2 minutes. "+ "This may remove active bouncers. Continue?", false); err != nil { return err } else if !yes { @@ -365,7 +399,7 @@ func (cli *cliBouncers) prune(duration time.Duration, force bool) error { if !force { if yes, err := askYesNo( - "You are about to PERMANENTLY remove the above bouncers from the database. " + + "You are about to PERMANENTLY remove the above bouncers from the database. "+ "These will NOT be recoverable. Continue?", false); err != nil { return err } else if !yes { @@ -434,10 +468,10 @@ func (cli *cliBouncers) inspectHuman(out io.Writer, bouncer *ent.Bouncer) { {"Version", bouncer.Version}, {"Last Pull", lastPull}, {"Auth type", bouncer.AuthType}, - {"OS", bouncer.GetOSNameAndVersion()}, + {"OS", getOSNameAndVersion(bouncer)}, }) - for _, ff := range bouncer.GetFeatureFlagList() { + for _, ff := range getFeatureFlagList(bouncer) { t.AppendRow(table.Row{"Feature Flags", ff}) } @@ -463,10 +497,10 @@ func (cli *cliBouncers) inspect(bouncer *ent.Bouncer) error { default: return fmt.Errorf("output format '%s' not supported for this command", outputFormat) } + return nil } - func (cli *cliBouncers) newInspectCmd() *cobra.Command { cmd := &cobra.Command{ Use: "inspect [bouncer_name]", diff --git a/cmd/crowdsec-cli/machines.go b/cmd/crowdsec-cli/machines.go index 2da5e20229c..3489550b4fd 100644 --- a/cmd/crowdsec-cli/machines.go +++ b/cmd/crowdsec-cli/machines.go @@ -203,7 +203,7 @@ func (cli *cliMachines) 
listHuman(out io.Writer, machines ent.Machines) { hb = emoji.Warning + " " + hb } - t.AppendRow(table.Row{m.MachineId, m.IpAddress, m.UpdatedAt.Format(time.RFC3339), validated, m.Version, m.GetOSNameAndVersion(), m.AuthType, hb}) + t.AppendRow(table.Row{m.MachineId, m.IpAddress, m.UpdatedAt.Format(time.RFC3339), validated, m.Version, getOSNameAndVersion(m), m.AuthType, hb}) } fmt.Fprintln(out, t.Render()) @@ -236,8 +236,8 @@ func newMachineInfo(m *ent.Machine) machineInfo { Version: m.Version, IsValidated: m.IsValidated, AuthType: m.AuthType, - OS: m.GetOSNameAndVersion(), - Featureflags: m.GetFeatureFlagList(), + OS: getOSNameAndVersion(m), + Featureflags: getFeatureFlagList(m), Datasources: m.Datasources, } } @@ -642,7 +642,7 @@ func (cli *cliMachines) inspectHuman(out io.Writer, machine *ent.Machine) { {"Last Heartbeat", machine.LastHeartbeat}, {"Validated?", machine.IsValidated}, {"CrowdSec version", machine.Version}, - {"OS", machine.GetOSNameAndVersion()}, + {"OS", getOSNameAndVersion(machine)}, {"Auth type", machine.AuthType}, }) @@ -650,7 +650,7 @@ func (cli *cliMachines) inspectHuman(out io.Writer, machine *ent.Machine) { t.AppendRow(table.Row{"Datasources", fmt.Sprintf("%s: %d", dsName, dsCount)}) } - for _, ff := range machine.GetFeatureFlagList() { + for _, ff := range getFeatureFlagList(machine) { t.AppendRow(table.Row{"Feature Flags", ff}) } diff --git a/pkg/database/ent/helpers.go b/pkg/database/ent/helpers.go index c6cdbd7f32b..9b30ce451e0 100644 --- a/pkg/database/ent/helpers.go +++ b/pkg/database/ent/helpers.go @@ -1,55 +1,25 @@ package ent -import ( - "strings" -) - -func (m *Machine) GetOSNameAndVersion() string { - ret := m.Osname - if m.Osversion != "" { - if ret != "" { - ret += "/" - } - - ret += m.Osversion - } - - if ret == "" { - return "?" 
- } - - return ret +func (m *Machine) GetOsname() string { + return m.Osname } -func (b *Bouncer) GetOSNameAndVersion() string { - ret := b.Osname - if b.Osversion != "" { - if ret != "" { - ret += "/" - } - - ret += b.Osversion - } - - if ret == "" { - return "?" - } - - return ret +func (b *Bouncer) GetOsname() string { + return b.Osname } -func (m *Machine) GetFeatureFlagList() []string { - if m.Featureflags == "" { - return nil - } +func (m *Machine) GetOsversion() string { + return m.Osversion +} - return strings.Split(m.Featureflags, ",") +func (b *Bouncer) GetOsversion() string { + return b.Osversion } -func (b *Bouncer) GetFeatureFlagList() []string { - if b.Featureflags == "" { - return nil - } +func (m *Machine) GetFeatureflags() string { + return m.Featureflags +} - return strings.Split(b.Featureflags, ",") +func (b *Bouncer) GetFeatureflags() string { + return b.Featureflags } From 06720538f5f35d97f986244e9fe349d129fb61ed Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 12 Jul 2024 17:02:44 +0200 Subject: [PATCH 217/318] revert "db: round created, updated... 
timestamps to 1 second" (#3127) this fixes functional tests with decision stream startup=true --- pkg/types/utils.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/types/utils.go b/pkg/types/utils.go index 384dd00367e..712d44ba12d 100644 --- a/pkg/types/utils.go +++ b/pkg/types/utils.go @@ -66,7 +66,7 @@ func ConfigureLogger(clog *log.Logger) error { } func UtcNow() time.Time { - return time.Now().UTC().Round(time.Second) + return time.Now().UTC() } func IsNetworkFS(path string) (bool, string, error) { From f130ce677db5fa386d5491cd768b48e66f9c0585 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 15 Jul 2024 09:55:52 +0200 Subject: [PATCH 218/318] command "cscli metrics show bouncers" (#3126) * cscli metrics show bouncers * db metrics: increase payload size * func tests --- .../{metrics => climetrics}/list.go | 46 ++- .../{metrics => climetrics}/metrics.go | 8 +- .../{metrics => climetrics}/number.go | 13 +- .../{metrics => climetrics}/show.go | 22 +- .../{metrics => climetrics}/statacquis.go | 2 +- .../{metrics => climetrics}/statalert.go | 2 +- .../statappsecengine.go | 2 +- .../{metrics => climetrics}/statappsecrule.go | 2 +- cmd/crowdsec-cli/climetrics/statbouncer.go | 340 ++++++++++++++++++ .../{metrics => climetrics}/statbucket.go | 2 +- .../{metrics => climetrics}/statdecision.go | 2 +- .../{metrics => climetrics}/statlapi.go | 2 +- .../statlapibouncer.go | 2 +- .../statlapidecision.go | 2 +- .../statlapimachine.go | 2 +- .../{metrics => climetrics}/statparser.go | 2 +- .../{metrics => climetrics}/statstash.go | 2 +- .../{metrics => climetrics}/statwhitelist.go | 2 +- .../{metrics => climetrics}/store.go | 67 ++-- .../{metrics => climetrics}/table.go | 9 +- cmd/crowdsec-cli/main.go | 4 +- cmd/crowdsec-cli/support.go | 10 +- pkg/database/ent/migrate/schema.go | 2 +- pkg/database/ent/schema/metric.go | 2 +- test/bats/08_metrics.bats | 18 +- test/bats/08_metrics_bouncer.bats | 327 +++++++++++++++++ 
test/bats/08_metrics_machines.bats | 101 ++++++ 27 files changed, 892 insertions(+), 103 deletions(-) rename cmd/crowdsec-cli/{metrics => climetrics}/list.go (61%) rename cmd/crowdsec-cli/{metrics => climetrics}/metrics.go (87%) rename cmd/crowdsec-cli/{metrics => climetrics}/number.go (74%) rename cmd/crowdsec-cli/{metrics => climetrics}/show.go (82%) rename cmd/crowdsec-cli/{metrics => climetrics}/statacquis.go (98%) rename cmd/crowdsec-cli/{metrics => climetrics}/statalert.go (97%) rename cmd/crowdsec-cli/{metrics => climetrics}/statappsecengine.go (98%) rename cmd/crowdsec-cli/{metrics => climetrics}/statappsecrule.go (98%) create mode 100644 cmd/crowdsec-cli/climetrics/statbouncer.go rename cmd/crowdsec-cli/{metrics => climetrics}/statbucket.go (98%) rename cmd/crowdsec-cli/{metrics => climetrics}/statdecision.go (98%) rename cmd/crowdsec-cli/{metrics => climetrics}/statlapi.go (98%) rename cmd/crowdsec-cli/{metrics => climetrics}/statlapibouncer.go (98%) rename cmd/crowdsec-cli/{metrics => climetrics}/statlapidecision.go (98%) rename cmd/crowdsec-cli/{metrics => climetrics}/statlapimachine.go (98%) rename cmd/crowdsec-cli/{metrics => climetrics}/statparser.go (98%) rename cmd/crowdsec-cli/{metrics => climetrics}/statstash.go (98%) rename cmd/crowdsec-cli/{metrics => climetrics}/statwhitelist.go (98%) rename cmd/crowdsec-cli/{metrics => climetrics}/store.go (90%) rename cmd/crowdsec-cli/{metrics => climetrics}/table.go (94%) create mode 100644 test/bats/08_metrics_bouncer.bats create mode 100644 test/bats/08_metrics_machines.bats diff --git a/cmd/crowdsec-cli/metrics/list.go b/cmd/crowdsec-cli/climetrics/list.go similarity index 61% rename from cmd/crowdsec-cli/metrics/list.go rename to cmd/crowdsec-cli/climetrics/list.go index ba68aa4b64d..074f0603555 100644 --- a/cmd/crowdsec-cli/metrics/list.go +++ b/cmd/crowdsec-cli/climetrics/list.go @@ -1,4 +1,4 @@ -package metrics +package climetrics import ( "encoding/json" @@ -6,7 +6,9 @@ import ( 
"github.com/fatih/color" "github.com/spf13/cobra" - "gopkg.in/yaml.v3" + + "github.com/jedib0t/go-pretty/v6/table" + "github.com/jedib0t/go-pretty/v6/text" "github.com/crowdsecurity/go-cs-lib/maptools" @@ -32,17 +34,36 @@ func (cli *cliMetrics) list() error { }) } - switch cli.cfg().Cscli.Output { + outputFormat := cli.cfg().Cscli.Output + + switch outputFormat { case "human": - t := cstable.New(color.Output, cli.cfg().Cscli.Color) - t.SetRowLines(true) - t.SetHeaders("Type", "Title", "Description") + out := color.Output + t := cstable.New(out, cli.cfg().Cscli.Color).Writer + t.AppendHeader(table.Row{"Type", "Title", "Description"}) + t.SetColumnConfigs([]table.ColumnConfig{ + { + Name: "Type", + AlignHeader: text.AlignCenter, + }, + { + Name: "Title", + AlignHeader: text.AlignCenter, + }, + { + Name: "Description", + AlignHeader: text.AlignCenter, + WidthMax: 60, + WidthMaxEnforcer: text.WrapSoft, + }, + }) + t.Style().Options.SeparateRows = true for _, metric := range allMetrics { - t.AddRow(metric.Type, metric.Title, metric.Description) + t.AppendRow(table.Row{metric.Type, metric.Title, metric.Description}) } - t.Render() + fmt.Fprintln(out, t.Render()) case "json": x, err := json.MarshalIndent(allMetrics, "", " ") if err != nil { @@ -50,13 +71,8 @@ func (cli *cliMetrics) list() error { } fmt.Println(string(x)) - case "raw": - x, err := yaml.Marshal(allMetrics) - if err != nil { - return fmt.Errorf("failed to marshal metric types: %w", err) - } - - fmt.Println(string(x)) + default: + return fmt.Errorf("output format '%s' not supported for this command", outputFormat) } return nil diff --git a/cmd/crowdsec-cli/metrics/metrics.go b/cmd/crowdsec-cli/climetrics/metrics.go similarity index 87% rename from cmd/crowdsec-cli/metrics/metrics.go rename to cmd/crowdsec-cli/climetrics/metrics.go index 52d623dc37e..f3bc4874460 100644 --- a/cmd/crowdsec-cli/metrics/metrics.go +++ b/cmd/crowdsec-cli/climetrics/metrics.go @@ -1,4 +1,4 @@ -package metrics +package climetrics 
import ( "github.com/spf13/cobra" @@ -12,7 +12,7 @@ type cliMetrics struct { cfg configGetter } -func NewCLI(cfg configGetter) *cliMetrics { +func New(cfg configGetter) *cliMetrics { return &cliMetrics{ cfg: cfg, } @@ -38,8 +38,8 @@ cscli metrics --url http://lapi.local:6060/metrics show acquisition parsers cscli metrics list`, Args: cobra.ExactArgs(0), DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, _ []string) error { - return cli.show(nil, url, noUnit) + RunE: func(cmd *cobra.Command, _ []string) error { + return cli.show(cmd.Context(), nil, url, noUnit) }, } diff --git a/cmd/crowdsec-cli/metrics/number.go b/cmd/crowdsec-cli/climetrics/number.go similarity index 74% rename from cmd/crowdsec-cli/metrics/number.go rename to cmd/crowdsec-cli/climetrics/number.go index a60d3db2a9b..709b7cf853a 100644 --- a/cmd/crowdsec-cli/metrics/number.go +++ b/cmd/crowdsec-cli/climetrics/number.go @@ -1,8 +1,9 @@ -package metrics +package climetrics import ( "fmt" "math" + "strconv" ) type unit struct { @@ -20,11 +21,15 @@ var ranges = []unit{ {value: 1, symbol: ""}, } -func formatNumber(num int) string { - goodUnit := unit{} +func formatNumber(num int64, withUnit bool) string { + if !withUnit { + return strconv.FormatInt(num, 10) + } + + goodUnit := ranges[len(ranges)-1] for _, u := range ranges { - if int64(num) >= u.value { + if num >= u.value { goodUnit = u break } diff --git a/cmd/crowdsec-cli/metrics/show.go b/cmd/crowdsec-cli/climetrics/show.go similarity index 82% rename from cmd/crowdsec-cli/metrics/show.go rename to cmd/crowdsec-cli/climetrics/show.go index 46603034f24..7559463b66b 100644 --- a/cmd/crowdsec-cli/metrics/show.go +++ b/cmd/crowdsec-cli/climetrics/show.go @@ -1,11 +1,16 @@ -package metrics +package climetrics import ( + "context" "errors" "fmt" + log "github.com/sirupsen/logrus" + "github.com/fatih/color" "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" ) var ( @@ -13,7 +18,7 @@ var ( ErrMetricsDisabled = 
errors.New("prometheus is not enabled, can't show metrics") ) -func (cli *cliMetrics) show(sections []string, url string, noUnit bool) error { +func (cli *cliMetrics) show(ctx context.Context, sections []string, url string, noUnit bool) error { cfg := cli.cfg() if url != "" { @@ -30,8 +35,13 @@ func (cli *cliMetrics) show(sections []string, url string, noUnit bool) error { ms := NewMetricStore() - if err := ms.Fetch(cfg.Cscli.PrometheusUrl); err != nil { - return err + db, err := require.DBClient(ctx, cfg.DbConfig) + if err != nil { + log.Warnf("unable to open database: %s", err) + } + + if err := ms.Fetch(ctx, cfg.Cscli.PrometheusUrl, db); err != nil { + log.Warn(err) } // any section that we don't have in the store is an error @@ -90,9 +100,9 @@ cscli metrics list; cscli metrics list -o json cscli metrics show acquisition parsers scenarios stash -o json`, // Positional args are optional DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, args []string) error { + RunE: func(cmd *cobra.Command, args []string) error { args = expandAlias(args) - return cli.show(args, url, noUnit) + return cli.show(cmd.Context(), args, url, noUnit) }, } diff --git a/cmd/crowdsec-cli/metrics/statacquis.go b/cmd/crowdsec-cli/climetrics/statacquis.go similarity index 98% rename from cmd/crowdsec-cli/metrics/statacquis.go rename to cmd/crowdsec-cli/climetrics/statacquis.go index c004f64f17a..a18b8a2e2a2 100644 --- a/cmd/crowdsec-cli/metrics/statacquis.go +++ b/cmd/crowdsec-cli/climetrics/statacquis.go @@ -1,4 +1,4 @@ -package metrics +package climetrics import ( "io" diff --git a/cmd/crowdsec-cli/metrics/statalert.go b/cmd/crowdsec-cli/climetrics/statalert.go similarity index 97% rename from cmd/crowdsec-cli/metrics/statalert.go rename to cmd/crowdsec-cli/climetrics/statalert.go index c8055910a3a..65009fa322d 100644 --- a/cmd/crowdsec-cli/metrics/statalert.go +++ b/cmd/crowdsec-cli/climetrics/statalert.go @@ -1,4 +1,4 @@ -package metrics +package climetrics import ( "io" diff --git 
a/cmd/crowdsec-cli/metrics/statappsecengine.go b/cmd/crowdsec-cli/climetrics/statappsecengine.go similarity index 98% rename from cmd/crowdsec-cli/metrics/statappsecengine.go rename to cmd/crowdsec-cli/climetrics/statappsecengine.go index 18ad03ef03f..e4e0048daaf 100644 --- a/cmd/crowdsec-cli/metrics/statappsecengine.go +++ b/cmd/crowdsec-cli/climetrics/statappsecengine.go @@ -1,4 +1,4 @@ -package metrics +package climetrics import ( "io" diff --git a/cmd/crowdsec-cli/metrics/statappsecrule.go b/cmd/crowdsec-cli/climetrics/statappsecrule.go similarity index 98% rename from cmd/crowdsec-cli/metrics/statappsecrule.go rename to cmd/crowdsec-cli/climetrics/statappsecrule.go index 17ec28de99e..9647a111b77 100644 --- a/cmd/crowdsec-cli/metrics/statappsecrule.go +++ b/cmd/crowdsec-cli/climetrics/statappsecrule.go @@ -1,4 +1,4 @@ -package metrics +package climetrics import ( "fmt" diff --git a/cmd/crowdsec-cli/climetrics/statbouncer.go b/cmd/crowdsec-cli/climetrics/statbouncer.go new file mode 100644 index 00000000000..0105e345313 --- /dev/null +++ b/cmd/crowdsec-cli/climetrics/statbouncer.go @@ -0,0 +1,340 @@ +package climetrics + +import ( + "context" + "encoding/json" + "fmt" + "io" + "time" + + "github.com/jedib0t/go-pretty/v6/table" + "github.com/jedib0t/go-pretty/v6/text" + log "github.com/sirupsen/logrus" + + "github.com/crowdsecurity/go-cs-lib/maptools" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" + "github.com/crowdsecurity/crowdsec/pkg/database" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/metric" + "github.com/crowdsecurity/crowdsec/pkg/models" +) + +// un-aggregated data, de-normalized. 
+type bouncerMetricItem struct { + bouncerName string + ipType string + origin string + name string + unit string + value float64 +} + +type statBouncer struct { + // oldest collection timestamp for each bouncer + oldestTS map[string]*time.Time + // we keep de-normalized metrics so we can iterate + // over them multiple times and keep the aggregation code simple + rawMetrics []bouncerMetricItem + aggregated map[string]map[string]map[string]map[string]int64 + aggregatedAllOrigin map[string]map[string]map[string]int64 +} + +var knownPlurals = map[string]string{ + "byte": "bytes", + "packet": "packets", + "ip": "IPs", +} + +func (s *statBouncer) MarshalJSON() ([]byte, error) { + return json.Marshal(s.aggregated) +} + +func (s *statBouncer) Description() (string, string) { + return "Bouncer Metrics", + `Network traffic blocked by bouncers.` +} + +func warnOnce(warningsLogged map[string]bool, msg string) { + if _, ok := warningsLogged[msg]; !ok { + log.Warningf(msg) + warningsLogged[msg] = true + } +} + +func (s *statBouncer) Fetch(ctx context.Context, db *database.Client) error { + if db == nil { + return nil + } + + // query all bouncer metrics that have not been flushed + + metrics, err := db.Ent.Metric.Query(). 
+ Where( + metric.GeneratedTypeEQ(metric.GeneratedTypeRC), + ).All(ctx) + if err != nil { + return fmt.Errorf("unable to fetch metrics: %w", err) + } + + s.oldestTS = make(map[string]*time.Time) + + // don't spam the user with the same warnings + warningsLogged := make(map[string]bool) + + for _, met := range metrics { + bouncerName := met.GeneratedBy + + collectedAt := met.CollectedAt + if s.oldestTS[bouncerName] == nil || collectedAt.Before(*s.oldestTS[bouncerName]) { + s.oldestTS[bouncerName] = &collectedAt + } + + type bouncerMetrics struct { + Metrics []models.DetailedMetrics `json:"metrics"` + } + + payload := bouncerMetrics{} + + err := json.Unmarshal([]byte(met.Payload), &payload) + if err != nil { + log.Warningf("while parsing metrics for %s: %s", bouncerName, err) + continue + } + + for _, m := range payload.Metrics { + for _, item := range m.Items { + labels := item.Labels + + // these are mandatory but we got pointers, so... + + valid := true + + if item.Name == nil { + warnOnce(warningsLogged, "missing 'name' field in metrics reported by "+bouncerName) + // no continue - keep checking the rest + valid = false + } + + if item.Unit == nil { + warnOnce(warningsLogged, "missing 'unit' field in metrics reported by "+bouncerName) + valid = false + } + + if item.Value == nil { + warnOnce(warningsLogged, "missing 'value' field in metrics reported by "+bouncerName) + valid = false + } + + if !valid { + continue + } + + name := *item.Name + unit := *item.Unit + value := *item.Value + + rawMetric := bouncerMetricItem{ + bouncerName: bouncerName, + ipType: labels["ip_type"], + origin: labels["origin"], + name: name, + unit: unit, + value: value, + } + + s.rawMetrics = append(s.rawMetrics, rawMetric) + } + } + } + + s.aggregate() + + return nil +} + +func (s *statBouncer) aggregate() { + // [bouncer][origin][name][unit]value + if s.aggregated == nil { + s.aggregated = make(map[string]map[string]map[string]map[string]int64) + } + + if s.aggregatedAllOrigin == nil { 
+ s.aggregatedAllOrigin = make(map[string]map[string]map[string]int64) + } + + for _, raw := range s.rawMetrics { + if _, ok := s.aggregated[raw.bouncerName]; !ok { + s.aggregated[raw.bouncerName] = make(map[string]map[string]map[string]int64) + } + + if _, ok := s.aggregated[raw.bouncerName][raw.origin]; !ok { + s.aggregated[raw.bouncerName][raw.origin] = make(map[string]map[string]int64) + } + + if _, ok := s.aggregated[raw.bouncerName][raw.origin][raw.name]; !ok { + s.aggregated[raw.bouncerName][raw.origin][raw.name] = make(map[string]int64) + } + + if _, ok := s.aggregated[raw.bouncerName][raw.origin][raw.name][raw.unit]; !ok { + s.aggregated[raw.bouncerName][raw.origin][raw.name][raw.unit] = 0 + } + + s.aggregated[raw.bouncerName][raw.origin][raw.name][raw.unit] += int64(raw.value) + + if _, ok := s.aggregatedAllOrigin[raw.bouncerName]; !ok { + s.aggregatedAllOrigin[raw.bouncerName] = make(map[string]map[string]int64) + } + + if _, ok := s.aggregatedAllOrigin[raw.bouncerName][raw.name]; !ok { + s.aggregatedAllOrigin[raw.bouncerName][raw.name] = make(map[string]int64) + } + + if _, ok := s.aggregatedAllOrigin[raw.bouncerName][raw.name][raw.unit]; !ok { + s.aggregatedAllOrigin[raw.bouncerName][raw.name][raw.unit] = 0 + } + + s.aggregatedAllOrigin[raw.bouncerName][raw.name][raw.unit] += int64(raw.value) + } +} + +// bouncerTable displays a table of metrics for a single bouncer +func (s *statBouncer) bouncerTable(out io.Writer, bouncerName string, wantColor string, noUnit bool) { + columns := make(map[string]map[string]bool) + for _, item := range s.rawMetrics { + if item.bouncerName != bouncerName { + continue + } + // build a map of the metric names and units, to display dynamic columns + if _, ok := columns[item.name]; !ok { + columns[item.name] = make(map[string]bool) + } + columns[item.name][item.unit] = true + } + + // no metrics for this bouncer, skip. how did we get here ? 
+ // anyway we can't honor the "showEmpty" flag in this case, + // we don't heven have the table headers + + if len(columns) == 0 { + return + } + + t := cstable.New(out, wantColor).Writer + header1 := table.Row{"Origin"} + header2 := table.Row{""} + colNum := 1 + + colCfg := []table.ColumnConfig{{ + Number:colNum, + AlignHeader: + text.AlignLeft, + Align: text.AlignLeft, + AlignFooter: text.AlignRight, + }} + + for _, name := range maptools.SortedKeys(columns) { + for _, unit := range maptools.SortedKeys(columns[name]) { + colNum += 1 + header1 = append(header1, name) + + // we don't add "s" to random words + if knownPlurals[unit] != "" { + unit = knownPlurals[unit] + } + + header2 = append(header2, unit) + colCfg = append(colCfg, table.ColumnConfig{ + Number: colNum, + AlignHeader: text.AlignCenter, + Align: text.AlignRight, + AlignFooter: text.AlignRight}, + ) + } + } + + t.AppendHeader(header1, table.RowConfig{AutoMerge: true}) + t.AppendHeader(header2) + + t.SetColumnConfigs(colCfg) + + numRows := 0 + + // sort all the ranges for stable output + + for _, origin := range maptools.SortedKeys(s.aggregated[bouncerName]) { + if origin == "" { + // if the metric has no origin (i.e. 
processed bytes/packets) + // we don't display it in the table body but it still gets aggreagted + // in the footer's totals + continue + } + + metrics := s.aggregated[bouncerName][origin] + + // some users don't know what capi is + if origin == "CAPI" { + origin += " (community blocklist)" + } + + row := table.Row{origin} + for _, name := range maptools.SortedKeys(columns) { + for _, unit := range maptools.SortedKeys(columns[name]) { + valStr := "-" + val, ok := metrics[name][unit] + if ok { + valStr = formatNumber(val, !noUnit) + } + row = append(row, valStr) + } + } + t.AppendRow(row) + + numRows += 1 + } + + totals := s.aggregatedAllOrigin[bouncerName] + + if numRows == 0 { + t.Style().Options.SeparateFooter = false + } + + footer := table.Row{"Total"} + for _, name := range maptools.SortedKeys(columns) { + for _, unit := range maptools.SortedKeys(columns[name]) { + footer = append(footer, formatNumber(totals[name][unit], !noUnit)) + } + } + + t.AppendFooter(footer) + + title, _ := s.Description() + title = fmt.Sprintf("%s (%s)", title, bouncerName) + if s.oldestTS != nil { + // if we change this to .Local() beware of tests + title = fmt.Sprintf("%s since %s", title, s.oldestTS[bouncerName].String()) + } + title += ":" + + // don't use SetTitle() because it draws the title inside table box + // TODO: newline position wrt other stat tables + cstable.RenderTitle(out, title) + fmt.Fprintln(out, t.Render()) +} + +// Table displays a table of metrics for each bouncer +func (s *statBouncer) Table(out io.Writer, wantColor string, noUnit bool, _ bool) { + bouncerNames := make(map[string]bool) + for _, item := range s.rawMetrics { + bouncerNames[item.bouncerName] = true + } + + nl := false + for _, bouncerName := range maptools.SortedKeys(bouncerNames) { + if nl { + // empty line between tables + fmt.Fprintln(out) + } + s.bouncerTable(out, bouncerName, wantColor, noUnit) + nl = true + } +} diff --git a/cmd/crowdsec-cli/metrics/statbucket.go 
b/cmd/crowdsec-cli/climetrics/statbucket.go similarity index 98% rename from cmd/crowdsec-cli/metrics/statbucket.go rename to cmd/crowdsec-cli/climetrics/statbucket.go index 62ca4dee71d..836fa0ed1ab 100644 --- a/cmd/crowdsec-cli/metrics/statbucket.go +++ b/cmd/crowdsec-cli/climetrics/statbucket.go @@ -1,4 +1,4 @@ -package metrics +package climetrics import ( "io" diff --git a/cmd/crowdsec-cli/metrics/statdecision.go b/cmd/crowdsec-cli/climetrics/statdecision.go similarity index 98% rename from cmd/crowdsec-cli/metrics/statdecision.go rename to cmd/crowdsec-cli/climetrics/statdecision.go index b1474d95f76..485644a55ba 100644 --- a/cmd/crowdsec-cli/metrics/statdecision.go +++ b/cmd/crowdsec-cli/climetrics/statdecision.go @@ -1,4 +1,4 @@ -package metrics +package climetrics import ( "io" diff --git a/cmd/crowdsec-cli/metrics/statlapi.go b/cmd/crowdsec-cli/climetrics/statlapi.go similarity index 98% rename from cmd/crowdsec-cli/metrics/statlapi.go rename to cmd/crowdsec-cli/climetrics/statlapi.go index f8a737e5c44..7d8831aad74 100644 --- a/cmd/crowdsec-cli/metrics/statlapi.go +++ b/cmd/crowdsec-cli/climetrics/statlapi.go @@ -1,4 +1,4 @@ -package metrics +package climetrics import ( "io" diff --git a/cmd/crowdsec-cli/metrics/statlapibouncer.go b/cmd/crowdsec-cli/climetrics/statlapibouncer.go similarity index 98% rename from cmd/crowdsec-cli/metrics/statlapibouncer.go rename to cmd/crowdsec-cli/climetrics/statlapibouncer.go index e7483c6294d..3ee35adfe9a 100644 --- a/cmd/crowdsec-cli/metrics/statlapibouncer.go +++ b/cmd/crowdsec-cli/climetrics/statlapibouncer.go @@ -1,4 +1,4 @@ -package metrics +package climetrics import ( "io" diff --git a/cmd/crowdsec-cli/metrics/statlapidecision.go b/cmd/crowdsec-cli/climetrics/statlapidecision.go similarity index 98% rename from cmd/crowdsec-cli/metrics/statlapidecision.go rename to cmd/crowdsec-cli/climetrics/statlapidecision.go index 97e17fe8a49..5f4d2c07764 100644 --- a/cmd/crowdsec-cli/metrics/statlapidecision.go +++ 
b/cmd/crowdsec-cli/climetrics/statlapidecision.go @@ -1,4 +1,4 @@ -package metrics +package climetrics import ( "io" diff --git a/cmd/crowdsec-cli/metrics/statlapimachine.go b/cmd/crowdsec-cli/climetrics/statlapimachine.go similarity index 98% rename from cmd/crowdsec-cli/metrics/statlapimachine.go rename to cmd/crowdsec-cli/climetrics/statlapimachine.go index 6b9d9da207e..2f81ccb5751 100644 --- a/cmd/crowdsec-cli/metrics/statlapimachine.go +++ b/cmd/crowdsec-cli/climetrics/statlapimachine.go @@ -1,4 +1,4 @@ -package metrics +package climetrics import ( "io" diff --git a/cmd/crowdsec-cli/metrics/statparser.go b/cmd/crowdsec-cli/climetrics/statparser.go similarity index 98% rename from cmd/crowdsec-cli/metrics/statparser.go rename to cmd/crowdsec-cli/climetrics/statparser.go index d8d651f269f..58ce2248648 100644 --- a/cmd/crowdsec-cli/metrics/statparser.go +++ b/cmd/crowdsec-cli/climetrics/statparser.go @@ -1,4 +1,4 @@ -package metrics +package climetrics import ( "io" diff --git a/cmd/crowdsec-cli/metrics/statstash.go b/cmd/crowdsec-cli/climetrics/statstash.go similarity index 98% rename from cmd/crowdsec-cli/metrics/statstash.go rename to cmd/crowdsec-cli/climetrics/statstash.go index 79c14b04fd6..9de3469bea1 100644 --- a/cmd/crowdsec-cli/metrics/statstash.go +++ b/cmd/crowdsec-cli/climetrics/statstash.go @@ -1,4 +1,4 @@ -package metrics +package climetrics import ( "io" diff --git a/cmd/crowdsec-cli/metrics/statwhitelist.go b/cmd/crowdsec-cli/climetrics/statwhitelist.go similarity index 98% rename from cmd/crowdsec-cli/metrics/statwhitelist.go rename to cmd/crowdsec-cli/climetrics/statwhitelist.go index 89a016d22b0..6848452458b 100644 --- a/cmd/crowdsec-cli/metrics/statwhitelist.go +++ b/cmd/crowdsec-cli/climetrics/statwhitelist.go @@ -1,4 +1,4 @@ -package metrics +package climetrics import ( "io" diff --git a/cmd/crowdsec-cli/metrics/store.go b/cmd/crowdsec-cli/climetrics/store.go similarity index 90% rename from cmd/crowdsec-cli/metrics/store.go rename to 
cmd/crowdsec-cli/climetrics/store.go index 48926488c07..5de50558e89 100644 --- a/cmd/crowdsec-cli/metrics/store.go +++ b/cmd/crowdsec-cli/climetrics/store.go @@ -1,6 +1,7 @@ -package metrics +package climetrics import ( + "context" "encoding/json" "fmt" "io" @@ -12,10 +13,11 @@ import ( dto "github.com/prometheus/client_model/go" "github.com/prometheus/prom2json" log "github.com/sirupsen/logrus" - "gopkg.in/yaml.v3" "github.com/crowdsecurity/go-cs-lib/maptools" "github.com/crowdsecurity/go-cs-lib/trace" + + "github.com/crowdsecurity/crowdsec/pkg/database" ) type metricSection interface { @@ -28,22 +30,31 @@ type metricStore map[string]metricSection func NewMetricStore() metricStore { return metricStore{ "acquisition": statAcquis{}, - "scenarios": statBucket{}, - "parsers": statParser{}, + "alerts": statAlert{}, + "bouncers": &statBouncer{}, + "appsec-engine": statAppsecEngine{}, + "appsec-rule": statAppsecRule{}, + "decisions": statDecision{}, "lapi": statLapi{}, - "lapi-machine": statLapiMachine{}, "lapi-bouncer": statLapiBouncer{}, "lapi-decisions": statLapiDecision{}, - "decisions": statDecision{}, - "alerts": statAlert{}, + "lapi-machine": statLapiMachine{}, + "parsers": statParser{}, + "scenarios": statBucket{}, "stash": statStash{}, - "appsec-engine": statAppsecEngine{}, - "appsec-rule": statAppsecRule{}, "whitelists": statWhitelist{}, } } -func (ms metricStore) Fetch(url string) error { +func (ms metricStore) Fetch(ctx context.Context, url string, db *database.Client) error { + if err := ms["bouncers"].(*statBouncer).Fetch(ctx, db); err != nil { + return err + } + + return ms.fetchPrometheusMetrics(url) +} + +func (ms metricStore) fetchPrometheusMetrics(url string) error { mfChan := make(chan *dto.MetricFamily, 1024) errChan := make(chan error, 1) @@ -59,7 +70,7 @@ func (ms metricStore) Fetch(url string) error { err := prom2json.FetchMetricFamilies(url, mfChan, transport) if err != nil { - errChan <- fmt.Errorf("failed to fetch metrics: %w", err) + errChan 
<- fmt.Errorf("while fetching metrics: %w", err) return } errChan <- nil @@ -75,19 +86,23 @@ func (ms metricStore) Fetch(url string) error { } log.Debugf("Finished reading metrics output, %d entries", len(result)) - /*walk*/ + ms.processPrometheusMetrics(result) + return nil +} + +func (ms metricStore) processPrometheusMetrics(result []*prom2json.Family) { mAcquis := ms["acquisition"].(statAcquis) - mParser := ms["parsers"].(statParser) - mBucket := ms["scenarios"].(statBucket) + mAlert := ms["alerts"].(statAlert) + mAppsecEngine := ms["appsec-engine"].(statAppsecEngine) + mAppsecRule := ms["appsec-rule"].(statAppsecRule) + mDecision := ms["decisions"].(statDecision) mLapi := ms["lapi"].(statLapi) - mLapiMachine := ms["lapi-machine"].(statLapiMachine) mLapiBouncer := ms["lapi-bouncer"].(statLapiBouncer) mLapiDecision := ms["lapi-decisions"].(statLapiDecision) - mDecision := ms["decisions"].(statDecision) - mAppsecEngine := ms["appsec-engine"].(statAppsecEngine) - mAppsecRule := ms["appsec-rule"].(statAppsecRule) - mAlert := ms["alerts"].(statAlert) + mLapiMachine := ms["lapi-machine"].(statLapiMachine) + mParser := ms["parsers"].(statParser) + mBucket := ms["scenarios"].(statBucket) mStash := ms["stash"].(statStash) mWhitelist := ms["whitelists"].(statWhitelist) @@ -219,11 +234,9 @@ func (ms metricStore) Fetch(url string) error { } } } - - return nil } -func (ms metricStore) Format(out io.Writer, wantColor string, sections []string, formatType string, noUnit bool) error { +func (ms metricStore) Format(out io.Writer, wantColor string, sections []string, outputFormat string, noUnit bool) error { // copy only the sections we want want := map[string]metricSection{} @@ -239,7 +252,7 @@ func (ms metricStore) Format(out io.Writer, wantColor string, sections []string, want[section] = ms[section] } - switch formatType { + switch outputFormat { case "human": for _, section := range maptools.SortedKeys(want) { want[section].Table(out, wantColor, noUnit, showEmpty) @@ -250,14 
+263,8 @@ func (ms metricStore) Format(out io.Writer, wantColor string, sections []string, return fmt.Errorf("failed to marshal metrics: %w", err) } out.Write(x) - case "raw": - x, err := yaml.Marshal(want) - if err != nil { - return fmt.Errorf("failed to marshal metrics: %w", err) - } - out.Write(x) default: - return fmt.Errorf("unknown format type %s", formatType) + return fmt.Errorf("output format '%s' not supported for this command", outputFormat) } return nil diff --git a/cmd/crowdsec-cli/metrics/table.go b/cmd/crowdsec-cli/climetrics/table.go similarity index 94% rename from cmd/crowdsec-cli/metrics/table.go rename to cmd/crowdsec-cli/climetrics/table.go index f51e905ba71..c446f2267e9 100644 --- a/cmd/crowdsec-cli/metrics/table.go +++ b/cmd/crowdsec-cli/climetrics/table.go @@ -1,4 +1,4 @@ -package metrics +package climetrics import ( "errors" @@ -110,12 +110,7 @@ func metricsToTable(t *cstable.Table, stats map[string]map[string]int, keys []st for _, sl := range keys { if v, ok := astats[sl]; ok && v != 0 { - numberToShow := strconv.Itoa(v) - if !noUnit { - numberToShow = formatNumber(v) - } - - row = append(row, numberToShow) + row = append(row, formatNumber(int64(v), !noUnit)) } else { row = append(row, "-") } diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index bd9d8988132..d4046414030 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -14,7 +14,7 @@ import ( "github.com/crowdsecurity/go-cs-lib/trace" - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/metrics" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/climetrics" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/fflag" @@ -252,7 +252,7 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewCLIVersion().NewCommand()) cmd.AddCommand(NewCLIConfig(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIHub(cli.cfg).NewCommand()) - cmd.AddCommand(metrics.NewCLI(cli.cfg).NewCommand()) + 
cmd.AddCommand(climetrics.New(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIDashboard(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIDecisions(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIAlerts(cli.cfg).NewCommand()) diff --git a/cmd/crowdsec-cli/support.go b/cmd/crowdsec-cli/support.go index 1b33ef38ada..324be2710fd 100644 --- a/cmd/crowdsec-cli/support.go +++ b/cmd/crowdsec-cli/support.go @@ -22,7 +22,7 @@ import ( "github.com/crowdsecurity/go-cs-lib/trace" - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/metrics" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/climetrics" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" @@ -78,7 +78,7 @@ func stripAnsiString(str string) string { return reStripAnsi.ReplaceAllString(str, "") } -func (cli *cliSupport) dumpMetrics(ctx context.Context, zw *zip.Writer) error { +func (cli *cliSupport) dumpMetrics(ctx context.Context, db *database.Client, zw *zip.Writer) error { log.Info("Collecting prometheus metrics") cfg := cli.cfg() @@ -89,9 +89,9 @@ func (cli *cliSupport) dumpMetrics(ctx context.Context, zw *zip.Writer) error { humanMetrics := new(bytes.Buffer) - ms := metrics.NewMetricStore() + ms := climetrics.NewMetricStore() - if err := ms.Fetch(cfg.Cscli.PrometheusUrl); err != nil { + if err := ms.Fetch(ctx, cfg.Cscli.PrometheusUrl, db); err != nil { return err } @@ -493,7 +493,7 @@ func (cli *cliSupport) dump(ctx context.Context, outFile string) error { skipCAPI = true } - if err = cli.dumpMetrics(ctx, zipWriter); err != nil { + if err = cli.dumpMetrics(ctx, db, zipWriter); err != nil { log.Warn(err) } diff --git a/pkg/database/ent/migrate/schema.go b/pkg/database/ent/migrate/schema.go index 5b436830192..c1ce25bddef 100644 --- a/pkg/database/ent/migrate/schema.go +++ b/pkg/database/ent/migrate/schema.go @@ -255,7 +255,7 @@ var ( {Name: "generated_by", Type: field.TypeString}, {Name: "collected_at", Type: 
field.TypeTime}, {Name: "pushed_at", Type: field.TypeTime, Nullable: true}, - {Name: "payload", Type: field.TypeString}, + {Name: "payload", Type: field.TypeString, Size: 2147483647}, } // MetricsTable holds the schema information for the "metrics" table. MetricsTable = &schema.Table{ diff --git a/pkg/database/ent/schema/metric.go b/pkg/database/ent/schema/metric.go index 9de3f21f464..b47da78bdf3 100644 --- a/pkg/database/ent/schema/metric.go +++ b/pkg/database/ent/schema/metric.go @@ -28,7 +28,7 @@ func (Metric) Fields() []ent.Field { Nillable(). Optional(). Comment("When the metrics are sent to the console"), - field.String("payload"). + field.Text("payload"). Immutable(). Comment("The actual metrics (item0)"), } diff --git a/test/bats/08_metrics.bats b/test/bats/08_metrics.bats index 8bf30812cff..e260e667524 100644 --- a/test/bats/08_metrics.bats +++ b/test/bats/08_metrics.bats @@ -23,9 +23,9 @@ teardown() { #---------- @test "cscli metrics (crowdsec not running)" { - rune -1 cscli metrics - # crowdsec is down - assert_stderr --partial 'failed to fetch metrics: executing GET request for URL \"http://127.0.0.1:6060/metrics\" failed: Get \"http://127.0.0.1:6060/metrics\": dial tcp 127.0.0.1:6060: connect: connection refused' + rune -0 cscli metrics + # crowdsec is down, we won't get an error because some metrics come from the db instead + assert_stderr --partial 'while fetching metrics: executing GET request for URL \"http://127.0.0.1:6060/metrics\" failed: Get \"http://127.0.0.1:6060/metrics\": dial tcp 127.0.0.1:6060: connect: connection refused' } @test "cscli metrics (bad configuration)" { @@ -72,10 +72,6 @@ teardown() { rune -0 jq 'keys' <(output) assert_output --partial '"alerts",' assert_output --partial '"parsers",' - - rune -0 cscli metrics -o raw - assert_output --partial 'alerts: {}' - assert_output --partial 'parsers: {}' } @test "cscli metrics list" { @@ -85,10 +81,6 @@ teardown() { rune -0 cscli metrics list -o json rune -0 jq -c '.[] | 
[.type,.title]' <(output) assert_line '["acquisition","Acquisition Metrics"]' - - rune -0 cscli metrics list -o raw - assert_line "- type: acquisition" - assert_line " title: Acquisition Metrics" } @test "cscli metrics show" { @@ -108,8 +100,4 @@ teardown() { rune -0 cscli metrics show lapi -o json rune -0 jq -c '.lapi."/v1/watchers/login" | keys' <(output) assert_json '["POST"]' - - rune -0 cscli metrics show lapi -o raw - assert_line 'lapi:' - assert_line ' /v1/watchers/login:' } diff --git a/test/bats/08_metrics_bouncer.bats b/test/bats/08_metrics_bouncer.bats new file mode 100644 index 00000000000..778452644dd --- /dev/null +++ b/test/bats/08_metrics_bouncer.bats @@ -0,0 +1,327 @@ +#!/usr/bin/env bats +# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: + +set -u + +setup_file() { + load "../lib/setup_file.sh" +} + +teardown_file() { + load "../lib/teardown_file.sh" +} + +setup() { + load "../lib/setup.sh" + ./instance-data load + ./instance-crowdsec start + skip "require the usage_metrics endpoint on apiserver" +} + +teardown() { + ./instance-crowdsec stop +} + +#---------- + +@test "cscli metrics show bouncers" { + # there are no bouncers, so no metrics yet + rune -0 cscli metrics show bouncers + refute_output +} + +@test "rc usage metrics (empty payload)" { + # a registered bouncer can send metrics for the lapi and console + API_KEY=$(cscli bouncers add testbouncer -o raw) + export API_KEY + + payload=$(yq -o j <<-EOT + remediation_components: [] + log_processors: [] + EOT + ) + + rune -22 curl-with-key '/v1/usage-metrics' -X POST --data "$payload" + assert_stderr --partial 'error: 400' + assert_json '{message: "Missing remediation component data"}' +} + +@test "rc usage metrics (bad payload)" { + API_KEY=$(cscli bouncers add testbouncer -o raw) + export API_KEY + + payload=$(yq -o j <<-EOT + remediation_components: + - version: "v1.0" + log_processors: [] + EOT + ) + + rune -22 curl-with-key '/v1/usage-metrics' -X POST --data "$payload" + assert_stderr --partial 
"error: 422" + rune -0 jq -r '.message' <(output) + assert_output - <<-EOT + validation failure list: + remediation_components.0.utc_startup_timestamp in body is required + EOT + + # validation, like timestamp format + + payload=$(yq -o j '.remediation_components[0].utc_startup_timestamp = "2021-09-01T00:00:00Z"' <<<"$payload") + rune -22 curl-with-key '/v1/usage-metrics' -X POST --data "$payload" + assert_stderr --partial "error: 400" + assert_json '{message: "json: cannot unmarshal string into Go struct field AllMetrics.remediation_components of type int64"}' + + payload=$(yq -o j '.remediation_components[0].utc_startup_timestamp = 1707399316' <<<"$payload") + rune -0 curl-with-key '/v1/usage-metrics' -X POST --data "$payload" + refute_output +} + +@test "rc usage metrics (good payload)" { + API_KEY=$(cscli bouncers add testbouncer -o raw) + export API_KEY + + payload=$(yq -o j <<-EOT + remediation_components: + - version: "v1.0" + utc_startup_timestamp: 1707399316 + log_processors: [] + EOT + ) + + # bouncers have feature flags too + + payload=$(yq -o j ' + .remediation_components[0].feature_flags = ["huey", "dewey", "louie"] | + .remediation_components[0].os = {"name": "Multics", "version": "MR12.5"} + ' <<<"$payload") + rune -0 curl-with-key '/v1/usage-metrics' -X POST --data "$payload" + rune -0 cscli bouncer inspect testbouncer -o json + rune -0 yq -o j '[.os,.featureflags]' <(output) + assert_json '["Multics/MR12.5",["huey","dewey","louie"]]' + + payload=$(yq -o j ' + .remediation_components[0].metrics = [ + { + "meta": {"utc_now_timestamp": 1707399316, "window_size_seconds":600}, + "items":[ + {"name": "foo", "unit": "pound", "value": 3.1415926}, + {"name": "foo", "unit": "pound", "value": 2.7182818}, + {"name": "foo", "unit": "dogyear", "value": 2.7182818} + ] + } + ] + ' <<<"$payload") + rune -0 curl-with-key '/v1/usage-metrics' -X POST --data "$payload" + rune -0 cscli metrics show bouncers -o json + # aggregation is ok -- we are truncating, not 
rounding, because the float is mandated by swagger. + # but without labels the origin string is empty + assert_json '{bouncers:{testbouncer:{"": {"foo": {"dogyear": 2, "pound": 5}}}}}' + + rune -0 cscli metrics show bouncers + assert_output - <<-EOT + Bouncer Metrics (testbouncer) since 2024-02-08 13:35:16 +0000 UTC: + +--------+-----------------+ + | Origin | foo | + | | dogyear | pound | + +--------+---------+-------+ + | Total | 2 | 5 | + +--------+---------+-------+ + EOT + + # some more realistic values, at least for the labels + # we don't use the same now_timestamp or the payload will be silently discarded + + payload=$(yq -o j ' + .remediation_components[0].metrics = [ + { + "meta": {"utc_now_timestamp": 1707399916, "window_size_seconds":600}, + "items":[ + {"name": "active_decisions", "unit": "ip", "value": 51936, "labels": {"ip_type": "ipv4", "origin": "lists:firehol_voipbl"}}, + {"name": "active_decisions", "unit": "ip", "value": 1, "labels": {"ip_type": "ipv6", "origin": "cscli"}}, + {"name": "dropped", "unit": "byte", "value": 3800, "labels": {"ip_type": "ipv4", "origin": "CAPI"}}, + {"name": "dropped", "unit": "byte", "value": 0, "labels": {"ip_type": "ipv4", "origin": "cscli"}}, + {"name": "dropped", "unit": "byte", "value": 1034, "labels": {"ip_type": "ipv4", "origin": "lists:firehol_cruzit_web_attacks"}}, + {"name": "dropped", "unit": "byte", "value": 3847, "labels": {"ip_type": "ipv4", "origin": "lists:firehol_voipbl"}}, + {"name": "dropped", "unit": "byte", "value": 380, "labels": {"ip_type": "ipv6", "origin": "cscli"}}, + {"name": "dropped", "unit": "packet", "value": 100, "labels": {"ip_type": "ipv4", "origin": "CAPI"}}, + {"name": "dropped", "unit": "packet", "value": 10, "labels": {"ip_type": "ipv4", "origin": "cscli"}}, + {"name": "dropped", "unit": "packet", "value": 23, "labels": {"ip_type": "ipv4", "origin": "lists:firehol_cruzit_web_attacks"}}, + {"name": "dropped", "unit": "packet", "value": 58, "labels": {"ip_type": "ipv4", "origin": 
"lists:firehol_voipbl"}}, + {"name": "dropped", "unit": "packet", "value": 0, "labels": {"ip_type": "ipv4", "origin": "lists:anotherlist"}}, + {"name": "dropped", "unit": "byte", "value": 0, "labels": {"ip_type": "ipv4", "origin": "lists:anotherlist"}}, + {"name": "dropped", "unit": "packet", "value": 0, "labels": {"ip_type": "ipv6", "origin": "cscli"}} + ] + } + ] | + .remediation_components[0].type = "crowdsec-firewall-bouncer" + ' <<<"$payload") + + rune -0 curl-with-key '/v1/usage-metrics' -X POST --data "$payload" + rune -0 cscli metrics show bouncers -o json + assert_json '{ + "bouncers": { + "testbouncer": { + "": { + "foo": { + "dogyear": 2, + "pound": 5 + } + }, + "CAPI": { + "dropped": { + "byte": 3800, + "packet": 100 + } + }, + "cscli": { + "active_decisions": { + "ip": 1 + }, + "dropped": { + "byte": 380, + "packet": 10 + } + }, + "lists:firehol_cruzit_web_attacks": { + "dropped": { + "byte": 1034, + "packet": 23 + } + }, + "lists:firehol_voipbl": { + "active_decisions": { + "ip": 51936 + }, + "dropped": { + "byte": 3847, + "packet": 58 + }, + }, + "lists:anotherlist": { + "dropped": { + "byte": 0, + "packet": 0 + } + } + } + } + }' + + rune -0 cscli metrics show bouncers + assert_output - <<-EOT + Bouncer Metrics (testbouncer) since 2024-02-08 13:35:16 +0000 UTC: + +----------------------------------+------------------+-------------------+-----------------+ + | Origin | active_decisions | dropped | foo | + | | IPs | bytes | packets | dogyear | pound | + +----------------------------------+------------------+---------+---------+---------+-------+ + | CAPI (community blocklist) | - | 3.80k | 100 | - | - | + | cscli | 1 | 380 | 10 | - | - | + | lists:anotherlist | - | 0 | 0 | - | - | + | lists:firehol_cruzit_web_attacks | - | 1.03k | 23 | - | - | + | lists:firehol_voipbl | 51.94k | 3.85k | 58 | - | - | + +----------------------------------+------------------+---------+---------+---------+-------+ + | Total | 51.94k | 9.06k | 191 | 2 | 5 | + 
+----------------------------------+------------------+---------+---------+---------+-------+ + EOT + + # TODO: multiple item lists + +} + +@test "rc usage metrics (multiple bouncers)" { + # multiple bouncers have separate totals and can have different types of metrics and units -> different columns + + API_KEY=$(cscli bouncers add bouncer1 -o raw) + export API_KEY + + payload=$(yq -o j <<-EOT + remediation_components: + - version: "v1.0" + utc_startup_timestamp: 1707369316 + metrics: + - meta: + utc_now_timestamp: 1707399316 + window_size_seconds: 600 + items: + - name: dropped + unit: byte + value: 1000 + labels: + origin: CAPI + - name: dropped + unit: byte + value: 800 + labels: + origin: lists:somelist + - name: processed + unit: byte + value: 12340 + - name: processed + unit: packet + value: 100 + EOT + ) + + rune -0 curl-with-key '/v1/usage-metrics' -X POST --data "$payload" + + API_KEY=$(cscli bouncers add bouncer2 -o raw) + export API_KEY + + payload=$(yq -o j <<-EOT + remediation_components: + - version: "v1.0" + utc_startup_timestamp: 1707379316 + metrics: + - meta: + utc_now_timestamp: 1707389316 + window_size_seconds: 600 + items: + - name: dropped + unit: byte + value: 1500 + labels: + origin: lists:somelist + - name: dropped + unit: byte + value: 2000 + labels: + origin: CAPI + - name: dropped + unit: packet + value: 20 + labels: + origin: lists:somelist + EOT + ) + + rune -0 curl-with-key '/v1/usage-metrics' -X POST --data "$payload" + + rune -0 cscli metrics show bouncers -o json + assert_json '{bouncers:{bouncer1:{"":{processed:{byte:12340,packet:100}},CAPI:{dropped:{byte:1000}},"lists:somelist":{dropped:{byte:800}}},bouncer2:{"lists:somelist":{dropped:{byte:1500,packet:20}},CAPI:{dropped:{byte:2000}}}}}' + + rune -0 cscli metrics show bouncers + assert_output - <<-EOT + Bouncer Metrics (bouncer1) since 2024-02-08 13:35:16 +0000 UTC: + +----------------------------+---------+-----------------------+ + | Origin | dropped | processed | + | | bytes | 
bytes | packets | + +----------------------------+---------+-----------+-----------+ + | CAPI (community blocklist) | 1.00k | - | - | + | lists:somelist | 800 | - | - | + +----------------------------+---------+-----------+-----------+ + | Total | 1.80k | 12.34k | 100 | + +----------------------------+---------+-----------+-----------+ + + Bouncer Metrics (bouncer2) since 2024-02-08 10:48:36 +0000 UTC: + +----------------------------+-------------------+ + | Origin | dropped | + | | bytes | packets | + +----------------------------+---------+---------+ + | CAPI (community blocklist) | 2.00k | - | + | lists:somelist | 1.50k | 20 | + +----------------------------+---------+---------+ + | Total | 3.50k | 20 | + +----------------------------+---------+---------+ + EOT +} diff --git a/test/bats/08_metrics_machines.bats b/test/bats/08_metrics_machines.bats new file mode 100644 index 00000000000..e63078124a9 --- /dev/null +++ b/test/bats/08_metrics_machines.bats @@ -0,0 +1,101 @@ +#!/usr/bin/env bats +# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: + +set -u + +setup_file() { + load "../lib/setup_file.sh" +} + +teardown_file() { + load "../lib/teardown_file.sh" +} + +setup() { + load "../lib/setup.sh" + ./instance-data load + ./instance-crowdsec start + skip "require the usage_metrics endpoint on apiserver" +} + +teardown() { + ./instance-crowdsec stop +} + +#---------- + +@test "lp usage metrics (empty payload)" { + # a registered log processor can send metrics for the lapi and console + TOKEN=$(lp-get-token) + export TOKEN + + payload=$(yq -o j <<-EOT + remediation_components: [] + log_processors: [] + EOT + ) + + rune -22 curl-with-token '/v1/usage-metrics' -X POST --data "$payload" + assert_stderr --partial 'error: 400' + assert_json '{message: "Missing log processor data"}' +} + +@test "lp usage metrics (bad payload)" { + TOKEN=$(lp-get-token) + export TOKEN + + payload=$(yq -o j <<-EOT + remediation_components: [] + log_processors: + - version: "v1.0" + EOT + ) + + 
rune -22 curl-with-token '/v1/usage-metrics' -X POST --data "$payload" + assert_stderr --partial "error: 422" + rune -0 jq -r '.message' <(output) + assert_output - <<-EOT + validation failure list: + log_processors.0.utc_startup_timestamp in body is required + log_processors.0.datasources in body is required + log_processors.0.hub_items in body is required + EOT +} + +@test "lp usage metrics (full payload)" { + TOKEN=$(lp-get-token) + export TOKEN + + # base payload without any measurement + + payload=$(yq -o j <<-EOT + remediation_components: [] + log_processors: + - version: "v1.0" + utc_startup_timestamp: 1707399316 + hub_items: {} + feature_flags: + - marshmallows + os: + name: CentOS + version: "8" + metrics: + - name: logs_parsed + value: 5000 + unit: count + labels: {} + items: [] + meta: + window_size_seconds: 600 + utc_now_timestamp: 1707485349 + console_options: + - share_context + datasources: + syslog: 1 + file: 4 + EOT + ) + + rune -0 curl-with-token '/v1/usage-metrics' -X POST --data "$payload" + refute_output +} From 91463836ff4fcd19ba8525fe1c6aa992a9180057 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 15 Jul 2024 10:51:45 +0200 Subject: [PATCH 219/318] tests: increase delta for flaky float comparison (#3122) * tests: increase delta for flaky float comparison * remove leading 0 from non-octal number --- pkg/exprhelpers/exprlib_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/exprhelpers/exprlib_test.go b/pkg/exprhelpers/exprlib_test.go index 33025d4a992..b9ba1d01191 100644 --- a/pkg/exprhelpers/exprlib_test.go +++ b/pkg/exprhelpers/exprlib_test.go @@ -1361,7 +1361,7 @@ func TestGetActiveDecisionsTimeLeft(t *testing.T) { }, } - delta := 0.0001 + delta := 0.001 for _, test := range tests { program, err := expr.Compile(test.code, GetExprOptions(test.env)...) 
@@ -1392,12 +1392,12 @@ func TestParseUnixTime(t *testing.T) { { name: "ParseUnix() test: valid value with milli", value: "1672239773.3590894", - expected: time.Date(2022, 12, 28, 15, 02, 53, 0, time.UTC), + expected: time.Date(2022, 12, 28, 15, 2, 53, 0, time.UTC), }, { name: "ParseUnix() test: valid value without milli", value: "1672239773", - expected: time.Date(2022, 12, 28, 15, 02, 53, 0, time.UTC), + expected: time.Date(2022, 12, 28, 15, 2, 53, 0, time.UTC), }, { name: "ParseUnix() test: invalid input", From 5390b8ea4b121bc8949b885d1582bce9306a0c94 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 15 Jul 2024 11:05:40 +0200 Subject: [PATCH 220/318] remove warning "maxopenconns is 0, default to 100" (#3129) * remove warning "maxopenconns is 0, default to 100" also don't store as pointer since value 0 is not useful * lint --- pkg/csconfig/api_test.go | 2 +- pkg/csconfig/config.go | 2 +- pkg/csconfig/database.go | 7 ++++--- pkg/csconfig/database_test.go | 11 ++++++----- pkg/database/database.go | 19 ++++++++----------- 5 files changed, 20 insertions(+), 21 deletions(-) diff --git a/pkg/csconfig/api_test.go b/pkg/csconfig/api_test.go index 079936253a1..51a4c5ad602 100644 --- a/pkg/csconfig/api_test.go +++ b/pkg/csconfig/api_test.go @@ -191,7 +191,7 @@ func TestLoadAPIServer(t *testing.T) { DbConfig: &DatabaseCfg{ DbPath: "./testdata/test.db", Type: "sqlite", - MaxOpenConns: ptr.Of(DEFAULT_MAX_OPEN_CONNS), + MaxOpenConns: DEFAULT_MAX_OPEN_CONNS, UseWal: ptr.Of(true), // autodetected DecisionBulkSize: defaultDecisionBulkSize, }, diff --git a/pkg/csconfig/config.go b/pkg/csconfig/config.go index ed71af4029a..3bbdf607187 100644 --- a/pkg/csconfig/config.go +++ b/pkg/csconfig/config.go @@ -158,7 +158,7 @@ func NewDefaultConfig() *Config { dbConfig := DatabaseCfg{ Type: "sqlite", DbPath: DefaultDataPath("crowdsec.db"), - MaxOpenConns: ptr.Of(DEFAULT_MAX_OPEN_CONNS), + MaxOpenConns: DEFAULT_MAX_OPEN_CONNS, } globalCfg := Config{ 
diff --git a/pkg/csconfig/database.go b/pkg/csconfig/database.go index d30cc20a938..a24eb9e13c3 100644 --- a/pkg/csconfig/database.go +++ b/pkg/csconfig/database.go @@ -33,7 +33,7 @@ type DatabaseCfg struct { Type string `yaml:"type"` Flush *FlushDBCfg `yaml:"flush"` LogLevel *log.Level `yaml:"log_level"` - MaxOpenConns *int `yaml:"max_open_conns,omitempty"` + MaxOpenConns int `yaml:"max_open_conns,omitempty"` UseWal *bool `yaml:"use_wal,omitempty"` DecisionBulkSize int `yaml:"decision_bulk_size,omitempty"` } @@ -68,8 +68,8 @@ func (c *Config) LoadDBConfig(inCli bool) error { c.API.Server.DbConfig = c.DbConfig } - if c.DbConfig.MaxOpenConns == nil { - c.DbConfig.MaxOpenConns = ptr.Of(DEFAULT_MAX_OPEN_CONNS) + if c.DbConfig.MaxOpenConns == 0 { + c.DbConfig.MaxOpenConns = DEFAULT_MAX_OPEN_CONNS } if !inCli && c.DbConfig.Type == "sqlite" { @@ -134,6 +134,7 @@ func (d *DatabaseCfg) ConnectionString() string { } else { connString = fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?parseTime=True", d.User, d.Password, d.Host, d.Port, d.DbName) } + if d.Sslmode != "" { connString = fmt.Sprintf("%s&tls=%s", connString, d.Sslmode) } diff --git a/pkg/csconfig/database_test.go b/pkg/csconfig/database_test.go index 954b1c47fd7..4a1ef807f97 100644 --- a/pkg/csconfig/database_test.go +++ b/pkg/csconfig/database_test.go @@ -22,7 +22,7 @@ func TestLoadDBConfig(t *testing.T) { DbConfig: &DatabaseCfg{ Type: "sqlite", DbPath: "./testdata/test.db", - MaxOpenConns: ptr.Of(10), + MaxOpenConns: 10, }, Cscli: &CscliCfg{}, API: &APICfg{ @@ -30,10 +30,10 @@ func TestLoadDBConfig(t *testing.T) { }, }, expected: &DatabaseCfg{ - Type: "sqlite", - DbPath: "./testdata/test.db", - MaxOpenConns: ptr.Of(10), - UseWal: ptr.Of(true), + Type: "sqlite", + DbPath: "./testdata/test.db", + MaxOpenConns: 10, + UseWal: ptr.Of(true), DecisionBulkSize: defaultDecisionBulkSize, }, }, @@ -49,6 +49,7 @@ func TestLoadDBConfig(t *testing.T) { t.Run(tc.name, func(t *testing.T) { err := tc.input.LoadDBConfig(false) 
cstest.RequireErrorContains(t, err, tc.expectedErr) + if tc.expectedErr != "" { return } diff --git a/pkg/database/database.go b/pkg/database/database.go index 6f392c46d21..e513459199f 100644 --- a/pkg/database/database.go +++ b/pkg/database/database.go @@ -14,8 +14,6 @@ import ( _ "github.com/mattn/go-sqlite3" log "github.com/sirupsen/logrus" - "github.com/crowdsecurity/go-cs-lib/ptr" - "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/database/ent" "github.com/crowdsecurity/crowdsec/pkg/types" @@ -37,12 +35,11 @@ func getEntDriver(dbtype string, dbdialect string, dsn string, config *csconfig. return nil, err } - if config.MaxOpenConns == nil { - log.Warningf("MaxOpenConns is 0, defaulting to %d", csconfig.DEFAULT_MAX_OPEN_CONNS) - config.MaxOpenConns = ptr.Of(csconfig.DEFAULT_MAX_OPEN_CONNS) + if config.MaxOpenConns == 0 { + config.MaxOpenConns = csconfig.DEFAULT_MAX_OPEN_CONNS } - db.SetMaxOpenConns(*config.MaxOpenConns) + db.SetMaxOpenConns(config.MaxOpenConns) drv := entsql.OpenDB(dbdialect, db) return drv, nil @@ -74,7 +71,7 @@ func NewClient(ctx context.Context, config *csconfig.DatabaseCfg) (*Client, erro if config.Type == "sqlite" { /*if it's the first startup, we want to touch and chmod file*/ - if _, err := os.Stat(config.DbPath); os.IsNotExist(err) { + if _, err = os.Stat(config.DbPath); os.IsNotExist(err) { f, err := os.OpenFile(config.DbPath, os.O_CREATE|os.O_RDWR, 0o600) if err != nil { return nil, fmt.Errorf("failed to create SQLite database file %q: %w", config.DbPath, err) @@ -85,14 +82,14 @@ func NewClient(ctx context.Context, config *csconfig.DatabaseCfg) (*Client, erro } } // Always try to set permissions to simplify a bit the code for windows (as the permissions set by OpenFile will be garbage) - if err := setFilePerm(config.DbPath, 0o640); err != nil { - return nil, fmt.Errorf("unable to set perms on %s: %v", config.DbPath, err) + if err = setFilePerm(config.DbPath, 0o640); err != nil { + return nil, 
fmt.Errorf("unable to set perms on %s: %w", config.DbPath, err) } } drv, err := getEntDriver(typ, dia, config.ConnectionString(), config) if err != nil { - return nil, fmt.Errorf("failed opening connection to %s: %v", config.Type, err) + return nil, fmt.Errorf("failed opening connection to %s: %w", config.Type, err) } client = ent.NewClient(ent.Driver(drv), entOpt) @@ -104,7 +101,7 @@ func NewClient(ctx context.Context, config *csconfig.DatabaseCfg) (*Client, erro } if err = client.Schema.Create(ctx); err != nil { - return nil, fmt.Errorf("failed creating schema resources: %v", err) + return nil, fmt.Errorf("failed creating schema resources: %w", err) } return &Client{ From c4da2775cdef20f395d158ecb942b18a65f94bad Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 15 Jul 2024 13:26:59 +0200 Subject: [PATCH 221/318] usage metrics: validate maxLength for some elements (#3131) Co-authored-by: Sebastien Blot --- pkg/models/base_metrics.go | 5 +++++ pkg/models/localapi_swagger.yaml | 7 +++++++ pkg/models/metrics_detail_item.go | 10 ++++++++++ pkg/models/metrics_labels.go | 15 +++++++++++++++ pkg/models/o_sversion.go | 10 ++++++++++ 5 files changed, 47 insertions(+) diff --git a/pkg/models/base_metrics.go b/pkg/models/base_metrics.go index 154d9004afe..94691ea233e 100644 --- a/pkg/models/base_metrics.go +++ b/pkg/models/base_metrics.go @@ -35,6 +35,7 @@ type BaseMetrics struct { // version of the remediation component // Required: true + // Max Length: 255 Version *string `json:"version"` } @@ -124,6 +125,10 @@ func (m *BaseMetrics) validateVersion(formats strfmt.Registry) error { return err } + if err := validate.MaxLength("version", "body", *m.Version, 255); err != nil { + return err + } + return nil } diff --git a/pkg/models/localapi_swagger.yaml b/pkg/models/localapi_swagger.yaml index d726f452a16..ba14880e7c5 100644 --- a/pkg/models/localapi_swagger.yaml +++ b/pkg/models/localapi_swagger.yaml @@ -1095,6 +1095,7 @@ definitions: 
version: type: string description: version of the remediation component + maxLength: 255 os: $ref: '#/definitions/OSversion' metrics: @@ -1107,6 +1108,7 @@ definitions: items: type: string description: feature flags (expected to be empty for remediation components) + maxLength: 255 utc_startup_timestamp: type: integer description: UTC timestamp of the startup of the software @@ -1120,9 +1122,11 @@ definitions: name: type: string description: name of the OS + maxLength: 255 version: type: string description: version of the OS + maxLength: 255 required: - name - version @@ -1146,12 +1150,14 @@ definitions: name: type: string description: name of the metric + maxLength: 255 value: type: number description: value of the metric unit: type: string description: unit of the metric + maxLength: 255 labels: $ref: '#/definitions/MetricsLabels' description: labels of the metric @@ -1178,6 +1184,7 @@ definitions: additionalProperties: type: string description: label of the metric + maxLength: 255 ConsoleOptions: title: ConsoleOptions type: array diff --git a/pkg/models/metrics_detail_item.go b/pkg/models/metrics_detail_item.go index 889f7e263d2..bb237884fcf 100644 --- a/pkg/models/metrics_detail_item.go +++ b/pkg/models/metrics_detail_item.go @@ -24,10 +24,12 @@ type MetricsDetailItem struct { // name of the metric // Required: true + // Max Length: 255 Name *string `json:"name"` // unit of the metric // Required: true + // Max Length: 255 Unit *string `json:"unit"` // value of the metric @@ -86,6 +88,10 @@ func (m *MetricsDetailItem) validateName(formats strfmt.Registry) error { return err } + if err := validate.MaxLength("name", "body", *m.Name, 255); err != nil { + return err + } + return nil } @@ -95,6 +101,10 @@ func (m *MetricsDetailItem) validateUnit(formats strfmt.Registry) error { return err } + if err := validate.MaxLength("unit", "body", *m.Unit, 255); err != nil { + return err + } + return nil } diff --git a/pkg/models/metrics_labels.go 
b/pkg/models/metrics_labels.go index d807a88bc8d..176a15cce24 100644 --- a/pkg/models/metrics_labels.go +++ b/pkg/models/metrics_labels.go @@ -8,7 +8,9 @@ package models import ( "context" + "github.com/go-openapi/errors" "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" ) // MetricsLabels MetricsLabels @@ -18,6 +20,19 @@ type MetricsLabels map[string]string // Validate validates this metrics labels func (m MetricsLabels) Validate(formats strfmt.Registry) error { + var res []error + + for k := range m { + + if err := validate.MaxLength(k, "body", m[k], 255); err != nil { + return err + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } return nil } diff --git a/pkg/models/o_sversion.go b/pkg/models/o_sversion.go index eb670409c90..8f1f43ea9cc 100644 --- a/pkg/models/o_sversion.go +++ b/pkg/models/o_sversion.go @@ -21,10 +21,12 @@ type OSversion struct { // name of the OS // Required: true + // Max Length: 255 Name *string `json:"name"` // version of the OS // Required: true + // Max Length: 255 Version *string `json:"version"` } @@ -52,6 +54,10 @@ func (m *OSversion) validateName(formats strfmt.Registry) error { return err } + if err := validate.MaxLength("name", "body", *m.Name, 255); err != nil { + return err + } + return nil } @@ -61,6 +67,10 @@ func (m *OSversion) validateVersion(formats strfmt.Registry) error { return err } + if err := validate.MaxLength("version", "body", *m.Version, 255); err != nil { + return err + } + return nil } From 84c214a6f0984dbe7eb6fe2384241b305ce04438 Mon Sep 17 00:00:00 2001 From: Laurence Jones Date: Mon, 15 Jul 2024 12:31:01 +0100 Subject: [PATCH 222/318] enhance: add crowdsec user agent to cti do request func (#3130) --- pkg/cticlient/client.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pkg/cticlient/client.go b/pkg/cticlient/client.go index b95d1237619..b817121e222 100644 --- a/pkg/cticlient/client.go +++ b/pkg/cticlient/client.go @@ -8,6 +8,7 @@ import ( "net/http" 
"strings" + "github.com/crowdsecurity/crowdsec/pkg/cwversion" log "github.com/sirupsen/logrus" ) @@ -43,7 +44,10 @@ func (c *CrowdsecCTIClient) doRequest(method string, endpoint string, params map if err != nil { return nil, err } + req.Header.Set("X-Api-Key", c.apiKey) + req.Header.Set("User-Agent", cwversion.UserAgent()) + resp, err := c.httpClient.Do(req) if err != nil { return nil, err From c4bfdf19914a88671663f8caae5a5ea849c1b3a6 Mon Sep 17 00:00:00 2001 From: blotus Date: Tue, 16 Jul 2024 10:08:00 +0200 Subject: [PATCH 223/318] Store alert remediations status in DB (#3115) --- cmd/crowdsec-cli/alerts.go | 1 + cmd/crowdsec-cli/decisions.go | 7 +-- pkg/apiserver/controllers/v1/alerts.go | 1 + pkg/database/alerts.go | 6 +-- pkg/database/ent/alert.go | 13 ++++- pkg/database/ent/alert/alert.go | 8 +++ pkg/database/ent/alert/where.go | 25 +++++++++ pkg/database/ent/alert_create.go | 18 +++++++ pkg/database/ent/alert_update.go | 6 +++ pkg/database/ent/migrate/schema.go | 3 +- pkg/database/ent/mutation.go | 75 +++++++++++++++++++++++++- pkg/database/ent/schema/alert.go | 1 + 12 files changed, 155 insertions(+), 9 deletions(-) diff --git a/cmd/crowdsec-cli/alerts.go b/cmd/crowdsec-cli/alerts.go index 0bb310739d9..37f9ab435c7 100644 --- a/cmd/crowdsec-cli/alerts.go +++ b/cmd/crowdsec-cli/alerts.go @@ -120,6 +120,7 @@ func (cli *cliAlerts) displayOneAlert(alert *models.Alert, withDetail bool) erro - Date : {{.CreatedAt}} - Machine : {{.MachineID}} - Simulation : {{.Simulated}} + - Remediation : {{.Remediation}} - Reason : {{.Scenario}} - Events Count : {{.EventsCount}} - Scope:Value : {{.Source.Scope}}{{if .Source.Value}}:{{.Source.Value}}{{end}} diff --git a/cmd/crowdsec-cli/decisions.go b/cmd/crowdsec-cli/decisions.go index 92a0de72e58..d485c90254f 100644 --- a/cmd/crowdsec-cli/decisions.go +++ b/cmd/crowdsec-cli/decisions.go @@ -374,9 +374,10 @@ func (cli *cliDecisions) add(addIP, addRange, addDuration, addValue, addScope, a Scope: &addScope, Value: &addValue, }, - 
StartAt: &startAt, - StopAt: &stopAt, - CreatedAt: createdAt, + StartAt: &startAt, + StopAt: &stopAt, + CreatedAt: createdAt, + Remediation: true, } alerts = append(alerts, &alert) diff --git a/pkg/apiserver/controllers/v1/alerts.go b/pkg/apiserver/controllers/v1/alerts.go index c8cd54203bc..82dc51d6879 100644 --- a/pkg/apiserver/controllers/v1/alerts.go +++ b/pkg/apiserver/controllers/v1/alerts.go @@ -43,6 +43,7 @@ func FormatOneAlert(alert *ent.Alert) *models.Alert { Capacity: &alert.Capacity, Leakspeed: &alert.LeakSpeed, Simulated: &alert.Simulated, + Remediation: alert.Remediation, UUID: alert.UUID, Source: &models.Source{ Scope: &alert.SourceScope, diff --git a/pkg/database/alerts.go b/pkg/database/alerts.go index 3563adba68c..0f6d87fb1b6 100644 --- a/pkg/database/alerts.go +++ b/pkg/database/alerts.go @@ -241,7 +241,8 @@ func (c *Client) UpdateCommunityBlocklist(alertItem *models.Alert) (int, int, in SetLeakSpeed(*alertItem.Leakspeed). SetSimulated(*alertItem.Simulated). SetScenarioVersion(*alertItem.ScenarioVersion). - SetScenarioHash(*alertItem.ScenarioHash) + SetScenarioHash(*alertItem.ScenarioHash). + SetRemediation(true) // it's from CAPI, we always have decisions alertRef, err := alertB.Save(c.CTX) if err != nil { @@ -554,7 +555,6 @@ func (c *Client) createAlertChunk(machineID string, owner *ent.Machine, alerts [ if len(metaItem.Value) > 4095 { c.Log.Warningf("truncated meta %s : value too long", metaItem.Key) - value = value[:4095] } @@ -618,6 +618,7 @@ func (c *Client) createAlertChunk(machineID string, owner *ent.Machine, alerts [ SetSimulated(*alertItem.Simulated). SetScenarioVersion(*alertItem.ScenarioVersion). SetScenarioHash(*alertItem.ScenarioHash). + SetRemediation(alertItem.Remediation). SetUUID(alertItem.UUID). AddEvents(events...). AddMetas(metas...) 
@@ -677,7 +678,6 @@ func (c *Client) createAlertChunk(machineID string, owner *ent.Machine, alerts [ } } } - return ret, nil } diff --git a/pkg/database/ent/alert.go b/pkg/database/ent/alert.go index 8bfe0badc09..eb0e1cb7612 100644 --- a/pkg/database/ent/alert.go +++ b/pkg/database/ent/alert.go @@ -64,6 +64,8 @@ type Alert struct { Simulated bool `json:"simulated,omitempty"` // UUID holds the value of the "uuid" field. UUID string `json:"uuid,omitempty"` + // Remediation holds the value of the "remediation" field. + Remediation bool `json:"remediation,omitempty"` // Edges holds the relations/edges for other nodes in the graph. // The values are being populated by the AlertQuery when eager-loading is set. Edges AlertEdges `json:"edges"` @@ -129,7 +131,7 @@ func (*Alert) scanValues(columns []string) ([]any, error) { values := make([]any, len(columns)) for i := range columns { switch columns[i] { - case alert.FieldSimulated: + case alert.FieldSimulated, alert.FieldRemediation: values[i] = new(sql.NullBool) case alert.FieldSourceLatitude, alert.FieldSourceLongitude: values[i] = new(sql.NullFloat64) @@ -300,6 +302,12 @@ func (a *Alert) assignValues(columns []string, values []any) error { } else if value.Valid { a.UUID = value.String } + case alert.FieldRemediation: + if value, ok := values[i].(*sql.NullBool); !ok { + return fmt.Errorf("unexpected type %T for field remediation", values[i]) + } else if value.Valid { + a.Remediation = value.Bool + } case alert.ForeignKeys[0]: if value, ok := values[i].(*sql.NullInt64); !ok { return fmt.Errorf("unexpected type %T for edge-field machine_alerts", value) @@ -431,6 +439,9 @@ func (a *Alert) String() string { builder.WriteString(", ") builder.WriteString("uuid=") builder.WriteString(a.UUID) + builder.WriteString(", ") + builder.WriteString("remediation=") + builder.WriteString(fmt.Sprintf("%v", a.Remediation)) builder.WriteByte(')') return builder.String() } diff --git a/pkg/database/ent/alert/alert.go 
b/pkg/database/ent/alert/alert.go index 16e0b019e14..62aade98e87 100644 --- a/pkg/database/ent/alert/alert.go +++ b/pkg/database/ent/alert/alert.go @@ -60,6 +60,8 @@ const ( FieldSimulated = "simulated" // FieldUUID holds the string denoting the uuid field in the database. FieldUUID = "uuid" + // FieldRemediation holds the string denoting the remediation field in the database. + FieldRemediation = "remediation" // EdgeOwner holds the string denoting the owner edge name in mutations. EdgeOwner = "owner" // EdgeDecisions holds the string denoting the decisions edge name in mutations. @@ -126,6 +128,7 @@ var Columns = []string{ FieldScenarioHash, FieldSimulated, FieldUUID, + FieldRemediation, } // ForeignKeys holds the SQL foreign-keys that are owned by the "alerts" @@ -293,6 +296,11 @@ func ByUUID(opts ...sql.OrderTermOption) OrderOption { return sql.OrderByField(FieldUUID, opts...).ToFunc() } +// ByRemediation orders the results by the remediation field. +func ByRemediation(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldRemediation, opts...).ToFunc() +} + // ByOwnerField orders the results by owner field. func ByOwnerField(field string, opts ...sql.OrderTermOption) OrderOption { return func(s *sql.Selector) { diff --git a/pkg/database/ent/alert/where.go b/pkg/database/ent/alert/where.go index c109b78704b..da6080fffb9 100644 --- a/pkg/database/ent/alert/where.go +++ b/pkg/database/ent/alert/where.go @@ -170,6 +170,11 @@ func UUID(v string) predicate.Alert { return predicate.Alert(sql.FieldEQ(FieldUUID, v)) } +// Remediation applies equality check predicate on the "remediation" field. It's identical to RemediationEQ. +func Remediation(v bool) predicate.Alert { + return predicate.Alert(sql.FieldEQ(FieldRemediation, v)) +} + // CreatedAtEQ applies the EQ predicate on the "created_at" field. 
func CreatedAtEQ(v time.Time) predicate.Alert { return predicate.Alert(sql.FieldEQ(FieldCreatedAt, v)) @@ -1600,6 +1605,26 @@ func UUIDContainsFold(v string) predicate.Alert { return predicate.Alert(sql.FieldContainsFold(FieldUUID, v)) } +// RemediationEQ applies the EQ predicate on the "remediation" field. +func RemediationEQ(v bool) predicate.Alert { + return predicate.Alert(sql.FieldEQ(FieldRemediation, v)) +} + +// RemediationNEQ applies the NEQ predicate on the "remediation" field. +func RemediationNEQ(v bool) predicate.Alert { + return predicate.Alert(sql.FieldNEQ(FieldRemediation, v)) +} + +// RemediationIsNil applies the IsNil predicate on the "remediation" field. +func RemediationIsNil() predicate.Alert { + return predicate.Alert(sql.FieldIsNull(FieldRemediation)) +} + +// RemediationNotNil applies the NotNil predicate on the "remediation" field. +func RemediationNotNil() predicate.Alert { + return predicate.Alert(sql.FieldNotNull(FieldRemediation)) +} + // HasOwner applies the HasEdge predicate on the "owner" edge. func HasOwner() predicate.Alert { return predicate.Alert(func(s *sql.Selector) { diff --git a/pkg/database/ent/alert_create.go b/pkg/database/ent/alert_create.go index 45a6e40b64f..753183a9eb9 100644 --- a/pkg/database/ent/alert_create.go +++ b/pkg/database/ent/alert_create.go @@ -338,6 +338,20 @@ func (ac *AlertCreate) SetNillableUUID(s *string) *AlertCreate { return ac } +// SetRemediation sets the "remediation" field. +func (ac *AlertCreate) SetRemediation(b bool) *AlertCreate { + ac.mutation.SetRemediation(b) + return ac +} + +// SetNillableRemediation sets the "remediation" field if the given value is not nil. +func (ac *AlertCreate) SetNillableRemediation(b *bool) *AlertCreate { + if b != nil { + ac.SetRemediation(*b) + } + return ac +} + // SetOwnerID sets the "owner" edge to the Machine entity by ID. 
func (ac *AlertCreate) SetOwnerID(id int) *AlertCreate { ac.mutation.SetOwnerID(id) @@ -603,6 +617,10 @@ func (ac *AlertCreate) createSpec() (*Alert, *sqlgraph.CreateSpec) { _spec.SetField(alert.FieldUUID, field.TypeString, value) _node.UUID = value } + if value, ok := ac.mutation.Remediation(); ok { + _spec.SetField(alert.FieldRemediation, field.TypeBool, value) + _node.Remediation = value + } if nodes := ac.mutation.OwnerIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, diff --git a/pkg/database/ent/alert_update.go b/pkg/database/ent/alert_update.go index 48ce221ac82..5f0e01ac09f 100644 --- a/pkg/database/ent/alert_update.go +++ b/pkg/database/ent/alert_update.go @@ -281,6 +281,9 @@ func (au *AlertUpdate) sqlSave(ctx context.Context) (n int, err error) { if au.mutation.UUIDCleared() { _spec.ClearField(alert.FieldUUID, field.TypeString) } + if au.mutation.RemediationCleared() { + _spec.ClearField(alert.FieldRemediation, field.TypeBool) + } if au.mutation.OwnerCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, @@ -744,6 +747,9 @@ func (auo *AlertUpdateOne) sqlSave(ctx context.Context) (_node *Alert, err error if auo.mutation.UUIDCleared() { _spec.ClearField(alert.FieldUUID, field.TypeString) } + if auo.mutation.RemediationCleared() { + _spec.ClearField(alert.FieldRemediation, field.TypeBool) + } if auo.mutation.OwnerCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, diff --git a/pkg/database/ent/migrate/schema.go b/pkg/database/ent/migrate/schema.go index c1ce25bddef..60bf72a486b 100644 --- a/pkg/database/ent/migrate/schema.go +++ b/pkg/database/ent/migrate/schema.go @@ -34,6 +34,7 @@ var ( {Name: "scenario_hash", Type: field.TypeString, Nullable: true}, {Name: "simulated", Type: field.TypeBool, Default: false}, {Name: "uuid", Type: field.TypeString, Nullable: true}, + {Name: "remediation", Type: field.TypeBool, Nullable: true}, {Name: "machine_alerts", Type: field.TypeInt, Nullable: true}, } // AlertsTable holds the schema 
information for the "alerts" table. @@ -44,7 +45,7 @@ var ( ForeignKeys: []*schema.ForeignKey{ { Symbol: "alerts_machines_alerts", - Columns: []*schema.Column{AlertsColumns[24]}, + Columns: []*schema.Column{AlertsColumns[25]}, RefColumns: []*schema.Column{MachinesColumns[0]}, OnDelete: schema.SetNull, }, diff --git a/pkg/database/ent/mutation.go b/pkg/database/ent/mutation.go index 573e0b5daa9..5b70457c512 100644 --- a/pkg/database/ent/mutation.go +++ b/pkg/database/ent/mutation.go @@ -77,6 +77,7 @@ type AlertMutation struct { scenarioHash *string simulated *bool uuid *string + remediation *bool clearedFields map[string]struct{} owner *int clearedowner bool @@ -1351,6 +1352,55 @@ func (m *AlertMutation) ResetUUID() { delete(m.clearedFields, alert.FieldUUID) } +// SetRemediation sets the "remediation" field. +func (m *AlertMutation) SetRemediation(b bool) { + m.remediation = &b +} + +// Remediation returns the value of the "remediation" field in the mutation. +func (m *AlertMutation) Remediation() (r bool, exists bool) { + v := m.remediation + if v == nil { + return + } + return *v, true +} + +// OldRemediation returns the old "remediation" field's value of the Alert entity. +// If the Alert object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AlertMutation) OldRemediation(ctx context.Context) (v bool, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldRemediation is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldRemediation requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldRemediation: %w", err) + } + return oldValue.Remediation, nil +} + +// ClearRemediation clears the value of the "remediation" field. 
+func (m *AlertMutation) ClearRemediation() { + m.remediation = nil + m.clearedFields[alert.FieldRemediation] = struct{}{} +} + +// RemediationCleared returns if the "remediation" field was cleared in this mutation. +func (m *AlertMutation) RemediationCleared() bool { + _, ok := m.clearedFields[alert.FieldRemediation] + return ok +} + +// ResetRemediation resets all changes to the "remediation" field. +func (m *AlertMutation) ResetRemediation() { + m.remediation = nil + delete(m.clearedFields, alert.FieldRemediation) +} + // SetOwnerID sets the "owner" edge to the Machine entity by id. func (m *AlertMutation) SetOwnerID(id int) { m.owner = &id @@ -1586,7 +1636,7 @@ func (m *AlertMutation) Type() string { // order to get all numeric fields that were incremented/decremented, call // AddedFields(). func (m *AlertMutation) Fields() []string { - fields := make([]string, 0, 23) + fields := make([]string, 0, 24) if m.created_at != nil { fields = append(fields, alert.FieldCreatedAt) } @@ -1656,6 +1706,9 @@ func (m *AlertMutation) Fields() []string { if m.uuid != nil { fields = append(fields, alert.FieldUUID) } + if m.remediation != nil { + fields = append(fields, alert.FieldRemediation) + } return fields } @@ -1710,6 +1763,8 @@ func (m *AlertMutation) Field(name string) (ent.Value, bool) { return m.Simulated() case alert.FieldUUID: return m.UUID() + case alert.FieldRemediation: + return m.Remediation() } return nil, false } @@ -1765,6 +1820,8 @@ func (m *AlertMutation) OldField(ctx context.Context, name string) (ent.Value, e return m.OldSimulated(ctx) case alert.FieldUUID: return m.OldUUID(ctx) + case alert.FieldRemediation: + return m.OldRemediation(ctx) } return nil, fmt.Errorf("unknown Alert field %s", name) } @@ -1935,6 +1992,13 @@ func (m *AlertMutation) SetField(name string, value ent.Value) error { } m.SetUUID(v) return nil + case alert.FieldRemediation: + v, ok := value.(bool) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + 
m.SetRemediation(v) + return nil } return fmt.Errorf("unknown Alert field %s", name) } @@ -2073,6 +2137,9 @@ func (m *AlertMutation) ClearedFields() []string { if m.FieldCleared(alert.FieldUUID) { fields = append(fields, alert.FieldUUID) } + if m.FieldCleared(alert.FieldRemediation) { + fields = append(fields, alert.FieldRemediation) + } return fields } @@ -2144,6 +2211,9 @@ func (m *AlertMutation) ClearField(name string) error { case alert.FieldUUID: m.ClearUUID() return nil + case alert.FieldRemediation: + m.ClearRemediation() + return nil } return fmt.Errorf("unknown Alert nullable field %s", name) } @@ -2221,6 +2291,9 @@ func (m *AlertMutation) ResetField(name string) error { case alert.FieldUUID: m.ResetUUID() return nil + case alert.FieldRemediation: + m.ResetRemediation() + return nil } return fmt.Errorf("unknown Alert field %s", name) } diff --git a/pkg/database/ent/schema/alert.go b/pkg/database/ent/schema/alert.go index 343979e3db7..87ace24aa84 100644 --- a/pkg/database/ent/schema/alert.go +++ b/pkg/database/ent/schema/alert.go @@ -52,6 +52,7 @@ func (Alert) Fields() []ent.Field { field.String("scenarioHash").Optional().Immutable(), field.Bool("simulated").Default(false).Immutable(), field.String("uuid").Optional().Immutable(), // this uuid is mostly here to ensure that CAPI/PAPI has a unique id for each alert + field.Bool("remediation").Optional().Immutable(), } } From 189fb9ca1b594f07f834e92af6f970230b075863 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 17 Jul 2024 09:50:31 +0200 Subject: [PATCH 224/318] deps: use go-cs-lib 0.13 (#3136) --- go.mod | 2 +- go.sum | 4 ++-- pkg/cwversion/version.go | 17 +++-------------- 3 files changed, 6 insertions(+), 17 deletions(-) diff --git a/go.mod b/go.mod index 1bc63a470df..6a2146efc5f 100644 --- a/go.mod +++ b/go.mod @@ -26,7 +26,7 @@ require ( github.com/corazawaf/libinjection-go v0.1.2 github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607 
github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 - github.com/crowdsecurity/go-cs-lib v0.0.11 + github.com/crowdsecurity/go-cs-lib v0.0.13 github.com/crowdsecurity/grokky v0.2.1 github.com/crowdsecurity/machineid v1.0.2 github.com/davecgh/go-spew v1.1.1 diff --git a/go.sum b/go.sum index ba4e6267bb9..faca9797341 100644 --- a/go.sum +++ b/go.sum @@ -105,8 +105,8 @@ github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607 h1:hyrYw3h github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607/go.mod h1:br36fEqurGYZQGit+iDYsIzW0FF6VufMbDzyyLxEuPA= github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 h1:r97WNVC30Uen+7WnLs4xDScS/Ex988+id2k6mDf8psU= github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26/go.mod h1:zpv7r+7KXwgVUZnUNjyP22zc/D7LKjyoY02weH2RBbk= -github.com/crowdsecurity/go-cs-lib v0.0.11 h1:ygUOKrkMLaJ2wjC020LgtY6XDkToNFK4NmYlhpkk5ko= -github.com/crowdsecurity/go-cs-lib v0.0.11/go.mod h1:8FMKNGsh3hMZi2SEv6P15PURhEJnZV431XjzzBSuf0k= +github.com/crowdsecurity/go-cs-lib v0.0.13 h1:asmtjIEPOibUK8eaYQCIR7XIBU/EX5vyAp1EbKFQJtY= +github.com/crowdsecurity/go-cs-lib v0.0.13/go.mod h1:ePyQyJBxp1W/1bq4YpVAilnLSz7HkzmtI7TRhX187EU= github.com/crowdsecurity/grokky v0.2.1 h1:t4VYnDlAd0RjDM2SlILalbwfCrQxtJSMGdQOR0zwkE4= github.com/crowdsecurity/grokky v0.2.1/go.mod h1:33usDIYzGDsgX1kHAThCbseso6JuWNJXOzRQDGXHtWM= github.com/crowdsecurity/machineid v1.0.2 h1:wpkpsUghJF8Khtmn/tg6GxgdhLA1Xflerh5lirI+bdc= diff --git a/pkg/cwversion/version.go b/pkg/cwversion/version.go index 01509833c1c..28d5c2a621c 100644 --- a/pkg/cwversion/version.go +++ b/pkg/cwversion/version.go @@ -10,7 +10,7 @@ import ( ) var ( - Codename string // = "SoumSoum" + Codename string // = "SoumSoum" Libre2 = "WebAssembly" ) @@ -21,19 +21,8 @@ const ( Constraint_acquis = ">= 1.0, < 2.0" ) -func versionWithTag() string { - // if the version number already contains the tag, don't duplicate it - ret := version.Version - - if !strings.HasSuffix(ret, 
version.Tag) && !strings.HasSuffix(ret, "g"+version.Tag+"-dirty") { - ret += "-" + version.Tag - } - - return ret -} - func FullString() string { - ret := fmt.Sprintf("version: %s\n", versionWithTag()) + ret := fmt.Sprintf("version: %s\n", version.String()) ret += fmt.Sprintf("Codename: %s\n", Codename) ret += fmt.Sprintf("BuildDate: %s\n", version.BuildDate) ret += fmt.Sprintf("GoVersion: %s\n", version.GoVersion) @@ -49,7 +38,7 @@ func FullString() string { } func UserAgent() string { - return "crowdsec/" + versionWithTag() + "-" + version.System + return "crowdsec/" + version.String() + "-" + version.System } // VersionStrip remove the tag from the version string, used to match with a hub branch From 8d96ddd48eec924dfd377d54869442a98d7be1f9 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 17 Jul 2024 12:30:52 +0200 Subject: [PATCH 225/318] refact cscli metrics: fix lines between tables, skip wrapper api (#3137) * fix empty line between metrics tables * refact metrics tables: use go-pretty api directly * lint --- cmd/crowdsec-cli/bouncers.go | 4 +- cmd/crowdsec-cli/climetrics/list.go | 17 ++++--- cmd/crowdsec-cli/climetrics/statacquis.go | 13 +++-- cmd/crowdsec-cli/climetrics/statalert.go | 18 +++---- .../climetrics/statappsecengine.go | 13 +++-- cmd/crowdsec-cli/climetrics/statappsecrule.go | 14 +++--- cmd/crowdsec-cli/climetrics/statbouncer.go | 43 +++++++++------- cmd/crowdsec-cli/climetrics/statbucket.go | 13 +++-- cmd/crowdsec-cli/climetrics/statdecision.go | 18 +++---- cmd/crowdsec-cli/climetrics/statlapi.go | 29 ++++------- .../climetrics/statlapibouncer.go | 13 +++-- .../climetrics/statlapidecision.go | 17 +++---- .../climetrics/statlapimachine.go | 13 +++-- cmd/crowdsec-cli/climetrics/statparser.go | 13 +++-- cmd/crowdsec-cli/climetrics/statstash.go | 18 +++---- cmd/crowdsec-cli/climetrics/statwhitelist.go | 13 +++-- cmd/crowdsec-cli/climetrics/table.go | 23 ++++----- cmd/crowdsec-cli/cstable/cstable.go | 12 ----- 
cmd/crowdsec-cli/machines.go | 6 +-- cmd/crowdsec-cli/utils_table.go | 50 +++++++++---------- 20 files changed, 164 insertions(+), 196 deletions(-) diff --git a/cmd/crowdsec-cli/bouncers.go b/cmd/crowdsec-cli/bouncers.go index f7237b8216c..8057cba98c9 100644 --- a/cmd/crowdsec-cli/bouncers.go +++ b/cmd/crowdsec-cli/bouncers.go @@ -142,7 +142,7 @@ func (cli *cliBouncers) listHuman(out io.Writer, bouncers ent.Bouncers) { t.AppendRow(table.Row{b.Name, b.IPAddress, revoked, lastPull, b.Type, b.Version, b.AuthType}) } - fmt.Fprintln(out, t.Render()) + io.WriteString(out, t.Render() + "\n") } // bouncerInfo contains only the data we want for inspect/list @@ -475,7 +475,7 @@ func (cli *cliBouncers) inspectHuman(out io.Writer, bouncer *ent.Bouncer) { t.AppendRow(table.Row{"Feature Flags", ff}) } - fmt.Fprintln(out, t.Render()) + io.WriteString(out, t.Render() + "\n") } func (cli *cliBouncers) inspect(bouncer *ent.Bouncer) error { diff --git a/cmd/crowdsec-cli/climetrics/list.go b/cmd/crowdsec-cli/climetrics/list.go index 074f0603555..d3afbef0669 100644 --- a/cmd/crowdsec-cli/climetrics/list.go +++ b/cmd/crowdsec-cli/climetrics/list.go @@ -3,12 +3,12 @@ package climetrics import ( "encoding/json" "fmt" + "io" "github.com/fatih/color" - "github.com/spf13/cobra" - "github.com/jedib0t/go-pretty/v6/table" "github.com/jedib0t/go-pretty/v6/text" + "github.com/spf13/cobra" "github.com/crowdsecurity/go-cs-lib/maptools" @@ -43,27 +43,28 @@ func (cli *cliMetrics) list() error { t.AppendHeader(table.Row{"Type", "Title", "Description"}) t.SetColumnConfigs([]table.ColumnConfig{ { - Name: "Type", + Name: "Type", AlignHeader: text.AlignCenter, }, { - Name: "Title", + Name: "Title", AlignHeader: text.AlignCenter, }, { - Name: "Description", - AlignHeader: text.AlignCenter, - WidthMax: 60, + Name: "Description", + AlignHeader: text.AlignCenter, + WidthMax: 60, WidthMaxEnforcer: text.WrapSoft, }, }) + t.Style().Options.SeparateRows = true for _, metric := range allMetrics { 
t.AppendRow(table.Row{metric.Type, metric.Title, metric.Description}) } - fmt.Fprintln(out, t.Render()) + io.WriteString(out, t.Render() + "\n") case "json": x, err := json.MarshalIndent(allMetrics, "", " ") if err != nil { diff --git a/cmd/crowdsec-cli/climetrics/statacquis.go b/cmd/crowdsec-cli/climetrics/statacquis.go index a18b8a2e2a2..827dcf036c3 100644 --- a/cmd/crowdsec-cli/climetrics/statacquis.go +++ b/cmd/crowdsec-cli/climetrics/statacquis.go @@ -3,7 +3,7 @@ package climetrics import ( "io" - "github.com/jedib0t/go-pretty/v6/text" + "github.com/jedib0t/go-pretty/v6/table" log "github.com/sirupsen/logrus" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" @@ -28,10 +28,8 @@ func (s statAcquis) Process(source, metric string, val int) { } func (s statAcquis) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { - t := cstable.New(out, wantColor) - t.SetRowLines(false) - t.SetHeaders("Source", "Lines read", "Lines parsed", "Lines unparsed", "Lines poured to bucket", "Lines whitelisted") - t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) + t := cstable.New(out, wantColor).Writer + t.AppendHeader(table.Row{"Source", "Lines read", "Lines parsed", "Lines unparsed", "Lines poured to bucket", "Lines whitelisted"}) keys := []string{"reads", "parsed", "unparsed", "pour", "whitelisted"} @@ -39,7 +37,8 @@ func (s statAcquis) Table(out io.Writer, wantColor string, noUnit bool, showEmpt log.Warningf("while collecting acquis stats: %s", err) } else if numRows > 0 || showEmpty { title, _ := s.Description() - cstable.RenderTitle(out, "\n"+title+":") - t.Render() + io.WriteString(out, title + ":\n") + io.WriteString(out, t.Render() + "\n") + io.WriteString(out, "\n") } } diff --git a/cmd/crowdsec-cli/climetrics/statalert.go b/cmd/crowdsec-cli/climetrics/statalert.go index 65009fa322d..e48dd6c924f 100644 --- a/cmd/crowdsec-cli/climetrics/statalert.go +++ b/cmd/crowdsec-cli/climetrics/statalert.go @@ -4,7 
+4,7 @@ import ( "io" "strconv" - "github.com/jedib0t/go-pretty/v6/text" + "github.com/jedib0t/go-pretty/v6/table" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" ) @@ -21,25 +21,25 @@ func (s statAlert) Process(reason string, val int) { } func (s statAlert) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { - t := cstable.New(out, wantColor) - t.SetRowLines(false) - t.SetHeaders("Reason", "Count") - t.SetAlignment(text.AlignLeft, text.AlignLeft) + t := cstable.New(out, wantColor).Writer + t.AppendHeader(table.Row{"Reason", "Count"}) numRows := 0 + // TODO: sort keys for scenario, hits := range s { - t.AddRow( + t.AppendRow(table.Row{ scenario, strconv.Itoa(hits), - ) + }) numRows++ } if numRows > 0 || showEmpty { title, _ := s.Description() - cstable.RenderTitle(out, "\n"+title+":") - t.Render() + io.WriteString(out, title + ":\n") + io.WriteString(out, t.Render() + "\n") + io.WriteString(out, "\n") } } diff --git a/cmd/crowdsec-cli/climetrics/statappsecengine.go b/cmd/crowdsec-cli/climetrics/statappsecengine.go index e4e0048daaf..4a249e11687 100644 --- a/cmd/crowdsec-cli/climetrics/statappsecengine.go +++ b/cmd/crowdsec-cli/climetrics/statappsecengine.go @@ -3,7 +3,7 @@ package climetrics import ( "io" - "github.com/jedib0t/go-pretty/v6/text" + "github.com/jedib0t/go-pretty/v6/table" log "github.com/sirupsen/logrus" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" @@ -25,10 +25,8 @@ func (s statAppsecEngine) Process(appsecEngine, metric string, val int) { } func (s statAppsecEngine) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { - t := cstable.New(out, wantColor) - t.SetRowLines(false) - t.SetHeaders("Appsec Engine", "Processed", "Blocked") - t.SetAlignment(text.AlignLeft, text.AlignLeft) + t := cstable.New(out, wantColor).Writer + t.AppendHeader(table.Row{"Appsec Engine", "Processed", "Blocked"}) keys := []string{"processed", "blocked"} @@ -36,7 +34,8 @@ func (s statAppsecEngine) Table(out 
io.Writer, wantColor string, noUnit bool, sh log.Warningf("while collecting appsec stats: %s", err) } else if numRows > 0 || showEmpty { title, _ := s.Description() - cstable.RenderTitle(out, "\n"+title+":") - t.Render() + io.WriteString(out, title + ":\n") + io.WriteString(out, t.Render() + "\n") + io.WriteString(out, "\n") } } diff --git a/cmd/crowdsec-cli/climetrics/statappsecrule.go b/cmd/crowdsec-cli/climetrics/statappsecrule.go index 9647a111b77..2f859d70cfb 100644 --- a/cmd/crowdsec-cli/climetrics/statappsecrule.go +++ b/cmd/crowdsec-cli/climetrics/statappsecrule.go @@ -4,7 +4,7 @@ import ( "fmt" "io" - "github.com/jedib0t/go-pretty/v6/text" + "github.com/jedib0t/go-pretty/v6/table" log "github.com/sirupsen/logrus" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" @@ -30,19 +30,19 @@ func (s statAppsecRule) Process(appsecEngine, appsecRule string, metric string, } func (s statAppsecRule) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { + // TODO: sort keys for appsecEngine, appsecEngineRulesStats := range s { - t := cstable.New(out, wantColor) - t.SetRowLines(false) - t.SetHeaders("Rule ID", "Triggered") - t.SetAlignment(text.AlignLeft, text.AlignLeft) + t := cstable.New(out, wantColor).Writer + t.AppendHeader(table.Row{"Rule ID", "Triggered"}) keys := []string{"triggered"} if numRows, err := metricsToTable(t, appsecEngineRulesStats, keys, noUnit); err != nil { log.Warningf("while collecting appsec rules stats: %s", err) } else if numRows > 0 || showEmpty { - cstable.RenderTitle(out, fmt.Sprintf("\nAppsec '%s' Rules Metrics:", appsecEngine)) - t.Render() + io.WriteString(out, fmt.Sprintf("Appsec '%s' Rules Metrics:\n", appsecEngine)) + io.WriteString(out, t.Render() + "\n") + io.WriteString(out, "\n") } } } diff --git a/cmd/crowdsec-cli/climetrics/statbouncer.go b/cmd/crowdsec-cli/climetrics/statbouncer.go index 0105e345313..1a803cefbd2 100644 --- a/cmd/crowdsec-cli/climetrics/statbouncer.go +++ 
b/cmd/crowdsec-cli/climetrics/statbouncer.go @@ -34,8 +34,8 @@ type statBouncer struct { oldestTS map[string]*time.Time // we keep de-normalized metrics so we can iterate // over them multiple times and keep the aggregation code simple - rawMetrics []bouncerMetricItem - aggregated map[string]map[string]map[string]map[string]int64 + rawMetrics []bouncerMetricItem + aggregated map[string]map[string]map[string]map[string]int64 aggregatedAllOrigin map[string]map[string]map[string]int64 } @@ -57,6 +57,7 @@ func (s *statBouncer) Description() (string, string) { func warnOnce(warningsLogged map[string]bool, msg string) { if _, ok := warningsLogged[msg]; !ok { log.Warningf(msg) + warningsLogged[msg] = true } } @@ -200,6 +201,7 @@ func (s *statBouncer) aggregate() { // bouncerTable displays a table of metrics for a single bouncer func (s *statBouncer) bouncerTable(out io.Writer, bouncerName string, wantColor string, noUnit bool) { columns := make(map[string]map[string]bool) + for _, item := range s.rawMetrics { if item.bouncerName != bouncerName { continue @@ -208,6 +210,7 @@ func (s *statBouncer) bouncerTable(out io.Writer, bouncerName string, wantColor if _, ok := columns[item.name]; !ok { columns[item.name] = make(map[string]bool) } + columns[item.name][item.unit] = true } @@ -225,16 +228,16 @@ func (s *statBouncer) bouncerTable(out io.Writer, bouncerName string, wantColor colNum := 1 colCfg := []table.ColumnConfig{{ - Number:colNum, - AlignHeader: - text.AlignLeft, - Align: text.AlignLeft, + Number: colNum, + AlignHeader: text.AlignLeft, + Align: text.AlignLeft, AlignFooter: text.AlignRight, }} for _, name := range maptools.SortedKeys(columns) { for _, unit := range maptools.SortedKeys(columns[name]) { colNum += 1 + header1 = append(header1, name) // we don't add "s" to random words @@ -244,11 +247,11 @@ func (s *statBouncer) bouncerTable(out io.Writer, bouncerName string, wantColor header2 = append(header2, unit) colCfg = append(colCfg, table.ColumnConfig{ - Number: 
colNum, + Number: colNum, AlignHeader: text.AlignCenter, - Align: text.AlignRight, - AlignFooter: text.AlignRight}, - ) + Align: text.AlignRight, + AlignFooter: text.AlignRight, + }) } } @@ -277,16 +280,20 @@ func (s *statBouncer) bouncerTable(out io.Writer, bouncerName string, wantColor } row := table.Row{origin} + for _, name := range maptools.SortedKeys(columns) { for _, unit := range maptools.SortedKeys(columns[name]) { valStr := "-" + val, ok := metrics[name][unit] if ok { valStr = formatNumber(val, !noUnit) } + row = append(row, valStr) } } + t.AppendRow(row) numRows += 1 @@ -299,6 +306,7 @@ func (s *statBouncer) bouncerTable(out io.Writer, bouncerName string, wantColor } footer := table.Row{"Total"} + for _, name := range maptools.SortedKeys(columns) { for _, unit := range maptools.SortedKeys(columns[name]) { footer = append(footer, formatNumber(totals[name][unit], !noUnit)) @@ -309,16 +317,19 @@ func (s *statBouncer) bouncerTable(out io.Writer, bouncerName string, wantColor title, _ := s.Description() title = fmt.Sprintf("%s (%s)", title, bouncerName) + if s.oldestTS != nil { // if we change this to .Local() beware of tests title = fmt.Sprintf("%s since %s", title, s.oldestTS[bouncerName].String()) } + title += ":" // don't use SetTitle() because it draws the title inside table box - // TODO: newline position wrt other stat tables - cstable.RenderTitle(out, title) - fmt.Fprintln(out, t.Render()) + io.WriteString(out, title+"\n") + io.WriteString(out, t.Render() + "\n") + // empty line between tables + io.WriteString(out, "\n") } // Table displays a table of metrics for each bouncer @@ -328,13 +339,7 @@ func (s *statBouncer) Table(out io.Writer, wantColor string, noUnit bool, _ bool bouncerNames[item.bouncerName] = true } - nl := false for _, bouncerName := range maptools.SortedKeys(bouncerNames) { - if nl { - // empty line between tables - fmt.Fprintln(out) - } s.bouncerTable(out, bouncerName, wantColor, noUnit) - nl = true } } diff --git 
a/cmd/crowdsec-cli/climetrics/statbucket.go b/cmd/crowdsec-cli/climetrics/statbucket.go index 836fa0ed1ab..507d9f3a476 100644 --- a/cmd/crowdsec-cli/climetrics/statbucket.go +++ b/cmd/crowdsec-cli/climetrics/statbucket.go @@ -3,7 +3,7 @@ package climetrics import ( "io" - "github.com/jedib0t/go-pretty/v6/text" + "github.com/jedib0t/go-pretty/v6/table" log "github.com/sirupsen/logrus" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" @@ -26,10 +26,8 @@ func (s statBucket) Process(bucket, metric string, val int) { } func (s statBucket) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { - t := cstable.New(out, wantColor) - t.SetRowLines(false) - t.SetHeaders("Scenario", "Current Count", "Overflows", "Instantiated", "Poured", "Expired") - t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) + t := cstable.New(out, wantColor).Writer + t.AppendHeader(table.Row{"Scenario", "Current Count", "Overflows", "Instantiated", "Poured", "Expired"}) keys := []string{"curr_count", "overflow", "instantiation", "pour", "underflow"} @@ -37,7 +35,8 @@ func (s statBucket) Table(out io.Writer, wantColor string, noUnit bool, showEmpt log.Warningf("while collecting scenario stats: %s", err) } else if numRows > 0 || showEmpty { title, _ := s.Description() - cstable.RenderTitle(out, "\n"+title+":") - t.Render() + io.WriteString(out, title + ":\n") + io.WriteString(out, t.Render() + "\n") + io.WriteString(out, "\n") } } diff --git a/cmd/crowdsec-cli/climetrics/statdecision.go b/cmd/crowdsec-cli/climetrics/statdecision.go index 485644a55ba..145665cfba2 100644 --- a/cmd/crowdsec-cli/climetrics/statdecision.go +++ b/cmd/crowdsec-cli/climetrics/statdecision.go @@ -4,7 +4,7 @@ import ( "io" "strconv" - "github.com/jedib0t/go-pretty/v6/text" + "github.com/jedib0t/go-pretty/v6/table" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" ) @@ -30,22 +30,21 @@ func (s statDecision) Process(reason, origin, 
action string, val int) { } func (s statDecision) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { - t := cstable.New(out, wantColor) - t.SetRowLines(false) - t.SetHeaders("Reason", "Origin", "Action", "Count") - t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) + t := cstable.New(out, wantColor).Writer + t.AppendHeader(table.Row{"Reason", "Origin", "Action", "Count"}) numRows := 0 + // TODO: sort by reason, origin, action for reason, origins := range s { for origin, actions := range origins { for action, hits := range actions { - t.AddRow( + t.AppendRow(table.Row{ reason, origin, action, strconv.Itoa(hits), - ) + }) numRows++ } @@ -54,7 +53,8 @@ func (s statDecision) Table(out io.Writer, wantColor string, noUnit bool, showEm if numRows > 0 || showEmpty { title, _ := s.Description() - cstable.RenderTitle(out, "\n"+title+":") - t.Render() + io.WriteString(out, title + ":\n") + io.WriteString(out, t.Render() + "\n") + io.WriteString(out, "\n") } } diff --git a/cmd/crowdsec-cli/climetrics/statlapi.go b/cmd/crowdsec-cli/climetrics/statlapi.go index 7d8831aad74..45b384708bf 100644 --- a/cmd/crowdsec-cli/climetrics/statlapi.go +++ b/cmd/crowdsec-cli/climetrics/statlapi.go @@ -2,10 +2,9 @@ package climetrics import ( "io" - "sort" "strconv" - "github.com/jedib0t/go-pretty/v6/text" + "github.com/jedib0t/go-pretty/v6/table" "github.com/crowdsecurity/go-cs-lib/maptools" @@ -28,10 +27,8 @@ func (s statLapi) Process(route, method string, val int) { } func (s statLapi) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { - t := cstable.New(out, wantColor) - t.SetRowLines(false) - t.SetHeaders("Route", "Method", "Hits") - t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft) + t := cstable.New(out, wantColor).Writer + t.AppendHeader(table.Row{"Route", "Method", "Hits"}) // unfortunately, we can't reuse metricsToTable as the structure is too different :/ numRows := 0 @@ -39,21 +36,12 @@ func (s statLapi) 
Table(out io.Writer, wantColor string, noUnit bool, showEmpty for _, alabel := range maptools.SortedKeys(s) { astats := s[alabel] - subKeys := []string{} - for skey := range astats { - subKeys = append(subKeys, skey) - } - - sort.Strings(subKeys) - - for _, sl := range subKeys { - row := []string{ + for _, sl := range maptools.SortedKeys(astats) { + t.AppendRow(table.Row{ alabel, sl, strconv.Itoa(astats[sl]), - } - - t.AddRow(row...) + }) numRows++ } @@ -61,7 +49,8 @@ func (s statLapi) Table(out io.Writer, wantColor string, noUnit bool, showEmpty if numRows > 0 || showEmpty { title, _ := s.Description() - cstable.RenderTitle(out, "\n"+title+":") - t.Render() + io.WriteString(out, title + ":\n") + io.WriteString(out, t.Render() + "\n") + io.WriteString(out, "\n") } } diff --git a/cmd/crowdsec-cli/climetrics/statlapibouncer.go b/cmd/crowdsec-cli/climetrics/statlapibouncer.go index 3ee35adfe9a..828ccb33413 100644 --- a/cmd/crowdsec-cli/climetrics/statlapibouncer.go +++ b/cmd/crowdsec-cli/climetrics/statlapibouncer.go @@ -3,7 +3,7 @@ package climetrics import ( "io" - "github.com/jedib0t/go-pretty/v6/text" + "github.com/jedib0t/go-pretty/v6/table" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" ) @@ -28,16 +28,15 @@ func (s statLapiBouncer) Process(bouncer, route, method string, val int) { } func (s statLapiBouncer) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { - t := cstable.New(out, wantColor) - t.SetRowLines(false) - t.SetHeaders("Bouncer", "Route", "Method", "Hits") - t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) + t := cstable.New(out, wantColor).Writer + t.AppendHeader(table.Row{"Bouncer", "Route", "Method", "Hits"}) numRows := lapiMetricsToTable(t, s) if numRows > 0 || showEmpty { title, _ := s.Description() - cstable.RenderTitle(out, "\n"+title+":") - t.Render() + io.WriteString(out, title + ":\n") + io.WriteString(out, t.Render() + "\n") + io.WriteString(out, "\n") } } diff --git 
a/cmd/crowdsec-cli/climetrics/statlapidecision.go b/cmd/crowdsec-cli/climetrics/statlapidecision.go index 5f4d2c07764..ffc999555c1 100644 --- a/cmd/crowdsec-cli/climetrics/statlapidecision.go +++ b/cmd/crowdsec-cli/climetrics/statlapidecision.go @@ -4,7 +4,7 @@ import ( "io" "strconv" - "github.com/jedib0t/go-pretty/v6/text" + "github.com/jedib0t/go-pretty/v6/table" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" ) @@ -40,26 +40,25 @@ func (s statLapiDecision) Process(bouncer, fam string, val int) { } func (s statLapiDecision) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { - t := cstable.New(out, wantColor) - t.SetRowLines(false) - t.SetHeaders("Bouncer", "Empty answers", "Non-empty answers") - t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft) + t := cstable.New(out, wantColor).Writer + t.AppendHeader(table.Row{"Bouncer", "Empty answers", "Non-empty answers"}) numRows := 0 for bouncer, hits := range s { - t.AddRow( + t.AppendRow(table.Row{ bouncer, strconv.Itoa(hits.Empty), strconv.Itoa(hits.NonEmpty), - ) + }) numRows++ } if numRows > 0 || showEmpty { title, _ := s.Description() - cstable.RenderTitle(out, "\n"+title+":") - t.Render() + io.WriteString(out, title + ":\n") + io.WriteString(out, t.Render() + "\n") + io.WriteString(out, "\n") } } diff --git a/cmd/crowdsec-cli/climetrics/statlapimachine.go b/cmd/crowdsec-cli/climetrics/statlapimachine.go index 2f81ccb5751..09abe2dd44b 100644 --- a/cmd/crowdsec-cli/climetrics/statlapimachine.go +++ b/cmd/crowdsec-cli/climetrics/statlapimachine.go @@ -3,7 +3,7 @@ package climetrics import ( "io" - "github.com/jedib0t/go-pretty/v6/text" + "github.com/jedib0t/go-pretty/v6/table" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" ) @@ -28,16 +28,15 @@ func (s statLapiMachine) Process(machine, route, method string, val int) { } func (s statLapiMachine) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { - t := cstable.New(out, wantColor) - 
t.SetRowLines(false) - t.SetHeaders("Machine", "Route", "Method", "Hits") - t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) + t := cstable.New(out, wantColor).Writer + t.AppendHeader(table.Row{"Machine", "Route", "Method", "Hits"}) numRows := lapiMetricsToTable(t, s) if numRows > 0 || showEmpty { title, _ := s.Description() - cstable.RenderTitle(out, "\n"+title+":") - t.Render() + io.WriteString(out, title + ":\n") + io.WriteString(out, t.Render() + "\n") + io.WriteString(out, "\n") } } diff --git a/cmd/crowdsec-cli/climetrics/statparser.go b/cmd/crowdsec-cli/climetrics/statparser.go index 58ce2248648..0b3512052b9 100644 --- a/cmd/crowdsec-cli/climetrics/statparser.go +++ b/cmd/crowdsec-cli/climetrics/statparser.go @@ -3,7 +3,7 @@ package climetrics import ( "io" - "github.com/jedib0t/go-pretty/v6/text" + "github.com/jedib0t/go-pretty/v6/table" log "github.com/sirupsen/logrus" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" @@ -27,10 +27,8 @@ func (s statParser) Process(parser, metric string, val int) { } func (s statParser) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { - t := cstable.New(out, wantColor) - t.SetRowLines(false) - t.SetHeaders("Parsers", "Hits", "Parsed", "Unparsed") - t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) + t := cstable.New(out, wantColor).Writer + t.AppendHeader(table.Row{"Parsers", "Hits", "Parsed", "Unparsed"}) keys := []string{"hits", "parsed", "unparsed"} @@ -38,7 +36,8 @@ func (s statParser) Table(out io.Writer, wantColor string, noUnit bool, showEmpt log.Warningf("while collecting parsers stats: %s", err) } else if numRows > 0 || showEmpty { title, _ := s.Description() - cstable.RenderTitle(out, "\n"+title+":") - t.Render() + io.WriteString(out, title + ":\n") + io.WriteString(out, t.Render() + "\n") + io.WriteString(out, "\n") } } diff --git a/cmd/crowdsec-cli/climetrics/statstash.go b/cmd/crowdsec-cli/climetrics/statstash.go index 
9de3469bea1..5938ac05fc8 100644 --- a/cmd/crowdsec-cli/climetrics/statstash.go +++ b/cmd/crowdsec-cli/climetrics/statstash.go @@ -4,7 +4,7 @@ import ( "io" "strconv" - "github.com/jedib0t/go-pretty/v6/text" + "github.com/jedib0t/go-pretty/v6/table" "github.com/crowdsecurity/go-cs-lib/maptools" @@ -32,10 +32,8 @@ func (s statStash) Process(name, mtype string, val int) { } func (s statStash) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { - t := cstable.New(out, wantColor) - t.SetRowLines(false) - t.SetHeaders("Name", "Type", "Items") - t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft) + t := cstable.New(out, wantColor).Writer + t.AppendHeader(table.Row{"Name", "Type", "Items"}) // unfortunately, we can't reuse metricsToTable as the structure is too different :/ numRows := 0 @@ -43,19 +41,19 @@ func (s statStash) Table(out io.Writer, wantColor string, noUnit bool, showEmpty for _, alabel := range maptools.SortedKeys(s) { astats := s[alabel] - row := []string{ + t.AppendRow(table.Row{ alabel, astats.Type, strconv.Itoa(astats.Count), - } - t.AddRow(row...) 
+ }) numRows++ } if numRows > 0 || showEmpty { title, _ := s.Description() - cstable.RenderTitle(out, "\n"+title+":") - t.Render() + io.WriteString(out, title + ":\n") + io.WriteString(out, t.Render() + "\n") + io.WriteString(out, "\n") } } diff --git a/cmd/crowdsec-cli/climetrics/statwhitelist.go b/cmd/crowdsec-cli/climetrics/statwhitelist.go index 6848452458b..ccb7e52153b 100644 --- a/cmd/crowdsec-cli/climetrics/statwhitelist.go +++ b/cmd/crowdsec-cli/climetrics/statwhitelist.go @@ -3,7 +3,7 @@ package climetrics import ( "io" - "github.com/jedib0t/go-pretty/v6/text" + "github.com/jedib0t/go-pretty/v6/table" log "github.com/sirupsen/logrus" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" @@ -29,16 +29,15 @@ func (s statWhitelist) Process(whitelist, reason, metric string, val int) { } func (s statWhitelist) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { - t := cstable.New(out, wantColor) - t.SetRowLines(false) - t.SetHeaders("Whitelist", "Reason", "Hits", "Whitelisted") - t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) + t := cstable.New(out, wantColor).Writer + t.AppendHeader(table.Row{"Whitelist", "Reason", "Hits", "Whitelisted"}) if numRows, err := wlMetricsToTable(t, s, noUnit); err != nil { log.Warningf("while collecting parsers stats: %s", err) } else if numRows > 0 || showEmpty { title, _ := s.Description() - cstable.RenderTitle(out, "\n"+title+":") - t.Render() + io.WriteString(out, title + ":\n") + io.WriteString(out, t.Render() + "\n") + io.WriteString(out, "\n") } } diff --git a/cmd/crowdsec-cli/climetrics/table.go b/cmd/crowdsec-cli/climetrics/table.go index c446f2267e9..af13edce2f5 100644 --- a/cmd/crowdsec-cli/climetrics/table.go +++ b/cmd/crowdsec-cli/climetrics/table.go @@ -5,17 +5,16 @@ import ( "sort" "strconv" + "github.com/jedib0t/go-pretty/v6/table" log "github.com/sirupsen/logrus" "github.com/crowdsecurity/go-cs-lib/maptools" - - 
"github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" ) // ErrNilTable means a nil pointer was passed instead of a table instance. This is a programming error. var ErrNilTable = errors.New("nil table") -func lapiMetricsToTable(t *cstable.Table, stats map[string]map[string]map[string]int) int { +func lapiMetricsToTable(t table.Writer, stats map[string]map[string]map[string]int) int { // stats: machine -> route -> method -> count // sort keys to keep consistent order when printing machineKeys := []string{} @@ -32,7 +31,7 @@ func lapiMetricsToTable(t *cstable.Table, stats map[string]map[string]map[string machineRow := stats[machine] for routeName, route := range machineRow { for methodName, count := range route { - row := []string{ + row := table.Row{ machine, routeName, methodName, @@ -43,7 +42,7 @@ func lapiMetricsToTable(t *cstable.Table, stats map[string]map[string]map[string row = append(row, "-") } - t.AddRow(row...) + t.AppendRow(row) numRows++ } @@ -53,7 +52,7 @@ func lapiMetricsToTable(t *cstable.Table, stats map[string]map[string]map[string return numRows } -func wlMetricsToTable(t *cstable.Table, stats map[string]map[string]map[string]int, noUnit bool) (int, error) { +func wlMetricsToTable(t table.Writer, stats map[string]map[string]map[string]int, noUnit bool) (int, error) { if t == nil { return 0, ErrNilTable } @@ -62,7 +61,7 @@ func wlMetricsToTable(t *cstable.Table, stats map[string]map[string]map[string]i for _, name := range maptools.SortedKeys(stats) { for _, reason := range maptools.SortedKeys(stats[name]) { - row := []string{ + row := table.Row{ name, reason, "-", @@ -82,7 +81,7 @@ func wlMetricsToTable(t *cstable.Table, stats map[string]map[string]map[string]i } } - t.AddRow(row...) 
+ t.AppendRow(row) numRows++ } @@ -91,7 +90,7 @@ func wlMetricsToTable(t *cstable.Table, stats map[string]map[string]map[string]i return numRows, nil } -func metricsToTable(t *cstable.Table, stats map[string]map[string]int, keys []string, noUnit bool) (int, error) { +func metricsToTable(t table.Writer, stats map[string]map[string]int, keys []string, noUnit bool) (int, error) { if t == nil { return 0, ErrNilTable } @@ -104,9 +103,7 @@ func metricsToTable(t *cstable.Table, stats map[string]map[string]int, keys []st continue } - row := []string{ - alabel, - } + row := table.Row{alabel} for _, sl := range keys { if v, ok := astats[sl]; ok && v != 0 { @@ -116,7 +113,7 @@ func metricsToTable(t *cstable.Table, stats map[string]map[string]int, keys []st } } - t.AddRow(row...) + t.AppendRow(row) numRows++ } diff --git a/cmd/crowdsec-cli/cstable/cstable.go b/cmd/crowdsec-cli/cstable/cstable.go index f7ddb604d84..85ba491f4e8 100644 --- a/cmd/crowdsec-cli/cstable/cstable.go +++ b/cmd/crowdsec-cli/cstable/cstable.go @@ -13,18 +13,6 @@ import ( isatty "github.com/mattn/go-isatty" ) -func RenderTitle(out io.Writer, title string) { - if out == nil { - panic("renderTableTitle: out is nil") - } - - if title == "" { - return - } - - fmt.Fprintln(out, title) -} - func shouldWeColorize(wantColor string) bool { switch wantColor { case "yes": diff --git a/cmd/crowdsec-cli/machines.go b/cmd/crowdsec-cli/machines.go index 3489550b4fd..e50a17f02dc 100644 --- a/cmd/crowdsec-cli/machines.go +++ b/cmd/crowdsec-cli/machines.go @@ -184,7 +184,7 @@ func (cli *cliMachines) inspectHubHuman(out io.Writer, machine *ent.Machine) { t.AppendHeader(table.Row{"Name", "Status", "Version"}) t.SetTitle(itemType) t.AppendRows(rows) - fmt.Fprintln(out, t.Render()) + io.WriteString(out, t.Render() + "\n") } } @@ -206,7 +206,7 @@ func (cli *cliMachines) listHuman(out io.Writer, machines ent.Machines) { t.AppendRow(table.Row{m.MachineId, m.IpAddress, m.UpdatedAt.Format(time.RFC3339), validated, m.Version, 
getOSNameAndVersion(m), m.AuthType, hb}) } - fmt.Fprintln(out, t.Render()) + io.WriteString(out, t.Render() + "\n") } // machineInfo contains only the data we want for inspect/list: no hub status, scenarios, edges, etc. @@ -658,7 +658,7 @@ func (cli *cliMachines) inspectHuman(out io.Writer, machine *ent.Machine) { t.AppendRow(table.Row{"Collections", coll.Name}) } - fmt.Fprintln(out, t.Render()) + io.WriteString(out, t.Render() + "\n") } func (cli *cliMachines) inspect(machine *ent.Machine) error { diff --git a/cmd/crowdsec-cli/utils_table.go b/cmd/crowdsec-cli/utils_table.go index c0043e47ee3..6df16cd85f5 100644 --- a/cmd/crowdsec-cli/utils_table.go +++ b/cmd/crowdsec-cli/utils_table.go @@ -5,7 +5,7 @@ import ( "io" "strconv" - "github.com/jedib0t/go-pretty/v6/text" + "github.com/jedib0t/go-pretty/v6/table" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" "github.com/crowdsecurity/crowdsec/pkg/cwhub" @@ -13,31 +13,29 @@ import ( ) func listHubItemTable(out io.Writer, wantColor string, title string, items []*cwhub.Item) { - t := cstable.NewLight(out, wantColor) - t.SetHeaders("Name", fmt.Sprintf("%v Status", emoji.Package), "Version", "Local Path") - t.SetHeaderAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) - t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) + t := cstable.NewLight(out, wantColor).Writer + t.AppendHeader(table.Row{"Name", fmt.Sprintf("%v Status", emoji.Package), "Version", "Local Path"}) for _, item := range items { status := fmt.Sprintf("%v %s", item.State.Emoji(), item.State.Text()) - t.AddRow(item.Name, status, item.State.LocalVersion, item.State.LocalPath) + t.AppendRow(table.Row{item.Name, status, item.State.LocalVersion, item.State.LocalPath}) } - cstable.RenderTitle(out, title) - t.Render() + io.WriteString(out, title+"\n") + io.WriteString(out, t.Render() + "\n") } func appsecMetricsTable(out io.Writer, wantColor string, itemName string, metrics map[string]int) { - t := 
cstable.NewLight(out, wantColor) - t.SetHeaders("Inband Hits", "Outband Hits") + t := cstable.NewLight(out, wantColor).Writer + t.AppendHeader(table.Row{"Inband Hits", "Outband Hits"}) - t.AddRow( + t.AppendRow(table.Row{ strconv.Itoa(metrics["inband_hits"]), strconv.Itoa(metrics["outband_hits"]), - ) + }) - cstable.RenderTitle(out, fmt.Sprintf("\n - (AppSec Rule) %s:", itemName)) - t.Render() + io.WriteString(out, fmt.Sprintf("\n - (AppSec Rule) %s:\n", itemName)) + io.WriteString(out, t.Render() + "\n") } func scenarioMetricsTable(out io.Writer, wantColor string, itemName string, metrics map[string]int) { @@ -45,43 +43,43 @@ func scenarioMetricsTable(out io.Writer, wantColor string, itemName string, metr return } - t := cstable.New(out, wantColor) - t.SetHeaders("Current Count", "Overflows", "Instantiated", "Poured", "Expired") + t := cstable.New(out, wantColor).Writer + t.AppendHeader(table.Row{"Current Count", "Overflows", "Instantiated", "Poured", "Expired"}) - t.AddRow( + t.AppendRow(table.Row{ strconv.Itoa(metrics["curr_count"]), strconv.Itoa(metrics["overflow"]), strconv.Itoa(metrics["instantiation"]), strconv.Itoa(metrics["pour"]), strconv.Itoa(metrics["underflow"]), - ) + }) - cstable.RenderTitle(out, fmt.Sprintf("\n - (Scenario) %s:", itemName)) - t.Render() + io.WriteString(out, fmt.Sprintf("\n - (Scenario) %s:\n", itemName)) + io.WriteString(out, t.Render() + "\n") } func parserMetricsTable(out io.Writer, wantColor string, itemName string, metrics map[string]map[string]int) { - t := cstable.New(out, wantColor) - t.SetHeaders("Parsers", "Hits", "Parsed", "Unparsed") + t := cstable.New(out, wantColor).Writer + t.AppendHeader(table.Row{"Parsers", "Hits", "Parsed", "Unparsed"}) // don't show table if no hits showTable := false for source, stats := range metrics { if stats["hits"] > 0 { - t.AddRow( + t.AppendRow(table.Row{ source, strconv.Itoa(stats["hits"]), strconv.Itoa(stats["parsed"]), strconv.Itoa(stats["unparsed"]), - ) + }) showTable = true } } if 
showTable { - cstable.RenderTitle(out, fmt.Sprintf("\n - (Parser) %s:", itemName)) - t.Render() + io.WriteString(out, fmt.Sprintf("\n - (Parser) %s:\n", itemName)) + io.WriteString(out, t.Render() + "\n") } } From aabb5cca890f32c0d4fabf8c630fa3a1872c8f71 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 17 Jul 2024 12:48:07 +0200 Subject: [PATCH 226/318] CI: use go 1.22.5 (#3128) --- .github/workflows/bats-hub.yml | 2 +- .github/workflows/bats-mysql.yml | 2 +- .github/workflows/bats-postgres.yml | 2 +- .github/workflows/bats-sqlite-coverage.yml | 2 +- .github/workflows/ci-windows-build-msi.yml | 2 +- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/go-tests-windows.yml | 2 +- .github/workflows/go-tests.yml | 2 +- .github/workflows/publish-tarball-release.yml | 2 +- Dockerfile | 2 +- Dockerfile.debian | 2 +- azure-pipelines.yml | 2 +- 12 files changed, 12 insertions(+), 12 deletions(-) diff --git a/.github/workflows/bats-hub.yml b/.github/workflows/bats-hub.yml index e537aee8d1b..be6fc0b8a73 100644 --- a/.github/workflows/bats-hub.yml +++ b/.github/workflows/bats-hub.yml @@ -33,7 +33,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.4" + go-version: "1.22.5" - name: "Install bats dependencies" env: diff --git a/.github/workflows/bats-mysql.yml b/.github/workflows/bats-mysql.yml index 659102ced56..3fcc5a6a209 100644 --- a/.github/workflows/bats-mysql.yml +++ b/.github/workflows/bats-mysql.yml @@ -36,7 +36,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.4" + go-version: "1.22.5" - name: "Install bats dependencies" env: diff --git a/.github/workflows/bats-postgres.yml b/.github/workflows/bats-postgres.yml index 1d12a371430..76342a47603 100644 --- a/.github/workflows/bats-postgres.yml +++ b/.github/workflows/bats-postgres.yml @@ -45,7 +45,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.4" + go-version: "1.22.5" - 
name: "Install bats dependencies" env: diff --git a/.github/workflows/bats-sqlite-coverage.yml b/.github/workflows/bats-sqlite-coverage.yml index e72973e1b1f..87e60b071d5 100644 --- a/.github/workflows/bats-sqlite-coverage.yml +++ b/.github/workflows/bats-sqlite-coverage.yml @@ -31,7 +31,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.4" + go-version: "1.22.5" - name: "Install bats dependencies" env: diff --git a/.github/workflows/ci-windows-build-msi.yml b/.github/workflows/ci-windows-build-msi.yml index d8011b16247..9860a171233 100644 --- a/.github/workflows/ci-windows-build-msi.yml +++ b/.github/workflows/ci-windows-build-msi.yml @@ -35,7 +35,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.4" + go-version: "1.22.5" - name: Build run: make windows_installer BUILD_RE2_WASM=1 diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index ed3f7b4a4c2..982ecedbb25 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -52,7 +52,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.4" + go-version: "1.22.5" cache-dependency-path: "**/go.sum" # Initializes the CodeQL tools for scanning. 
diff --git a/.github/workflows/go-tests-windows.yml b/.github/workflows/go-tests-windows.yml index 5c2ef0e0b0d..4580061e7d2 100644 --- a/.github/workflows/go-tests-windows.yml +++ b/.github/workflows/go-tests-windows.yml @@ -34,7 +34,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.4" + go-version: "1.22.5" - name: Build run: | diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index 7f192a85b8f..01844ddddc7 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -126,7 +126,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.4" + go-version: "1.22.5" - name: Create localstack streams run: | diff --git a/.github/workflows/publish-tarball-release.yml b/.github/workflows/publish-tarball-release.yml index a5ce1b37df3..72211cb5180 100644 --- a/.github/workflows/publish-tarball-release.yml +++ b/.github/workflows/publish-tarball-release.yml @@ -25,7 +25,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.4" + go-version: "1.22.5" - name: Build the binaries run: | diff --git a/Dockerfile b/Dockerfile index 45443e971ac..7976c61e256 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # vim: set ft=dockerfile: -FROM golang:1.22.4-alpine3.20 AS build +FROM golang:1.22.5-alpine3.20 AS build ARG BUILD_VERSION diff --git a/Dockerfile.debian b/Dockerfile.debian index 6fc5a15f766..56836163c96 100644 --- a/Dockerfile.debian +++ b/Dockerfile.debian @@ -1,5 +1,5 @@ # vim: set ft=dockerfile: -FROM golang:1.22.4-bookworm AS build +FROM golang:1.22.5-bookworm AS build ARG BUILD_VERSION diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 77015d18ef3..d73637fff6b 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -21,7 +21,7 @@ stages: - task: GoTool@0 displayName: "Install Go" inputs: - version: '1.22.4' + version: '1.22.5' - pwsh: | choco install -y make From 443ec37e47942faf23822842cee8458f5e3486aa Mon Sep 17 
00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 18 Jul 2024 09:39:57 +0200 Subject: [PATCH 227/318] lint: dockerfiles (#3138) --- Dockerfile | 8 ++++---- Dockerfile.debian | 10 +++++----- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/Dockerfile b/Dockerfile index 7976c61e256..b9569065137 100644 --- a/Dockerfile +++ b/Dockerfile @@ -11,7 +11,7 @@ ENV BUILD_VERSION=${BUILD_VERSION} # wizard.sh requires GNU coreutils RUN apk add --no-cache git g++ gcc libc-dev make bash gettext binutils-gold coreutils pkgconfig && \ - wget https://github.com/google/re2/archive/refs/tags/${RE2_VERSION}.tar.gz && \ + wget -q https://github.com/google/re2/archive/refs/tags/${RE2_VERSION}.tar.gz && \ tar -xzf ${RE2_VERSION}.tar.gz && \ cd re2-${RE2_VERSION} && \ make install && \ @@ -32,7 +32,7 @@ RUN make clean release DOCKER_BUILD=1 BUILD_STATIC=1 CGO_CFLAGS="-D_LARGEFILE64_ # In case we need to remove agents here.. # cscli machines list -o json | yq '.[].machineId' | xargs -r cscli machines delete -FROM alpine:latest as slim +FROM alpine:latest AS slim RUN apk add --no-cache --repository=http://dl-cdn.alpinelinux.org/alpine/edge/community tzdata bash rsync && \ mkdir -p /staging/etc/crowdsec && \ @@ -47,9 +47,9 @@ COPY --from=build /go/src/crowdsec/docker/config.yaml /staging/etc/crowdsec/conf COPY --from=build /var/lib/crowdsec /staging/var/lib/crowdsec RUN yq -n '.url="http://0.0.0.0:8080"' | install -m 0600 /dev/stdin /staging/etc/crowdsec/local_api_credentials.yaml -ENTRYPOINT /bin/bash /docker_start.sh +ENTRYPOINT ["/bin/bash", "/docker_start.sh"] -FROM slim as full +FROM slim AS full # Due to the wizard using cp -n, we have to copy the config files directly from the source as -n does not exist in busybox cp # The files are here for reference, as users will need to mount a new version to be actually able to use notifications diff --git a/Dockerfile.debian b/Dockerfile.debian index 56836163c96..a256cfa9ab3 100644 --- 
a/Dockerfile.debian +++ b/Dockerfile.debian @@ -37,7 +37,7 @@ RUN make clean release DOCKER_BUILD=1 BUILD_STATIC=1 && \ # In case we need to remove agents here.. # cscli machines list -o json | yq '.[].machineId' | xargs -r cscli machines delete -FROM debian:bookworm-slim as slim +FROM debian:bookworm-slim AS slim ENV DEBIAN_FRONTEND=noninteractive ENV DEBCONF_NOWARNINGS="yes" @@ -63,9 +63,9 @@ COPY --from=build /go/src/crowdsec/docker/config.yaml /staging/etc/crowdsec/conf RUN yq -n '.url="http://0.0.0.0:8080"' | install -m 0600 /dev/stdin /staging/etc/crowdsec/local_api_credentials.yaml && \ yq eval -i ".plugin_config.group = \"nogroup\"" /staging/etc/crowdsec/config.yaml -ENTRYPOINT /bin/bash docker_start.sh +ENTRYPOINT ["/bin/bash", "docker_start.sh"] -FROM slim as plugins +FROM slim AS plugins # Due to the wizard using cp -n, we have to copy the config files directly from the source as -n does not exist in busybox cp # The files are here for reference, as users will need to mount a new version to be actually able to use notifications @@ -79,10 +79,10 @@ COPY --from=build \ COPY --from=build /usr/local/lib/crowdsec/plugins /usr/local/lib/crowdsec/plugins -FROM slim as geoip +FROM slim AS geoip COPY --from=build /var/lib/crowdsec /staging/var/lib/crowdsec -FROM plugins as full +FROM plugins AS full COPY --from=build /var/lib/crowdsec /staging/var/lib/crowdsec From 8f1abc300d6315ec3785cf64e5fb8a914e4c4126 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 18 Jul 2024 09:58:45 +0200 Subject: [PATCH 228/318] docker: make sure the sqlite db is present before changing GID (#3140) --- docker/docker_start.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docker/docker_start.sh b/docker/docker_start.sh index a3e9226bced..4db421e7c87 100755 --- a/docker/docker_start.sh +++ b/docker/docker_start.sh @@ -333,6 +333,8 @@ fi # crowdsec sqlite database permissions if [ "$GID" != "" ]; then if istrue "$(conf_get '.db_config.type == 
"sqlite"')"; then + # force the creation of the db file(s) + cscli machines inspect create-db --error >/dev/null 2>&1 || : # don't fail if the db is not there yet if chown -f ":$GID" "$(conf_get '.db_config.db_path')" 2>/dev/null; then echo "sqlite database permissions updated" From 35f97d4855943fdaab5c0528a2c5d94f16b758cf Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 18 Jul 2024 11:13:18 +0200 Subject: [PATCH 229/318] bats: curl helpers to mock log processors and bouncers (#3141) --- test/bats/10_bouncers.bats | 18 +++++++++++++ test/lib/db/instance-mysql | 2 +- test/lib/setup_file.sh | 55 +++++++++++++++++++++++++++++++++----- 3 files changed, 68 insertions(+), 7 deletions(-) diff --git a/test/bats/10_bouncers.bats b/test/bats/10_bouncers.bats index b6efbd06650..a89c9f9dd65 100644 --- a/test/bats/10_bouncers.bats +++ b/test/bats/10_bouncers.bats @@ -42,6 +42,24 @@ teardown() { assert_json '[]' } +@test "bouncer api-key auth" { + rune -0 cscli bouncers add ciTestBouncer --key "goodkey" + + # connect with good credentials + rune -0 curl-tcp "/v1/decisions" -sS --fail-with-body -H "X-Api-Key: goodkey" + assert_output null + + # connect with bad credentials + rune -22 curl-tcp "/v1/decisions" -sS --fail-with-body -H "X-Api-Key: badkey" + assert_stderr --partial 'error: 403' + assert_json '{message:"access forbidden"}' + + # connect with no credentials + rune -22 curl-tcp "/v1/decisions" -sS --fail-with-body + assert_stderr --partial 'error: 403' + assert_json '{message:"access forbidden"}' +} + @test "bouncers delete has autocompletion" { rune -0 cscli bouncers add foo1 rune -0 cscli bouncers add foo2 diff --git a/test/lib/db/instance-mysql b/test/lib/db/instance-mysql index 6b40c84acba..df38f09761f 100755 --- a/test/lib/db/instance-mysql +++ b/test/lib/db/instance-mysql @@ -21,7 +21,7 @@ about() { check_requirements() { if ! 
command -v mysql >/dev/null; then - die "missing required program 'mysql' as a mysql client (package mariadb-client-core-10.6 on debian like system)" + die "missing required program 'mysql' as a mysql client (package mariadb-client on debian like system)" fi } diff --git a/test/lib/setup_file.sh b/test/lib/setup_file.sh index 65c600d1769..7cbced01ef1 100755 --- a/test/lib/setup_file.sh +++ b/test/lib/setup_file.sh @@ -282,18 +282,61 @@ rune() { } export -f rune -# call the lapi through unix socket with an API_KEY (authenticates as a bouncer) -# after $1, pass throught extra arguments to curl -curl-with-key() { +# call the lapi through unix socket +# the path (and query string) must be the first parameter, the others will be passed to curl +curl-socket() { [[ -z "$1" ]] && { fail "${FUNCNAME[0]}: missing path"; } local path=$1 shift - [[ -z "$API_KEY" ]] && { fail "${FUNCNAME[0]}: missing API_KEY"; } local socket socket=$(config_get '.api.server.listen_socket') [[ -z "$socket" ]] && { fail "${FUNCNAME[0]}: missing .api.server.listen_socket"; } - # curl needs a fake hostname when using a unix socket - curl -sS --fail-with-body -H "X-Api-Key: $API_KEY" --unix-socket "$socket" "http://lapi$path" "$@" + curl --unix-socket "$socket" "http://lapi$path" "$@" +} +export -f curl-socket + +# call the lapi through tcp +# the path (and query string) must be the first parameter, the others will be passed to curl +curl-tcp() { + [[ -z "$1" ]] && { fail "${FUNCNAME[0]}: missing path"; } + local path=$1 + shift + local cred + cred=$(config_get .api.client.credentials_path) + local base_url + base_url="$(yq '.url' < "$cred")" + curl "$base_url$path" "$@" +} +export -f curl-tcp + +# call the lapi through unix socket with an API_KEY (authenticates as a bouncer) +# after $1, pass throught extra arguments to curl +curl-with-key() { + [[ -z "$API_KEY" ]] && { fail "${FUNCNAME[0]}: missing API_KEY"; } + curl-tcp "$@" -sS --fail-with-body -H "X-Api-Key: $API_KEY" } export -f curl-with-key 
+ +# call the lapi through unix socket with a TOKEN (authenticates as a machine) +# after $1, pass throught extra arguments to curl +curl-with-token() { + [[ -z "$TOKEN" ]] && { fail "${FUNCNAME[0]}: missing TOKEN"; } + # curl needs a fake hostname when using a unix socket + curl-tcp "$@" -sS --fail-with-body -H "Authorization: Bearer $TOKEN" +} +export -f curl-with-token + +# as a log processor, connect to lapi and get a token +lp-get-token() { + local cred + cred=$(config_get .api.client.credentials_path) + local resp + resp=$(yq -oj -I0 '{"machine_id":.login,"password":.password}' < "$cred" | curl-socket '/v1/watchers/login' -s -X POST --data-binary @-) + if [[ "$(yq -e '.code' <<<"$resp")" != 200 ]]; then + echo "login_lp: failed to login" >&3 + return 1 + fi + echo "$resp" | yq -r '.token' +} +export -f lp-get-token From 5b4f924d5f2855813a1205fee740d677d7129885 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 22 Jul 2024 10:54:39 +0200 Subject: [PATCH 230/318] allow .index.json to embed item content (#3145) --- pkg/cwhub/item.go | 1 + pkg/cwhub/itemupgrade.go | 50 ++++++++++++++++++++++++++++++---- pkg/cwhub/testdata/index1.json | 8 +----- pkg/cwhub/testdata/index2.json | 9 +----- test/bats/20_hub.bats | 2 +- test/lib/setup_file.sh | 2 +- 6 files changed, 49 insertions(+), 23 deletions(-) diff --git a/pkg/cwhub/item.go b/pkg/cwhub/item.go index 4249a20e134..32d1acf94ff 100644 --- a/pkg/cwhub/item.go +++ b/pkg/cwhub/item.go @@ -109,6 +109,7 @@ type Item struct { Name string `json:"name,omitempty" yaml:"name,omitempty"` // usually "author/name" FileName string `json:"file_name,omitempty" yaml:"file_name,omitempty"` // eg. 
apache2-logs.yaml Description string `json:"description,omitempty" yaml:"description,omitempty"` + Content string `json:"content,omitempty" yaml:"-"` Author string `json:"author,omitempty" yaml:"author,omitempty"` References []string `json:"references,omitempty" yaml:"references,omitempty"` diff --git a/pkg/cwhub/itemupgrade.go b/pkg/cwhub/itemupgrade.go index d74544ddaed..1aebb3caf29 100644 --- a/pkg/cwhub/itemupgrade.go +++ b/pkg/cwhub/itemupgrade.go @@ -4,9 +4,13 @@ package cwhub import ( "context" + "crypto" + "encoding/base64" + "encoding/hex" "errors" "fmt" "os" + "path/filepath" "github.com/sirupsen/logrus" @@ -110,16 +114,50 @@ func (i *Item) downloadLatest(ctx context.Context, overwrite bool, updateOnly bo // FetchContentTo downloads the last version of the item's YAML file to the specified path. func (i *Item) FetchContentTo(ctx context.Context, destPath string) (bool, string, error) { + wantHash := i.latestHash() + if wantHash == "" { + return false, "", errors.New("latest hash missing from index. The index file is invalid, please run 'cscli hub update' and try again") + } + + // Use the embedded content if available + if i.Content != "" { + // the content was historically base64 encoded + content, err := base64.StdEncoding.DecodeString(i.Content) + if err != nil { + content = []byte(i.Content) + } + + dir := filepath.Dir(destPath) + + if err := os.MkdirAll(dir, 0o755); err != nil { + return false, "", fmt.Errorf("while creating %s: %w", dir, err) + } + + // check sha256 + hash := crypto.SHA256.New() + if _, err := hash.Write(content); err != nil { + return false, "", fmt.Errorf("while hashing %s: %w", i.Name, err) + } + + gotHash := hex.EncodeToString(hash.Sum(nil)) + if gotHash != wantHash { + return false, "", fmt.Errorf("hash mismatch: expected %s, got %s. 
The index file is invalid, please run 'cscli hub update' and try again", wantHash, gotHash) + } + + if err := os.WriteFile(destPath, content, 0o600); err != nil { + return false, "", fmt.Errorf("while writing %s: %w", destPath, err) + } + + i.hub.logger.Debugf("Wrote %s content from .index.json to %s", i.Name, destPath) + + return true, fmt.Sprintf("(embedded in %s)", i.hub.local.HubIndexFile), nil + } + url, err := i.hub.remote.urlTo(i.RemotePath) if err != nil { return false, "", fmt.Errorf("failed to build request: %w", err) } - wantHash := i.latestHash() - if wantHash == "" { - return false, "", errors.New("latest hash missing from index") - } - d := downloader. New(). WithHTTPClient(hubClient). @@ -167,7 +205,7 @@ func (i *Item) download(ctx context.Context, overwrite bool) (bool, error) { downloaded, _, err := i.FetchContentTo(ctx, finalPath) if err != nil { - return false, fmt.Errorf("while downloading %s: %w", i.Name, err) + return false, err } if downloaded { diff --git a/pkg/cwhub/testdata/index1.json b/pkg/cwhub/testdata/index1.json index a7e6ef6153b..59548bda379 100644 --- a/pkg/cwhub/testdata/index1.json +++ b/pkg/cwhub/testdata/index1.json @@ -10,7 +10,6 @@ } }, "long_description": "bG9uZyBkZXNjcmlwdGlvbgo=", - "content": "bG9uZyBkZXNjcmlwdGlvbgo=", "description": "foobar collection : foobar", "author": "crowdsecurity", "labels": null, @@ -34,7 +33,6 @@ } }, "long_description": "bG9uZyBkZXNjcmlwdGlvbgo=", - "content": "bG9uZyBkZXNjcmlwdGlvbgo=", "description": "test_collection : foobar", "author": "crowdsecurity", "labels": null, @@ -52,7 +50,6 @@ } }, "long_description": "bG9uZyBkZXNjcmlwdGlvbgo=", - "content": "bG9uZyBkZXNjcmlwdGlvbgo=", "description": "foobar collection : foobar", "author": "crowdsecurity", "labels": null, @@ -73,7 +70,6 @@ } }, "long_description": "bG9uZyBkZXNjcmlwdGlvbgo=", - "content": "bG9uZyBkZXNjcmlwdGlvbgo=", "description": "A foobar parser", "author": "crowdsecurity", "labels": null @@ -89,7 +85,6 @@ } }, 
"long_description": "bG9uZyBkZXNjcmlwdGlvbgo=", - "content": "bG9uZyBkZXNjcmlwdGlvbgo=", "description": "A foobar parser", "author": "crowdsecurity", "labels": null @@ -107,7 +102,6 @@ } }, "long_description": "bG9uZyBkZXNjcmlwdGlvbgo=", - "content": "bG9uZyBkZXNjcmlwdGlvbgo=", "description": "a foobar scenario", "author": "crowdsecurity", "labels": { @@ -118,4 +112,4 @@ } } } -} \ No newline at end of file +} diff --git a/pkg/cwhub/testdata/index2.json b/pkg/cwhub/testdata/index2.json index 7f97ebf2308..41c4ccba83a 100644 --- a/pkg/cwhub/testdata/index2.json +++ b/pkg/cwhub/testdata/index2.json @@ -10,7 +10,6 @@ } }, "long_description": "bG9uZyBkZXNjcmlwdGlvbgo=", - "content": "bG9uZyBkZXNjcmlwdGlvbgo=", "description": "foobar collection : foobar", "author": "crowdsecurity", "labels": null, @@ -38,7 +37,6 @@ } }, "long_description": "bG9uZyBkZXNjcmlwdGlvbgo=", - "content": "bG9uZyBkZXNjcmlwdGlvbgo=", "description": "test_collection : foobar", "author": "crowdsecurity", "labels": null, @@ -57,7 +55,6 @@ } }, "long_description": "bG9uZyBkZXNjcmlwdGlvbgo=", - "content": "bG9uZyBkZXNjcmlwdGlvbgo=", "description": "foobar collection : foobar", "author": "crowdsecurity", "labels": null, @@ -78,7 +75,6 @@ } }, "long_description": "bG9uZyBkZXNjcmlwdGlvbgo=", - "content": "bG9uZyBkZXNjcmlwdGlvbgo=", "description": "A foobar parser", "author": "crowdsecurity", "labels": null @@ -94,7 +90,6 @@ } }, "long_description": "bG9uZyBkZXNjcmlwdGlvbgo=", - "content": "bG9uZyBkZXNjcmlwdGlvbgo=", "description": "A foobar parser", "author": "crowdsecurity", "labels": null @@ -112,7 +107,6 @@ } }, "long_description": "bG9uZyBkZXNjcmlwdGlvbgo=", - "content": "bG9uZyBkZXNjcmlwdGlvbgo=", "description": "a foobar scenario", "author": "crowdsecurity", "labels": { @@ -132,7 +126,6 @@ } }, "long_description": "bG9uZyBkZXNjcmlwdGlvbgo=", - "content": "bG9uZyBkZXNjcmlwdGlvbgo=", "description": "a foobar scenario", "author": "crowdsecurity", "labels": { @@ -143,4 +136,4 @@ } } } -} \ No newline at 
end of file +} diff --git a/test/bats/20_hub.bats b/test/bats/20_hub.bats index 0d9f29b2418..b8fa1e9efca 100644 --- a/test/bats/20_hub.bats +++ b/test/bats/20_hub.bats @@ -76,7 +76,7 @@ teardown() { assert_stderr --partial "invalid hub item appsec-rules:crowdsecurity/vpatch-laravel-debug-mode: latest version missing from index" rune -1 cscli appsec-rules install crowdsecurity/vpatch-laravel-debug-mode --force - assert_stderr --partial "error while installing 'crowdsecurity/vpatch-laravel-debug-mode': while downloading crowdsecurity/vpatch-laravel-debug-mode: latest hash missing from index" + assert_stderr --partial "error while installing 'crowdsecurity/vpatch-laravel-debug-mode': latest hash missing from index. The index file is invalid, please run 'cscli hub update' and try again" } @test "missing reference in hub index" { diff --git a/test/lib/setup_file.sh b/test/lib/setup_file.sh index 7cbced01ef1..39a084596e2 100755 --- a/test/lib/setup_file.sh +++ b/test/lib/setup_file.sh @@ -265,7 +265,7 @@ hub_strip_index() { local INDEX INDEX=$(config_get .config_paths.index_path) local hub_min - hub_min=$(jq <"$INDEX" 'del(..|.content?) | del(..|.long_description?) | del(..|.deprecated?) | del (..|.labels?)') + hub_min=$(jq <"$INDEX" 'del(..|.long_description?) | del(..|.deprecated?) 
| del (..|.labels?)') echo "$hub_min" >"$INDEX" } export -f hub_strip_index From 30c0d8997d24e7b8323ee54ea1541cae10ddc108 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 22 Jul 2024 12:12:27 +0200 Subject: [PATCH 231/318] CI: update action versions (#3143) --- .github/workflows/docker-tests.yml | 4 ++-- .github/workflows/go-tests-windows.yml | 2 +- .github/workflows/go-tests.yml | 2 +- .github/workflows/publish-docker.yml | 6 +++--- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/docker-tests.yml b/.github/workflows/docker-tests.yml index d3ae4f90d79..228a0829984 100644 --- a/.github/workflows/docker-tests.yml +++ b/.github/workflows/docker-tests.yml @@ -35,10 +35,10 @@ jobs: - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 with: - config: .github/buildkit.toml + buildkitd-config: .github/buildkit.toml - name: "Build image" - uses: docker/build-push-action@v5 + uses: docker/build-push-action@v6 with: context: . 
file: ./Dockerfile${{ matrix.flavor == 'debian' && '.debian' || '' }} diff --git a/.github/workflows/go-tests-windows.yml b/.github/workflows/go-tests-windows.yml index 4580061e7d2..cfea128acf7 100644 --- a/.github/workflows/go-tests-windows.yml +++ b/.github/workflows/go-tests-windows.yml @@ -55,7 +55,7 @@ jobs: token: ${{ secrets.CODECOV_TOKEN }} - name: golangci-lint - uses: golangci/golangci-lint-action@v4 + uses: golangci/golangci-lint-action@v6 with: version: v1.59 args: --issues-exit-code=1 --timeout 10m diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index 01844ddddc7..ab45bd88717 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -156,7 +156,7 @@ jobs: token: ${{ secrets.CODECOV_TOKEN }} - name: golangci-lint - uses: golangci/golangci-lint-action@v4 + uses: golangci/golangci-lint-action@v6 with: version: v1.59 args: --issues-exit-code=1 --timeout 10m diff --git a/.github/workflows/publish-docker.yml b/.github/workflows/publish-docker.yml index 005db0cc9d1..11b4401c6da 100644 --- a/.github/workflows/publish-docker.yml +++ b/.github/workflows/publish-docker.yml @@ -47,7 +47,7 @@ jobs: - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 with: - config: .github/buildkit.toml + buildkitd-config: .github/buildkit.toml - name: Login to DockerHub uses: docker/login-action@v3 @@ -93,7 +93,7 @@ jobs: - name: Build and push image (slim) if: ${{ inputs.slim }} - uses: docker/build-push-action@v5 + uses: docker/build-push-action@v6 with: context: . file: ./Dockerfile${{ inputs.debian && '.debian' || '' }} @@ -109,7 +109,7 @@ jobs: BUILD_VERSION=${{ inputs.crowdsec_version }} - name: Build and push image (full) - uses: docker/build-push-action@v5 + uses: docker/build-push-action@v6 with: context: . 
file: ./Dockerfile${{ inputs.debian && '.debian' || '' }} From a3d7900b5f625163bf1b2e4a3dada969cefab4fe Mon Sep 17 00:00:00 2001 From: blotus Date: Mon, 22 Jul 2024 12:14:46 +0200 Subject: [PATCH 232/318] update expr (#3144) --- .github/workflows/go-tests-windows.yml | 2 +- Makefile | 7 +- cmd/crowdsec-cli/config_show.go | 2 +- go.mod | 1 + go.sum | 2 + pkg/acquisition/acquisition.go | 4 +- pkg/alertcontext/alertcontext.go | 4 +- pkg/appsec/appsec.go | 4 +- pkg/csprofiles/csprofiles.go | 4 +- pkg/exprhelpers/debugger.go | 144 +++++++++---------------- pkg/exprhelpers/debugger_test.go | 4 +- pkg/exprhelpers/exprlib_test.go | 2 +- pkg/exprhelpers/helpers.go | 2 +- pkg/exprhelpers/jsonextract_test.go | 2 +- pkg/hubtest/parser_assert.go | 2 +- pkg/hubtest/scenario_assert.go | 2 +- pkg/leakybucket/bayesian.go | 4 +- pkg/leakybucket/conditional.go | 4 +- pkg/leakybucket/manager_load.go | 4 +- pkg/leakybucket/overflow_filter.go | 4 +- pkg/leakybucket/reset_filter.go | 4 +- pkg/leakybucket/uniq.go | 4 +- pkg/parser/grok_pattern.go | 2 +- pkg/parser/node.go | 5 +- pkg/parser/whitelist.go | 4 +- pkg/setup/detect.go | 2 +- pkg/types/event.go | 2 +- 27 files changed, 93 insertions(+), 134 deletions(-) diff --git a/.github/workflows/go-tests-windows.yml b/.github/workflows/go-tests-windows.yml index cfea128acf7..9d8051821c8 100644 --- a/.github/workflows/go-tests-windows.yml +++ b/.github/workflows/go-tests-windows.yml @@ -43,7 +43,7 @@ jobs: - name: Run tests run: | go install github.com/kyoh86/richgo@v0.3.10 - go test -coverprofile coverage.out -covermode=atomic ./... > out.txt + go test -tags expr_debug -coverprofile coverage.out -covermode=atomic ./... > out.txt if(!$?) 
{ cat out.txt | sed 's/ *coverage:.*of statements in.*//' | richgo testfilter; Exit 1 } cat out.txt | sed 's/ *coverage:.*of statements in.*//' | richgo testfilter diff --git a/Makefile b/Makefile index 97eb7ba0eea..207b5d610f0 100644 --- a/Makefile +++ b/Makefile @@ -77,7 +77,8 @@ ifneq (,$(DOCKER_BUILD)) LD_OPTS_VARS += -X 'github.com/crowdsecurity/go-cs-lib/version.System=docker' endif -GO_TAGS := netgo,osusergo,sqlite_omit_load_extension +#expr_debug tag is required to enable the debug mode in expr +GO_TAGS := netgo,osusergo,sqlite_omit_load_extension,expr_debug # this will be used by Go in the make target, some distributions require it export PKG_CONFIG_PATH:=/usr/local/lib/pkgconfig:$(PKG_CONFIG_PATH) @@ -220,11 +221,11 @@ testenv: .PHONY: test test: testenv ## Run unit tests with localstack - $(GOTEST) $(LD_OPTS) ./... + $(GOTEST) --tags=$(GO_TAGS) $(LD_OPTS) ./... .PHONY: go-acc go-acc: testenv ## Run unit tests with localstack + coverage - go-acc ./... -o coverage.out --ignore database,notifications,protobufs,cwversion,cstest,models -- $(LD_OPTS) + go-acc ./... -o coverage.out --ignore database,notifications,protobufs,cwversion,cstest,models --tags $(GO_TAGS) -- $(LD_OPTS) check_docker: @if ! 
docker info > /dev/null 2>&1; then \ diff --git a/cmd/crowdsec-cli/config_show.go b/cmd/crowdsec-cli/config_show.go index c7138c98e33..e411f5a322b 100644 --- a/cmd/crowdsec-cli/config_show.go +++ b/cmd/crowdsec-cli/config_show.go @@ -6,7 +6,7 @@ import ( "os" "text/template" - "github.com/antonmedv/expr" + "github.com/expr-lang/expr" "github.com/sanity-io/litter" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" diff --git a/go.mod b/go.mod index 6a2146efc5f..f36bbcd996d 100644 --- a/go.mod +++ b/go.mod @@ -111,6 +111,7 @@ require ( github.com/creack/pty v1.1.18 // indirect github.com/docker/distribution v2.8.2+incompatible // indirect github.com/docker/go-units v0.5.0 // indirect + github.com/expr-lang/expr v1.16.9 // indirect github.com/gabriel-vasile/mimetype v1.4.3 // indirect github.com/gin-contrib/sse v0.1.0 // indirect github.com/go-logr/logr v1.2.4 // indirect diff --git a/go.sum b/go.sum index faca9797341..d4cc2651f0f 100644 --- a/go.sum +++ b/go.sum @@ -127,6 +127,8 @@ github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDD github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/expr-lang/expr v1.16.9 h1:WUAzmR0JNI9JCiF0/ewwHB1gmcGw5wW7nWt8gc6PpCI= +github.com/expr-lang/expr v1.16.9/go.mod h1:8/vRC7+7HBzESEqt5kKpYXxrxkr31SaO8r40VO/1IT4= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= diff --git a/pkg/acquisition/acquisition.go b/pkg/acquisition/acquisition.go index 5e4a663eb9e..634557021f1 100644 --- a/pkg/acquisition/acquisition.go +++ b/pkg/acquisition/acquisition.go @@ -7,8 +7,8 @@ import ( "os" "strings" - 
"github.com/antonmedv/expr" - "github.com/antonmedv/expr/vm" + "github.com/expr-lang/expr" + "github.com/expr-lang/expr/vm" "github.com/google/uuid" "github.com/prometheus/client_golang/prometheus" log "github.com/sirupsen/logrus" diff --git a/pkg/alertcontext/alertcontext.go b/pkg/alertcontext/alertcontext.go index 9946d694363..c502def32cd 100644 --- a/pkg/alertcontext/alertcontext.go +++ b/pkg/alertcontext/alertcontext.go @@ -6,8 +6,8 @@ import ( "slices" "strconv" - "github.com/antonmedv/expr" - "github.com/antonmedv/expr/vm" + "github.com/expr-lang/expr" + "github.com/expr-lang/expr/vm" log "github.com/sirupsen/logrus" "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" diff --git a/pkg/appsec/appsec.go b/pkg/appsec/appsec.go index e43a2ad6710..7c61f2a8dfd 100644 --- a/pkg/appsec/appsec.go +++ b/pkg/appsec/appsec.go @@ -6,8 +6,8 @@ import ( "os" "regexp" - "github.com/antonmedv/expr" - "github.com/antonmedv/expr/vm" + "github.com/expr-lang/expr" + "github.com/expr-lang/expr/vm" log "github.com/sirupsen/logrus" "gopkg.in/yaml.v2" diff --git a/pkg/csprofiles/csprofiles.go b/pkg/csprofiles/csprofiles.go index 42509eaceae..52cda1ed2e1 100644 --- a/pkg/csprofiles/csprofiles.go +++ b/pkg/csprofiles/csprofiles.go @@ -4,8 +4,8 @@ import ( "fmt" "time" - "github.com/antonmedv/expr" - "github.com/antonmedv/expr/vm" + "github.com/expr-lang/expr" + "github.com/expr-lang/expr/vm" log "github.com/sirupsen/logrus" "github.com/crowdsecurity/crowdsec/pkg/csconfig" diff --git a/pkg/exprhelpers/debugger.go b/pkg/exprhelpers/debugger.go index a2bd489acfa..711aa491078 100644 --- a/pkg/exprhelpers/debugger.go +++ b/pkg/exprhelpers/debugger.go @@ -5,8 +5,9 @@ import ( "strconv" "strings" - "github.com/antonmedv/expr" - "github.com/antonmedv/expr/vm" + "github.com/expr-lang/expr" + "github.com/expr-lang/expr/file" + "github.com/expr-lang/expr/vm" log "github.com/sirupsen/logrus" ) @@ -106,62 +107,30 @@ func (o *OpOutput) String() string { return ret + "" } -func (erp ExprRuntimeDebug) 
extractCode(ip int, program *vm.Program, parts []string) string { +func (erp ExprRuntimeDebug) extractCode(ip int, program *vm.Program) string { + locations := program.Locations() + src := string(program.Source()) - //log.Tracef("# extracting code for ip %d [%s]", ip, parts[1]) - if program.Locations[ip].Line == 0 { //it seems line is zero when it's not actual code (ie. op push at the beginning) - log.Tracef("zero location ?") - return "" - } - startLine := program.Locations[ip].Line - startColumn := program.Locations[ip].Column - lines := strings.Split(program.Source.Content(), "\n") - - endCol := 0 - endLine := 0 - - for i := ip + 1; i < len(program.Locations); i++ { - if program.Locations[i].Line > startLine || (program.Locations[i].Line == startLine && program.Locations[i].Column > startColumn) { - //we didn't had values yet and it's superior to current one, take it - if endLine == 0 && endCol == 0 { - endLine = program.Locations[i].Line - endCol = program.Locations[i].Column - } - //however, we are looking for the closest upper one - if program.Locations[i].Line < endLine || (program.Locations[i].Line == endLine && program.Locations[i].Column < endCol) { - endLine = program.Locations[i].Line - endCol = program.Locations[i].Column - } + currentInstruction := locations[ip] - } - } - //maybe it was the last instruction ? 
- if endCol == 0 && endLine == 0 { - endLine = len(lines) - endCol = len(lines[endLine-1]) - } - code_snippet := "" - startLine -= 1 //line count starts at 1 - endLine -= 1 - - for i := startLine; i <= endLine; i++ { - if i == startLine { - if startLine != endLine { - code_snippet += lines[i][startColumn:] - continue + var closest *file.Location + + for i := ip + 1; i < len(locations); i++ { + if locations[i].From > currentInstruction.From { + if closest == nil || locations[i].From < closest.From { + closest = &locations[i] } - code_snippet += lines[i][startColumn:endCol] - break - } - if i == endLine { - code_snippet += lines[i][:endCol] - break } - code_snippet += lines[i] } - log.Tracef("#code extract for ip %d [%s] -> '%s'", ip, parts[1], code_snippet) - return cleanTextForDebug(code_snippet) + var end int + if closest == nil { + end = len(src) + } else { + end = closest.From + } + + return cleanTextForDebug(src[locations[ip].From:end]) } func autoQuote(v any) string { @@ -189,7 +158,7 @@ func (erp ExprRuntimeDebug) ipDebug(ip int, vm *vm.VM, program *vm.Program, part prevIdxOut = IdxOut - 1 currentDepth = outputs[prevIdxOut].CodeDepth if outputs[prevIdxOut].Func && !outputs[prevIdxOut].Finalized { - stack := vm.Stack() + stack := vm.Stack num_items := 1 for i := len(stack) - 1; i >= 0 && num_items > 0; i-- { outputs[prevIdxOut].FuncResults = append(outputs[prevIdxOut].FuncResults, autoQuote(stack[i])) @@ -197,7 +166,7 @@ func (erp ExprRuntimeDebug) ipDebug(ip int, vm *vm.VM, program *vm.Program, part } outputs[prevIdxOut].Finalized = true } else if (outputs[prevIdxOut].Comparison || outputs[prevIdxOut].Condition) && !outputs[prevIdxOut].Finalized { - stack := vm.Stack() + stack := vm.Stack outputs[prevIdxOut].StrConditionResult = fmt.Sprintf("%+v", stack) if val, ok := stack[0].(bool); ok { outputs[prevIdxOut].ConditionResult = new(bool) @@ -207,10 +176,10 @@ func (erp ExprRuntimeDebug) ipDebug(ip int, vm *vm.VM, program *vm.Program, part } } - 
erp.Logger.Tracef("[STEP %d:%s] (stack:%+v) (parts:%+v) {depth:%d}", ip, parts[1], vm.Stack(), parts, currentDepth) + erp.Logger.Tracef("[STEP %d:%s] (stack:%+v) (parts:%+v) {depth:%d}", ip, parts[1], vm.Stack, parts, currentDepth) out := OpOutput{} out.CodeDepth = currentDepth - out.Code = erp.extractCode(ip, program, parts) + out.Code = erp.extractCode(ip, program) switch parts[1] { case "OpBegin": @@ -221,8 +190,8 @@ func (erp ExprRuntimeDebug) ipDebug(ip int, vm *vm.VM, program *vm.Program, part out.CodeDepth -= IndentStep out.BlockEnd = true //OpEnd can carry value, if it's any/all/count etc. - if len(vm.Stack()) > 0 { - out.StrConditionResult = fmt.Sprintf("%v", vm.Stack()) + if len(vm.Stack) > 0 { + out.StrConditionResult = fmt.Sprintf("%v", vm.Stack) } outputs = append(outputs, out) case "OpNot": @@ -241,7 +210,7 @@ func (erp ExprRuntimeDebug) ipDebug(ip int, vm *vm.VM, program *vm.Program, part out.StrConditionResult = "false" outputs = append(outputs, out) case "OpJumpIfTrue": //OR - stack := vm.Stack() + stack := vm.Stack out.JumpIf = true out.IfTrue = true out.StrConditionResult = fmt.Sprintf("%v", stack[0]) @@ -252,7 +221,7 @@ func (erp ExprRuntimeDebug) ipDebug(ip int, vm *vm.VM, program *vm.Program, part } outputs = append(outputs, out) case "OpJumpIfFalse": //AND - stack := vm.Stack() + stack := vm.Stack out.JumpIf = true out.IfFalse = true out.StrConditionResult = fmt.Sprintf("%v", stack[0]) @@ -264,7 +233,7 @@ func (erp ExprRuntimeDebug) ipDebug(ip int, vm *vm.VM, program *vm.Program, part case "OpCall1": //Op for function calls out.Func = true out.FuncName = parts[3] - stack := vm.Stack() + stack := vm.Stack num_items := 1 for i := len(stack) - 1; i >= 0 && num_items > 0; i-- { out.Args = append(out.Args, autoQuote(stack[i])) @@ -274,7 +243,7 @@ func (erp ExprRuntimeDebug) ipDebug(ip int, vm *vm.VM, program *vm.Program, part case "OpCall2": //Op for function calls out.Func = true out.FuncName = parts[3] - stack := vm.Stack() + stack := vm.Stack 
num_items := 2 for i := len(stack) - 1; i >= 0 && num_items > 0; i-- { out.Args = append(out.Args, autoQuote(stack[i])) @@ -284,7 +253,7 @@ func (erp ExprRuntimeDebug) ipDebug(ip int, vm *vm.VM, program *vm.Program, part case "OpCall3": //Op for function calls out.Func = true out.FuncName = parts[3] - stack := vm.Stack() + stack := vm.Stack num_items := 3 for i := len(stack) - 1; i >= 0 && num_items > 0; i-- { out.Args = append(out.Args, autoQuote(stack[i])) @@ -297,7 +266,7 @@ func (erp ExprRuntimeDebug) ipDebug(ip int, vm *vm.VM, program *vm.Program, part case "OpCallN": //Op for function calls with more than 3 args out.Func = true out.FuncName = parts[1] - stack := vm.Stack() + stack := vm.Stack //for OpCallN, we get the number of args if len(program.Arguments) >= ip { @@ -310,19 +279,19 @@ func (erp ExprRuntimeDebug) ipDebug(ip int, vm *vm.VM, program *vm.Program, part } } } else { //let's blindly take the items on stack - for _, val := range vm.Stack() { + for _, val := range vm.Stack { out.Args = append(out.Args, autoQuote(val)) } } outputs = append(outputs, out) case "OpEqualString", "OpEqual", "OpEqualInt": //comparisons - stack := vm.Stack() + stack := vm.Stack out.Comparison = true out.Left = autoQuote(stack[0]) out.Right = autoQuote(stack[1]) outputs = append(outputs, out) case "OpIn": //in operator - stack := vm.Stack() + stack := vm.Stack out.Condition = true out.ConditionIn = true //seems that we tend to receive stack[1] as a map. @@ -332,7 +301,7 @@ func (erp ExprRuntimeDebug) ipDebug(ip int, vm *vm.VM, program *vm.Program, part out.Args = append(out.Args, autoQuote(stack[1])) outputs = append(outputs, out) case "OpContains": //kind OpIn , but reverse - stack := vm.Stack() + stack := vm.Stack out.Condition = true out.ConditionContains = true //seems that we tend to receive stack[1] as a map. 
@@ -347,7 +316,10 @@ func (erp ExprRuntimeDebug) ipDebug(ip int, vm *vm.VM, program *vm.Program, part func (erp ExprRuntimeDebug) ipSeek(ip int) []string { for i := range len(erp.Lines) { - parts := strings.Split(erp.Lines[i], "\t") + parts := strings.Fields(erp.Lines[i]) + if len(parts) == 0 { + continue + } if parts[0] == strconv.Itoa(ip) { return parts } @@ -371,7 +343,7 @@ func cleanTextForDebug(text string) string { } func DisplayExprDebug(program *vm.Program, outputs []OpOutput, logger *log.Entry, ret any) { - logger.Debugf("dbg(result=%v): %s", ret, cleanTextForDebug(program.Source.Content())) + logger.Debugf("dbg(result=%v): %s", ret, cleanTextForDebug(string(program.Source()))) for _, output := range outputs { logger.Debugf("%s", output.String()) } @@ -383,57 +355,41 @@ func RunWithDebug(program *vm.Program, env interface{}, logger *log.Entry) ([]Op erp := ExprRuntimeDebug{ Logger: logger, } - debugErr := make(chan error) - var buf strings.Builder vm := vm.Debug() - done := false - program.Opcodes(&buf) - lines := strings.Split(buf.String(), "\n") + opcodes := program.Disassemble() + lines := strings.Split(opcodes, "\n") erp.Lines = lines go func() { + //We must never return until the execution of the program is done var err error erp.Logger.Tracef("[START] ip 0") ops := erp.ipSeek(0) if ops == nil { - debugErr <- fmt.Errorf("failed getting ops for ip 0") - return + log.Warningf("error while debugging expr: failed getting ops for ip 0") } if outputs, err = erp.ipDebug(0, vm, program, ops, outputs); err != nil { - debugErr <- fmt.Errorf("error while debugging at ip 0") + log.Warningf("error while debugging expr: error while debugging at ip 0") } vm.Step() for ip := range vm.Position() { ops := erp.ipSeek(ip) - if ops == nil { //we reached the end of the program, we shouldn't throw an error + if ops == nil { erp.Logger.Tracef("[DONE] ip %d", ip) - debugErr <- nil - return + break } if outputs, err = erp.ipDebug(ip, vm, program, ops, outputs); err != nil { - 
debugErr <- fmt.Errorf("error while debugging at ip %d", ip) - return - } - if done { - debugErr <- nil - return + log.Warningf("error while debugging expr: error while debugging at ip %d", ip) } vm.Step() } - debugErr <- nil }() var return_error error ret, err := vm.Run(program, env) - done = true //if the expr runtime failed, we don't need to wait for the debug to finish if err != nil { return_error = err - } else { - err = <-debugErr - if err != nil { - log.Warningf("error while debugging expr: %s", err) - } } //the overall result of expression is the result of last op ? if len(outputs) > 0 { diff --git a/pkg/exprhelpers/debugger_test.go b/pkg/exprhelpers/debugger_test.go index 5f239885539..efdcbc1a769 100644 --- a/pkg/exprhelpers/debugger_test.go +++ b/pkg/exprhelpers/debugger_test.go @@ -5,8 +5,8 @@ import ( "strings" "testing" - "github.com/antonmedv/expr" "github.com/davecgh/go-spew/spew" + "github.com/expr-lang/expr" log "github.com/sirupsen/logrus" "github.com/crowdsecurity/crowdsec/pkg/types" @@ -52,6 +52,7 @@ type teststruct struct { Foo string } +// You need to add the tag expr_debug when running the tests func TestBaseDbg(t *testing.T) { defaultEnv := map[string]interface{}{ "queue": &types.Queue{}, @@ -265,7 +266,6 @@ func TestBaseDbg(t *testing.T) { {Code: "Upper(base_string)", CodeDepth: 0, Func: true, FuncName: "Upper", Args: []string{"\"hello world\""}, FuncResults: []string{"\"HELLO WORLD\""}, ConditionResult: (*bool)(nil), Finalized: true}, {Code: "Upper('/someotherurl?account-name=admin&account-status=1&ow=cmd') )", CodeDepth: 0, Func: true, FuncName: "Upper", Args: []string{"\"/someotherurl?account-name=admin&account...\""}, FuncResults: []string{"\"/SOMEOTHERURL?ACCOUNT-NAME=ADMIN&ACCOUNT...\""}, ConditionResult: (*bool)(nil), Finalized: true}, {Code: "contains Upper('/someotherurl?account-name=admin&account-status=1&ow=cmd') )", CodeDepth: 0, Args: []string{"\"HELLO WORLD\"", "\"/SOMEOTHERURL?ACCOUNT-NAME=ADMIN&ACCOUNT...\""}, Condition: 
true, ConditionContains: true, StrConditionResult: "[false]", ConditionResult: boolPtr(false), Finalized: true}, - {Code: "and", CodeDepth: 0, JumpIf: true, IfFalse: true, StrConditionResult: "false", ConditionResult: boolPtr(false), Finalized: false}, {Code: "and", CodeDepth: 0, JumpIf: true, IfFalse: true, StrConditionResult: "false", ConditionResult: boolPtr(false), Finalized: true}, }, }, diff --git a/pkg/exprhelpers/exprlib_test.go b/pkg/exprhelpers/exprlib_test.go index b9ba1d01191..f2eb208ebfa 100644 --- a/pkg/exprhelpers/exprlib_test.go +++ b/pkg/exprhelpers/exprlib_test.go @@ -7,7 +7,7 @@ import ( "testing" "time" - "github.com/antonmedv/expr" + "github.com/expr-lang/expr" log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" diff --git a/pkg/exprhelpers/helpers.go b/pkg/exprhelpers/helpers.go index 43be5d82de0..b4324f8bbab 100644 --- a/pkg/exprhelpers/helpers.go +++ b/pkg/exprhelpers/helpers.go @@ -15,11 +15,11 @@ import ( "strings" "time" - "github.com/antonmedv/expr" "github.com/bluele/gcache" "github.com/c-robinson/iplib" "github.com/cespare/xxhash/v2" "github.com/davecgh/go-spew/spew" + "github.com/expr-lang/expr" "github.com/oschwald/geoip2-golang" "github.com/oschwald/maxminddb-golang" "github.com/prometheus/client_golang/prometheus" diff --git a/pkg/exprhelpers/jsonextract_test.go b/pkg/exprhelpers/jsonextract_test.go index 80de1619580..5845c3ae66b 100644 --- a/pkg/exprhelpers/jsonextract_test.go +++ b/pkg/exprhelpers/jsonextract_test.go @@ -3,7 +3,7 @@ package exprhelpers import ( "testing" - "github.com/antonmedv/expr" + "github.com/expr-lang/expr" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/pkg/hubtest/parser_assert.go b/pkg/hubtest/parser_assert.go index d79d26fb9d0..be4fdbdb5e6 100644 --- a/pkg/hubtest/parser_assert.go +++ b/pkg/hubtest/parser_assert.go @@ -7,7 +7,7 @@ import ( "os" "strings" - "github.com/antonmedv/expr" + 
"github.com/expr-lang/expr" log "github.com/sirupsen/logrus" "gopkg.in/yaml.v3" diff --git a/pkg/hubtest/scenario_assert.go b/pkg/hubtest/scenario_assert.go index bb004daad49..f32abf9e110 100644 --- a/pkg/hubtest/scenario_assert.go +++ b/pkg/hubtest/scenario_assert.go @@ -9,7 +9,7 @@ import ( "sort" "strings" - "github.com/antonmedv/expr" + "github.com/expr-lang/expr" log "github.com/sirupsen/logrus" "gopkg.in/yaml.v3" diff --git a/pkg/leakybucket/bayesian.go b/pkg/leakybucket/bayesian.go index e56eb097ba4..357d51f597b 100644 --- a/pkg/leakybucket/bayesian.go +++ b/pkg/leakybucket/bayesian.go @@ -3,8 +3,8 @@ package leakybucket import ( "fmt" - "github.com/antonmedv/expr" - "github.com/antonmedv/expr/vm" + "github.com/expr-lang/expr" + "github.com/expr-lang/expr/vm" "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" "github.com/crowdsecurity/crowdsec/pkg/types" diff --git a/pkg/leakybucket/conditional.go b/pkg/leakybucket/conditional.go index 0e4076c305c..a203a639743 100644 --- a/pkg/leakybucket/conditional.go +++ b/pkg/leakybucket/conditional.go @@ -4,8 +4,8 @@ import ( "fmt" "sync" - "github.com/antonmedv/expr" - "github.com/antonmedv/expr/vm" + "github.com/expr-lang/expr" + "github.com/expr-lang/expr/vm" "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" "github.com/crowdsecurity/crowdsec/pkg/types" diff --git a/pkg/leakybucket/manager_load.go b/pkg/leakybucket/manager_load.go index 3e47f1fc16f..ca2e4d17d99 100644 --- a/pkg/leakybucket/manager_load.go +++ b/pkg/leakybucket/manager_load.go @@ -11,9 +11,9 @@ import ( "sync" "time" - "github.com/antonmedv/expr" - "github.com/antonmedv/expr/vm" "github.com/davecgh/go-spew/spew" + "github.com/expr-lang/expr" + "github.com/expr-lang/expr/vm" "github.com/goombaio/namegenerator" log "github.com/sirupsen/logrus" "gopkg.in/tomb.v2" diff --git a/pkg/leakybucket/overflow_filter.go b/pkg/leakybucket/overflow_filter.go index 8ec701a3400..01dd491ed41 100644 --- a/pkg/leakybucket/overflow_filter.go +++ 
b/pkg/leakybucket/overflow_filter.go @@ -3,8 +3,8 @@ package leakybucket import ( "fmt" - "github.com/antonmedv/expr" - "github.com/antonmedv/expr/vm" + "github.com/expr-lang/expr" + "github.com/expr-lang/expr/vm" "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" "github.com/crowdsecurity/crowdsec/pkg/types" diff --git a/pkg/leakybucket/reset_filter.go b/pkg/leakybucket/reset_filter.go index 6e61510fcc7..452ccc085b1 100644 --- a/pkg/leakybucket/reset_filter.go +++ b/pkg/leakybucket/reset_filter.go @@ -3,8 +3,8 @@ package leakybucket import ( "sync" - "github.com/antonmedv/expr" - "github.com/antonmedv/expr/vm" + "github.com/expr-lang/expr" + "github.com/expr-lang/expr/vm" "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" "github.com/crowdsecurity/crowdsec/pkg/types" diff --git a/pkg/leakybucket/uniq.go b/pkg/leakybucket/uniq.go index 197246c91a0..0cc0583390b 100644 --- a/pkg/leakybucket/uniq.go +++ b/pkg/leakybucket/uniq.go @@ -3,8 +3,8 @@ package leakybucket import ( "sync" - "github.com/antonmedv/expr" - "github.com/antonmedv/expr/vm" + "github.com/expr-lang/expr" + "github.com/expr-lang/expr/vm" "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" "github.com/crowdsecurity/crowdsec/pkg/types" diff --git a/pkg/parser/grok_pattern.go b/pkg/parser/grok_pattern.go index 5b3204a4201..9c781d47aa6 100644 --- a/pkg/parser/grok_pattern.go +++ b/pkg/parser/grok_pattern.go @@ -3,7 +3,7 @@ package parser import ( "time" - "github.com/antonmedv/expr/vm" + "github.com/expr-lang/expr/vm" "github.com/crowdsecurity/grokky" ) diff --git a/pkg/parser/node.go b/pkg/parser/node.go index bb57995a129..26046ae4fd6 100644 --- a/pkg/parser/node.go +++ b/pkg/parser/node.go @@ -6,9 +6,9 @@ import ( "strings" "time" - "github.com/antonmedv/expr" - "github.com/antonmedv/expr/vm" "github.com/davecgh/go-spew/spew" + "github.com/expr-lang/expr" + "github.com/expr-lang/expr/vm" "github.com/prometheus/client_golang/prometheus" log "github.com/sirupsen/logrus" yaml "gopkg.in/yaml.v2" @@ -202,7 
+202,6 @@ func (n *Node) processWhitelist(cachedExprEnv map[string]interface{}, p *types.E return isWhitelisted, nil } - func (n *Node) processGrok(p *types.Event, cachedExprEnv map[string]any) (bool, bool, error) { // Process grok if present, should be exclusive with nodes :) clog := n.Logger diff --git a/pkg/parser/whitelist.go b/pkg/parser/whitelist.go index fd1c2a0546a..e7b93a8d7da 100644 --- a/pkg/parser/whitelist.go +++ b/pkg/parser/whitelist.go @@ -4,8 +4,8 @@ import ( "fmt" "net" - "github.com/antonmedv/expr" - "github.com/antonmedv/expr/vm" + "github.com/expr-lang/expr" + "github.com/expr-lang/expr/vm" "github.com/prometheus/client_golang/prometheus" "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" diff --git a/pkg/setup/detect.go b/pkg/setup/detect.go index 5deff904e19..55af951bf89 100644 --- a/pkg/setup/detect.go +++ b/pkg/setup/detect.go @@ -10,8 +10,8 @@ import ( "sort" "github.com/Masterminds/semver/v3" - "github.com/antonmedv/expr" "github.com/blackfireio/osinfo" + "github.com/expr-lang/expr" "github.com/shirou/gopsutil/v3/process" log "github.com/sirupsen/logrus" "gopkg.in/yaml.v3" diff --git a/pkg/types/event.go b/pkg/types/event.go index 7d8ef5825a2..76a447bdc8c 100644 --- a/pkg/types/event.go +++ b/pkg/types/event.go @@ -4,7 +4,7 @@ import ( "net" "time" - "github.com/antonmedv/expr/vm" + "github.com/expr-lang/expr/vm" log "github.com/sirupsen/logrus" "github.com/crowdsecurity/crowdsec/pkg/models" From 7b1794ff896eb679775e67cdb8b150b42d30a69a Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 22 Jul 2024 12:59:24 +0200 Subject: [PATCH 233/318] CI: update test dependencies (#3146) --- docker/test/Pipfile.lock | 213 +++++++++++++++++++-------------------- 1 file changed, 106 insertions(+), 107 deletions(-) diff --git a/docker/test/Pipfile.lock b/docker/test/Pipfile.lock index 75437876b72..2cb587b6b88 100644 --- a/docker/test/Pipfile.lock +++ b/docker/test/Pipfile.lock @@ -18,11 +18,11 @@ "default": { 
"certifi": { "hashes": [ - "sha256:9b469f3a900bf28dc19b8cfbf8019bf47f7fdd1a65a1d4ffb98fc14166beb4d1", - "sha256:e036ab49d5b79556f99cfc2d9320b34cfbe5be05c5871b51de9329f0603b0474" + "sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b", + "sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90" ], "markers": "python_version >= '3.6'", - "version": "==2023.11.17" + "version": "==2024.7.4" }, "cffi": { "hashes": [ @@ -180,65 +180,60 @@ }, "cryptography": { "hashes": [ - "sha256:087887e55e0b9c8724cf05361357875adb5c20dec27e5816b653492980d20380", - "sha256:09a77e5b2e8ca732a19a90c5bca2d124621a1edb5438c5daa2d2738bfeb02589", - "sha256:130c0f77022b2b9c99d8cebcdd834d81705f61c68e91ddd614ce74c657f8b3ea", - "sha256:141e2aa5ba100d3788c0ad7919b288f89d1fe015878b9659b307c9ef867d3a65", - "sha256:28cb2c41f131a5758d6ba6a0504150d644054fd9f3203a1e8e8d7ac3aea7f73a", - "sha256:2f9f14185962e6a04ab32d1abe34eae8a9001569ee4edb64d2304bf0d65c53f3", - "sha256:320948ab49883557a256eab46149df79435a22d2fefd6a66fe6946f1b9d9d008", - "sha256:36d4b7c4be6411f58f60d9ce555a73df8406d484ba12a63549c88bd64f7967f1", - "sha256:3b15c678f27d66d247132cbf13df2f75255627bcc9b6a570f7d2fd08e8c081d2", - "sha256:3dbd37e14ce795b4af61b89b037d4bc157f2cb23e676fa16932185a04dfbf635", - "sha256:4383b47f45b14459cab66048d384614019965ba6c1a1a141f11b5a551cace1b2", - "sha256:44c95c0e96b3cb628e8452ec060413a49002a247b2b9938989e23a2c8291fc90", - "sha256:4b063d3413f853e056161eb0c7724822a9740ad3caa24b8424d776cebf98e7ee", - "sha256:52ed9ebf8ac602385126c9a2fe951db36f2cb0c2538d22971487f89d0de4065a", - "sha256:55d1580e2d7e17f45d19d3b12098e352f3a37fe86d380bf45846ef257054b242", - "sha256:5ef9bc3d046ce83c4bbf4c25e1e0547b9c441c01d30922d812e887dc5f125c12", - "sha256:5fa82a26f92871eca593b53359c12ad7949772462f887c35edaf36f87953c0e2", - "sha256:61321672b3ac7aade25c40449ccedbc6db72c7f5f0fdf34def5e2f8b51ca530d", - "sha256:701171f825dcab90969596ce2af253143b93b08f1a716d4b2a9d2db5084ef7be", - 
"sha256:841ec8af7a8491ac76ec5a9522226e287187a3107e12b7d686ad354bb78facee", - "sha256:8a06641fb07d4e8f6c7dda4fc3f8871d327803ab6542e33831c7ccfdcb4d0ad6", - "sha256:8e88bb9eafbf6a4014d55fb222e7360eef53e613215085e65a13290577394529", - "sha256:a00aee5d1b6c20620161984f8ab2ab69134466c51f58c052c11b076715e72929", - "sha256:a047682d324ba56e61b7ea7c7299d51e61fd3bca7dad2ccc39b72bd0118d60a1", - "sha256:a7ef8dd0bf2e1d0a27042b231a3baac6883cdd5557036f5e8df7139255feaac6", - "sha256:ad28cff53f60d99a928dfcf1e861e0b2ceb2bc1f08a074fdd601b314e1cc9e0a", - "sha256:b9097a208875fc7bbeb1286d0125d90bdfed961f61f214d3f5be62cd4ed8a446", - "sha256:b97fe7d7991c25e6a31e5d5e795986b18fbbb3107b873d5f3ae6dc9a103278e9", - "sha256:e0ec52ba3c7f1b7d813cd52649a5b3ef1fc0d433219dc8c93827c57eab6cf888", - "sha256:ea2c3ffb662fec8bbbfce5602e2c159ff097a4631d96235fcf0fb00e59e3ece4", - "sha256:fa3dec4ba8fb6e662770b74f62f1a0c7d4e37e25b58b2bf2c1be4c95372b4a33", - "sha256:fbeb725c9dc799a574518109336acccaf1303c30d45c075c665c0793c2f79a7f" + "sha256:0663585d02f76929792470451a5ba64424acc3cd5227b03921dab0e2f27b1709", + "sha256:08a24a7070b2b6804c1940ff0f910ff728932a9d0e80e7814234269f9d46d069", + "sha256:232ce02943a579095a339ac4b390fbbe97f5b5d5d107f8a08260ea2768be8cc2", + "sha256:2905ccf93a8a2a416f3ec01b1a7911c3fe4073ef35640e7ee5296754e30b762b", + "sha256:299d3da8e00b7e2b54bb02ef58d73cd5f55fb31f33ebbf33bd00d9aa6807df7e", + "sha256:2c6d112bf61c5ef44042c253e4859b3cbbb50df2f78fa8fae6747a7814484a70", + "sha256:31e44a986ceccec3d0498e16f3d27b2ee5fdf69ce2ab89b52eaad1d2f33d8778", + "sha256:3d9a1eca329405219b605fac09ecfc09ac09e595d6def650a437523fcd08dd22", + "sha256:3dcdedae5c7710b9f97ac6bba7e1052b95c7083c9d0e9df96e02a1932e777895", + "sha256:47ca71115e545954e6c1d207dd13461ab81f4eccfcb1345eac874828b5e3eaaf", + "sha256:4a997df8c1c2aae1e1e5ac49c2e4f610ad037fc5a3aadc7b64e39dea42249431", + "sha256:51956cf8730665e2bdf8ddb8da0056f699c1a5715648c1b0144670c1ba00b48f", + "sha256:5bcb8a5620008a8034d39bce21dc3e23735dfdb6a33a06974739bfa04f853947", 
+ "sha256:64c3f16e2a4fc51c0d06af28441881f98c5d91009b8caaff40cf3548089e9c74", + "sha256:6e2b11c55d260d03a8cf29ac9b5e0608d35f08077d8c087be96287f43af3ccdc", + "sha256:7b3f5fe74a5ca32d4d0f302ffe6680fcc5c28f8ef0dc0ae8f40c0f3a1b4fca66", + "sha256:844b6d608374e7d08f4f6e6f9f7b951f9256db41421917dfb2d003dde4cd6b66", + "sha256:9a8d6802e0825767476f62aafed40532bd435e8a5f7d23bd8b4f5fd04cc80ecf", + "sha256:aae4d918f6b180a8ab8bf6511a419473d107df4dbb4225c7b48c5c9602c38c7f", + "sha256:ac1955ce000cb29ab40def14fd1bbfa7af2017cca696ee696925615cafd0dce5", + "sha256:b88075ada2d51aa9f18283532c9f60e72170041bba88d7f37e49cbb10275299e", + "sha256:cb013933d4c127349b3948aa8aaf2f12c0353ad0eccd715ca789c8a0f671646f", + "sha256:cc70b4b581f28d0a254d006f26949245e3657d40d8857066c2ae22a61222ef55", + "sha256:e9c5266c432a1e23738d178e51c2c7a5e2ddf790f248be939448c0ba2021f9d1", + "sha256:ea9e57f8ea880eeea38ab5abf9fbe39f923544d7884228ec67d666abd60f5a47", + "sha256:ee0c405832ade84d4de74b9029bedb7b31200600fa524d218fc29bfa371e97f5", + "sha256:fdcb265de28585de5b859ae13e3846a8e805268a823a12a4da2597f1f5afc9f0" ], "markers": "python_version >= '3.7'", - "version": "==42.0.2" + "version": "==43.0.0" }, "docker": { "hashes": [ - "sha256:12ba681f2777a0ad28ffbcc846a69c31b4dfd9752b47eb425a274ee269c5e14b", - "sha256:323736fb92cd9418fc5e7133bc953e11a9da04f4483f828b527db553f1e7e5a3" + "sha256:ad8c70e6e3f8926cb8a92619b832b4ea5299e2831c14284663184e200546fa6c", + "sha256:c96b93b7f0a746f9e77d325bcfb87422a3d8bd4f03136ae8a85b37f1898d5fc0" ], "markers": "python_version >= '3.8'", - "version": "==7.0.0" + "version": "==7.1.0" }, "execnet": { "hashes": [ - "sha256:88256416ae766bc9e8895c76a87928c0012183da3cc4fc18016e6f050e025f41", - "sha256:cc59bc4423742fd71ad227122eb0dd44db51efb3dc4095b45ac9a08c770096af" + "sha256:26dee51f1b80cebd6d0ca8e74dd8745419761d3bef34163928cbebbdc4749fdc", + "sha256:5189b52c6121c24feae288166ab41b32549c7e2348652736540b9e6e7d4e72e3" ], - "markers": "python_version >= '3.7'", - "version": "==2.0.2" + "markers": 
"python_version >= '3.8'", + "version": "==2.1.1" }, "idna": { "hashes": [ - "sha256:9ecdbbd083b06798ae1e86adcbfe8ab1479cf864e4ee30fe4e46a003d12491ca", - "sha256:c05567e9c24a6b9faaa835c4821bad0590fbb9d5779e7caa6e1cc4978e7eb24f" + "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc", + "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0" ], "markers": "python_version >= '3.5'", - "version": "==3.6" + "version": "==3.7" }, "iniconfig": { "hashes": [ @@ -250,56 +245,58 @@ }, "packaging": { "hashes": [ - "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5", - "sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7" + "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002", + "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124" ], - "markers": "python_version >= '3.7'", - "version": "==23.2" + "markers": "python_version >= '3.8'", + "version": "==24.1" }, "pluggy": { "hashes": [ - "sha256:7db9f7b503d67d1c5b95f59773ebb58a8c1c288129a88665838012cfb07b8981", - "sha256:8c85c2876142a764e5b7548e7d9a0e0ddb46f5185161049a79b7e974454223be" + "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1", + "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669" ], "markers": "python_version >= '3.8'", - "version": "==1.4.0" + "version": "==1.5.0" }, "psutil": { "hashes": [ - "sha256:02615ed8c5ea222323408ceba16c60e99c3f91639b07da6373fb7e6539abc56d", - "sha256:05806de88103b25903dff19bb6692bd2e714ccf9e668d050d144012055cbca73", - "sha256:26bd09967ae00920df88e0352a91cff1a78f8d69b3ecabbfe733610c0af486c8", - "sha256:27cc40c3493bb10de1be4b3f07cae4c010ce715290a5be22b98493509c6299e2", - "sha256:36f435891adb138ed3c9e58c6af3e2e6ca9ac2f365efe1f9cfef2794e6c93b4e", - "sha256:50187900d73c1381ba1454cf40308c2bf6f34268518b3f36a9b663ca87e65e36", - "sha256:611052c4bc70432ec770d5d54f64206aa7203a101ec273a0cd82418c86503bb7", - 
"sha256:6be126e3225486dff286a8fb9a06246a5253f4c7c53b475ea5f5ac934e64194c", - "sha256:7d79560ad97af658a0f6adfef8b834b53f64746d45b403f225b85c5c2c140eee", - "sha256:8cb6403ce6d8e047495a701dc7c5bd788add903f8986d523e3e20b98b733e421", - "sha256:8db4c1b57507eef143a15a6884ca10f7c73876cdf5d51e713151c1236a0e68cf", - "sha256:aee678c8720623dc456fa20659af736241f575d79429a0e5e9cf88ae0605cc81", - "sha256:bc56c2a1b0d15aa3eaa5a60c9f3f8e3e565303b465dbf57a1b730e7a2b9844e0", - "sha256:bd1184ceb3f87651a67b2708d4c3338e9b10c5df903f2e3776b62303b26cb631", - "sha256:d06016f7f8625a1825ba3732081d77c94589dca78b7a3fc072194851e88461a4", - "sha256:d16bbddf0693323b8c6123dd804100241da461e41d6e332fb0ba6058f630f8c8" + "sha256:02b69001f44cc73c1c5279d02b30a817e339ceb258ad75997325e0e6169d8b35", + "sha256:1287c2b95f1c0a364d23bc6f2ea2365a8d4d9b726a3be7294296ff7ba97c17f0", + "sha256:1e7c870afcb7d91fdea2b37c24aeb08f98b6d67257a5cb0a8bc3ac68d0f1a68c", + "sha256:21f1fb635deccd510f69f485b87433460a603919b45e2a324ad65b0cc74f8fb1", + "sha256:33ea5e1c975250a720b3a6609c490db40dae5d83a4eb315170c4fe0d8b1f34b3", + "sha256:34859b8d8f423b86e4385ff3665d3f4d94be3cdf48221fbe476e883514fdb71c", + "sha256:5fd9a97c8e94059b0ef54a7d4baf13b405011176c3b6ff257c247cae0d560ecd", + "sha256:6ec7588fb3ddaec7344a825afe298db83fe01bfaaab39155fa84cf1c0d6b13c3", + "sha256:6ed2440ada7ef7d0d608f20ad89a04ec47d2d3ab7190896cd62ca5fc4fe08bf0", + "sha256:8faae4f310b6d969fa26ca0545338b21f73c6b15db7c4a8d934a5482faa818f2", + "sha256:a021da3e881cd935e64a3d0a20983bda0bb4cf80e4f74fa9bfcb1bc5785360c6", + "sha256:a495580d6bae27291324fe60cea0b5a7c23fa36a7cd35035a16d93bdcf076b9d", + "sha256:a9a3dbfb4de4f18174528d87cc352d1f788b7496991cca33c6996f40c9e3c92c", + "sha256:c588a7e9b1173b6e866756dde596fd4cad94f9399daf99ad8c3258b3cb2b47a0", + "sha256:e2e8d0054fc88153ca0544f5c4d554d42e33df2e009c4ff42284ac9ebdef4132", + "sha256:fc8c9510cde0146432bbdb433322861ee8c3efbf8589865c8bf8d21cb30c4d14", + "sha256:ffe7fc9b6b36beadc8c322f84e1caff51e8703b88eee1da46d1e3a6ae11b4fd0" 
], "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5'", - "version": "==5.9.8" + "version": "==6.0.0" }, "pycparser": { "hashes": [ - "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9", - "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206" + "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6", + "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc" ], - "version": "==2.21" + "markers": "python_version >= '3.8'", + "version": "==2.22" }, "pytest": { "hashes": [ - "sha256:249b1b0864530ba251b7438274c4d251c58d868edaaec8762893ad4a0d71c36c", - "sha256:50fb9cbe836c3f20f0dfa99c565201fb75dc54c8d76373cd1bde06b06657bdb6" + "sha256:7e8e5c5abd6e93cb1cc151f23e57adc31fcf8cfd2a3ff2da63e23f732de35db6", + "sha256:e9600ccf4f563976e2c99fa02c7624ab938296551f280835ee6516df8bc4ae8c" ], "markers": "python_version >= '3.8'", - "version": "==8.0.0" + "version": "==8.3.1" }, "pytest-cs": { "git": "https://github.com/crowdsecurity/pytest-cs.git", @@ -327,6 +324,7 @@ "sha256:d075629c7e00b611df89f490a5063944bee7a4362a5ff11c7cc7824a03dfce24" ], "index": "pypi", + "markers": "python_version >= '3.7'", "version": "==3.5.0" }, "python-dotenv": { @@ -396,11 +394,11 @@ }, "requests": { "hashes": [ - "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f", - "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1" + "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760", + "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6" ], - "markers": "python_version >= '3.7'", - "version": "==2.31.0" + "markers": "python_version >= '3.8'", + "version": "==2.32.3" }, "trustme": { "hashes": [ @@ -412,11 +410,11 @@ }, "urllib3": { "hashes": [ - "sha256:051d961ad0c62a94e50ecf1af379c3aba230c66c710493493560c0c223c49f20", - "sha256:ce3711610ddce217e6d113a2732fafad960a03fd0318c91faa79481e35c11224" + 
"sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472", + "sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168" ], "markers": "python_version >= '3.8'", - "version": "==2.2.0" + "version": "==2.2.2" } }, "develop": { @@ -482,15 +480,16 @@ "sha256:e3ac6018ef05126d442af680aad863006ec19d02290561ac88b8b1c0b0cfc726" ], "index": "pypi", + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==0.13.13" }, "ipython": { "hashes": [ - "sha256:1050a3ab8473488d7eee163796b02e511d0735cf43a04ba2a8348bd0f2eaf8a5", - "sha256:48fbc236fbe0e138b88773fa0437751f14c3645fb483f1d4c5dee58b37e5ce73" + "sha256:1cec0fbba8404af13facebe83d04436a7434c7400e59f47acf467c64abd0956c", + "sha256:e6b347c27bdf9c32ee9d31ae85defc525755a1869f14057e900675b9e8d6e6ff" ], "markers": "python_version >= '3.11'", - "version": "==8.21.0" + "version": "==8.26.0" }, "jedi": { "hashes": [ @@ -502,35 +501,35 @@ }, "matplotlib-inline": { "hashes": [ - "sha256:f1f41aab5328aa5aaea9b16d083b128102f8712542f819fe7e6a420ff581b311", - "sha256:f887e5f10ba98e8d2b150ddcf4702c1e5f8b3a20005eb0f74bfdbd360ee6f304" + "sha256:8423b23ec666be3d16e16b60bdd8ac4e86e840ebd1dd11a30b9f117f2fa0ab90", + "sha256:df192d39a4ff8f21b1895d72e6a13f5fcc5099f00fa84384e0ea28c2cc0653ca" ], - "markers": "python_version >= '3.5'", - "version": "==0.1.6" + "markers": "python_version >= '3.8'", + "version": "==0.1.7" }, "parso": { "hashes": [ - "sha256:8c07be290bb59f03588915921e29e8a50002acaf2cdc5fa0e0114f91709fafa0", - "sha256:c001d4636cd3aecdaf33cbb40aebb59b094be2a74c556778ef5576c175e19e75" + "sha256:a418670a20291dacd2dddc80c377c5c3791378ee1e8d12bffc35420643d43f18", + "sha256:eb3a7b58240fb99099a345571deecc0f9540ea5f4dd2fe14c2a99d6b281ab92d" ], "markers": "python_version >= '3.6'", - "version": "==0.8.3" + "version": "==0.8.4" }, "pexpect": { "hashes": [ "sha256:7236d1e080e4936be2dc3e326cec0af72acf9212a7e1d060210e70a47e253523", 
"sha256:ee7d41123f3c9911050ea2c2dac107568dc43b2d3b0c7557a33212c398ead30f" ], - "markers": "sys_platform != 'win32'", + "markers": "sys_platform != 'win32' and sys_platform != 'emscripten'", "version": "==4.9.0" }, "prompt-toolkit": { "hashes": [ - "sha256:3527b7af26106cbc65a040bcc84839a3566ec1b051bb0bfe953631e704b0ff7d", - "sha256:a11a29cb3bf0a28a387fe5122cdb649816a957cd9261dcedf8c9f1fef33eacf6" + "sha256:0d7bfa67001d5e39d02c224b663abc33687405033a8c422d0d675a5a13361d10", + "sha256:1e1b29cb58080b1e69f207c893a1a7bf16d127a5c30c9d17a25a5d77792e5360" ], "markers": "python_full_version >= '3.7.0'", - "version": "==3.0.43" + "version": "==3.0.47" }, "ptyprocess": { "hashes": [ @@ -541,18 +540,18 @@ }, "pure-eval": { "hashes": [ - "sha256:01eaab343580944bc56080ebe0a674b39ec44a945e6d09ba7db3cb8cec289350", - "sha256:2b45320af6dfaa1750f543d714b6d1c520a1688dec6fd24d339063ce0aaa9ac3" + "sha256:1db8e35b67b3d218d818ae653e27f06c3aa420901fa7b081ca98cbedc874e0d0", + "sha256:5f4e983f40564c576c7c8635ae88db5956bb2229d7e9237d03b3c0b0190eaf42" ], - "version": "==0.2.2" + "version": "==0.2.3" }, "pygments": { "hashes": [ - "sha256:b27c2826c47d0f3219f29554824c30c5e8945175d888647acd804ddd04af846c", - "sha256:da46cec9fd2de5be3a8a784f434e4c4ab670b4ff54d605c4c2717e9d49c4c367" + "sha256:786ff802f32e91311bff3889f6e9a86e81505fe99f2735bb6d60ae0c5004f199", + "sha256:b8e6aca0523f3ab76fee51799c488e38782ac06eafcf95e7ba832985c8e7b13a" ], - "markers": "python_version >= '3.7'", - "version": "==2.17.2" + "markers": "python_version >= '3.8'", + "version": "==2.18.0" }, "six": { "hashes": [ @@ -571,11 +570,11 @@ }, "traitlets": { "hashes": [ - "sha256:2e5a030e6eff91737c643231bfcf04a65b0132078dad75e4936700b213652e74", - "sha256:8585105b371a04b8316a43d5ce29c098575c2e477850b62b848b964f1444527e" + "sha256:9ed0579d3502c94b4b3732ac120375cda96f923114522847de4b3bb98b96b6b7", + "sha256:b74e89e397b1ed28cc831db7aea759ba6640cb3de13090ca145426688ff1ac4f" ], "markers": "python_version >= '3.8'", - "version": "==5.14.1" 
+ "version": "==5.14.3" }, "wcwidth": { "hashes": [ From 61d19cff84e1f9261143feb06736ee02adb1c8ee Mon Sep 17 00:00:00 2001 From: marco Date: Mon, 22 Jul 2024 14:58:43 +0200 Subject: [PATCH 234/318] update table test --- test/bats/08_metrics_bouncer.bats | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/bats/08_metrics_bouncer.bats b/test/bats/08_metrics_bouncer.bats index 778452644dd..84a55dc88c1 100644 --- a/test/bats/08_metrics_bouncer.bats +++ b/test/bats/08_metrics_bouncer.bats @@ -216,7 +216,7 @@ teardown() { | | IPs | bytes | packets | dogyear | pound | +----------------------------------+------------------+---------+---------+---------+-------+ | CAPI (community blocklist) | - | 3.80k | 100 | - | - | - | cscli | 1 | 380 | 10 | - | - | + | cscli (manual decisions) | 1 | 380 | 10 | - | - | | lists:anotherlist | - | 0 | 0 | - | - | | lists:firehol_cruzit_web_attacks | - | 1.03k | 23 | - | - | | lists:firehol_voipbl | 51.94k | 3.85k | 58 | - | - | From 64e4ecde905a1dc0b4188cc0a6b2d26c739678e9 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 24 Jul 2024 10:29:18 +0200 Subject: [PATCH 235/318] LAPI: detailed metrics endpoint (#2858) --- cmd/crowdsec-cli/climetrics/statbouncer.go | 351 +++++++++++------ cmd/crowdsec/crowdsec.go | 13 + cmd/crowdsec/lpmetrics.go | 182 +++++++++ cmd/crowdsec/main.go | 27 +- cmd/crowdsec/serve.go | 11 + go.mod | 3 +- go.sum | 2 - pkg/apiclient/client.go | 3 + pkg/apiclient/client_test.go | 2 +- pkg/apiclient/resperr.go | 12 +- pkg/apiclient/usagemetrics.go | 29 ++ pkg/apiserver/apic.go | 72 ++-- pkg/apiserver/apic_metrics.go | 212 ++++++++++ pkg/apiserver/apiserver.go | 7 +- pkg/apiserver/controllers/controller.go | 26 +- pkg/apiserver/controllers/v1/errors.go | 30 ++ pkg/apiserver/controllers/v1/errors_test.go | 57 +++ pkg/apiserver/controllers/v1/usagemetrics.go | 204 ++++++++++ pkg/apiserver/usage_metrics_test.go | 384 +++++++++++++++++++ 
pkg/csconfig/crowdsec_service_test.go | 19 +- pkg/csconfig/database.go | 13 +- pkg/database/bouncers.go | 22 ++ pkg/database/ent/metric.go | 16 +- pkg/database/ent/metric/metric.go | 12 +- pkg/database/ent/metric/where.go | 54 +-- pkg/database/ent/metric_create.go | 16 +- pkg/database/ent/migrate/schema.go | 9 +- pkg/database/ent/mutation.go | 52 +-- pkg/database/ent/schema/metric.go | 13 +- pkg/database/flush.go | 44 ++- pkg/database/machines.go | 51 ++- pkg/database/metrics.go | 73 ++++ test/bats/08_metrics_bouncer.bats | 209 +++++++++- test/bats/08_metrics_machines.bats | 1 - test/bats/11_bouncers_tls.bats | 29 ++ 35 files changed, 1958 insertions(+), 302 deletions(-) create mode 100644 cmd/crowdsec/lpmetrics.go create mode 100644 pkg/apiclient/usagemetrics.go create mode 100644 pkg/apiserver/controllers/v1/errors_test.go create mode 100644 pkg/apiserver/controllers/v1/usagemetrics.go create mode 100644 pkg/apiserver/usage_metrics_test.go create mode 100644 pkg/database/metrics.go diff --git a/cmd/crowdsec-cli/climetrics/statbouncer.go b/cmd/crowdsec-cli/climetrics/statbouncer.go index 1a803cefbd2..7d80e902961 100644 --- a/cmd/crowdsec-cli/climetrics/statbouncer.go +++ b/cmd/crowdsec-cli/climetrics/statbouncer.go @@ -5,6 +5,8 @@ import ( "encoding/json" "fmt" "io" + "sort" + "strings" "time" "github.com/jedib0t/go-pretty/v6/table" @@ -15,12 +17,15 @@ import ( "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" "github.com/crowdsecurity/crowdsec/pkg/database" + "github.com/crowdsecurity/crowdsec/pkg/database/ent" "github.com/crowdsecurity/crowdsec/pkg/database/ent/metric" "github.com/crowdsecurity/crowdsec/pkg/models" ) -// un-aggregated data, de-normalized. +// bouncerMetricItem represents unaggregated, denormalized metric data. +// Possibly not unique if a bouncer sent the same data multiple times. 
type bouncerMetricItem struct { + collectedAt time.Time bouncerName string ipType string origin string @@ -29,14 +34,82 @@ type bouncerMetricItem struct { value float64 } +// aggregationOverTime is the first level of aggregation: we aggregate +// over time, then over ip type, then over origin. we only sum values +// for non-gauge metrics, and take the last value for gauge metrics. +type aggregationOverTime map[string]map[string]map[string]map[string]map[string]int64 + +func (a aggregationOverTime) add(bouncerName, origin, name, unit, ipType string, value float64, isGauge bool) { + if _, ok := a[bouncerName]; !ok { + a[bouncerName] = make(map[string]map[string]map[string]map[string]int64) + } + + if _, ok := a[bouncerName][origin]; !ok { + a[bouncerName][origin] = make(map[string]map[string]map[string]int64) + } + + if _, ok := a[bouncerName][origin][name]; !ok { + a[bouncerName][origin][name] = make(map[string]map[string]int64) + } + + if _, ok := a[bouncerName][origin][name][unit]; !ok { + a[bouncerName][origin][name][unit] = make(map[string]int64) + } + + if isGauge { + a[bouncerName][origin][name][unit][ipType] = int64(value) + } else { + a[bouncerName][origin][name][unit][ipType] += int64(value) + } +} + +// aggregationOverIPType is the second level of aggregation: data is summed +// regardless of the metrics type (gauge or not). 
This is used to display +// table rows, they won't differentiate ipv4 and ipv6 +type aggregationOverIPType map[string]map[string]map[string]map[string]int64 + +func (a aggregationOverIPType) add(bouncerName, origin, name, unit string, value int64) { + if _, ok := a[bouncerName]; !ok { + a[bouncerName] = make(map[string]map[string]map[string]int64) + } + + if _, ok := a[bouncerName][origin]; !ok { + a[bouncerName][origin] = make(map[string]map[string]int64) + } + + if _, ok := a[bouncerName][origin][name]; !ok { + a[bouncerName][origin][name] = make(map[string]int64) + } + + a[bouncerName][origin][name][unit] += value +} + +// aggregationOverOrigin is the third level of aggregation: these are +// the totals at the end of the table. Metrics without an origin will +// be added to the totals but not displayed in the rows, only in the footer. +type aggregationOverOrigin map[string]map[string]map[string]int64 + +func (a aggregationOverOrigin) add(bouncerName, name, unit string, value int64) { + if _, ok := a[bouncerName]; !ok { + a[bouncerName] = make(map[string]map[string]int64) + } + + if _, ok := a[bouncerName][name]; !ok { + a[bouncerName][name] = make(map[string]int64) + } + + a[bouncerName][name][unit] += value +} + type statBouncer struct { // oldest collection timestamp for each bouncer - oldestTS map[string]*time.Time - // we keep de-normalized metrics so we can iterate - // over them multiple times and keep the aggregation code simple - rawMetrics []bouncerMetricItem - aggregated map[string]map[string]map[string]map[string]int64 - aggregatedAllOrigin map[string]map[string]map[string]int64 + oldestTS map[string]time.Time + // aggregate over ip type: always sum + // [bouncer][origin][name][unit]value + aggOverIPType aggregationOverIPType + // aggregate over origin: always sum + // [bouncer][name][unit]value + aggOverOrigin aggregationOverOrigin } var knownPlurals = map[string]string{ @@ -46,15 +119,15 @@ var knownPlurals = map[string]string{ } func (s 
*statBouncer) MarshalJSON() ([]byte, error) { - return json.Marshal(s.aggregated) + return json.Marshal(s.aggOverIPType) } -func (s *statBouncer) Description() (string, string) { +func (*statBouncer) Description() (string, string) { return "Bouncer Metrics", `Network traffic blocked by bouncers.` } -func warnOnce(warningsLogged map[string]bool, msg string) { +func logWarningOnce(warningsLogged map[string]bool, msg string) { if _, ok := warningsLogged[msg]; !ok { log.Warningf(msg) @@ -62,67 +135,58 @@ func warnOnce(warningsLogged map[string]bool, msg string) { } } -func (s *statBouncer) Fetch(ctx context.Context, db *database.Client) error { - if db == nil { - return nil - } - - // query all bouncer metrics that have not been flushed - - metrics, err := db.Ent.Metric.Query(). - Where( - metric.GeneratedTypeEQ(metric.GeneratedTypeRC), - ).All(ctx) - if err != nil { - return fmt.Errorf("unable to fetch metrics: %w", err) - } - - s.oldestTS = make(map[string]*time.Time) +// extractRawMetrics converts metrics from the database to a de-normalized, de-duplicated slice +// it returns the slice and the oldest timestamp for each bouncer +func (*statBouncer) extractRawMetrics(metrics []*ent.Metric) ([]bouncerMetricItem, map[string]time.Time) { + oldestTS := make(map[string]time.Time) // don't spam the user with the same warnings warningsLogged := make(map[string]bool) + // store raw metrics, de-duplicated in case some were sent multiple times + uniqueRaw := make(map[bouncerMetricItem]struct{}) + for _, met := range metrics { bouncerName := met.GeneratedBy - collectedAt := met.CollectedAt - if s.oldestTS[bouncerName] == nil || collectedAt.Before(*s.oldestTS[bouncerName]) { - s.oldestTS[bouncerName] = &collectedAt - } - - type bouncerMetrics struct { + var payload struct { Metrics []models.DetailedMetrics `json:"metrics"` } - payload := bouncerMetrics{} - - err := json.Unmarshal([]byte(met.Payload), &payload) - if err != nil { + if err := json.Unmarshal([]byte(met.Payload), 
&payload); err != nil { log.Warningf("while parsing metrics for %s: %s", bouncerName, err) continue } for _, m := range payload.Metrics { - for _, item := range m.Items { - labels := item.Labels + // fields like timestamp, name, etc. are mandatory but we got pointers, so we check anyway + if m.Meta.UtcNowTimestamp == nil { + logWarningOnce(warningsLogged, "missing 'utc_now_timestamp' field in metrics reported by "+bouncerName) + continue + } + + collectedAt := time.Unix(*m.Meta.UtcNowTimestamp, 0).UTC() - // these are mandatory but we got pointers, so... + if oldestTS[bouncerName].IsZero() || collectedAt.Before(oldestTS[bouncerName]) { + oldestTS[bouncerName] = collectedAt + } + for _, item := range m.Items { valid := true if item.Name == nil { - warnOnce(warningsLogged, "missing 'name' field in metrics reported by "+bouncerName) + logWarningOnce(warningsLogged, "missing 'name' field in metrics reported by "+bouncerName) // no continue - keep checking the rest valid = false } if item.Unit == nil { - warnOnce(warningsLogged, "missing 'unit' field in metrics reported by "+bouncerName) + logWarningOnce(warningsLogged, "missing 'unit' field in metrics reported by "+bouncerName) valid = false } if item.Value == nil { - warnOnce(warningsLogged, "missing 'value' field in metrics reported by "+bouncerName) + logWarningOnce(warningsLogged, "missing 'value' field in metrics reported by "+bouncerName) valid = false } @@ -130,94 +194,152 @@ func (s *statBouncer) Fetch(ctx context.Context, db *database.Client) error { continue } - name := *item.Name - unit := *item.Unit - value := *item.Value - rawMetric := bouncerMetricItem{ + collectedAt: collectedAt, bouncerName: bouncerName, - ipType: labels["ip_type"], - origin: labels["origin"], - name: name, - unit: unit, - value: value, + ipType: item.Labels["ip_type"], + origin: item.Labels["origin"], + name: *item.Name, + unit: *item.Unit, + value: *item.Value, } - s.rawMetrics = append(s.rawMetrics, rawMetric) + uniqueRaw[rawMetric] 
= struct{}{} } } } - s.aggregate() + // extract raw metric structs + keys := make([]bouncerMetricItem, 0, len(uniqueRaw)) + for key := range uniqueRaw { + keys = append(keys, key) + } - return nil + // order them by timestamp + sort.Slice(keys, func(i, j int) bool { + return keys[i].collectedAt.Before(keys[j].collectedAt) + }) + + return keys, oldestTS } -func (s *statBouncer) aggregate() { - // [bouncer][origin][name][unit]value - if s.aggregated == nil { - s.aggregated = make(map[string]map[string]map[string]map[string]int64) +func (s *statBouncer) Fetch(ctx context.Context, db *database.Client) error { + if db == nil { + return nil } - if s.aggregatedAllOrigin == nil { - s.aggregatedAllOrigin = make(map[string]map[string]map[string]int64) + // query all bouncer metrics that have not been flushed + + metrics, err := db.Ent.Metric.Query(). + Where(metric.GeneratedTypeEQ(metric.GeneratedTypeRC)). + All(ctx) + if err != nil { + return fmt.Errorf("unable to fetch metrics: %w", err) } - for _, raw := range s.rawMetrics { - if _, ok := s.aggregated[raw.bouncerName]; !ok { - s.aggregated[raw.bouncerName] = make(map[string]map[string]map[string]int64) - } + // de-normalize, de-duplicate metrics and keep the oldest timestamp for each bouncer - if _, ok := s.aggregated[raw.bouncerName][raw.origin]; !ok { - s.aggregated[raw.bouncerName][raw.origin] = make(map[string]map[string]int64) - } + rawMetrics, oldestTS := s.extractRawMetrics(metrics) - if _, ok := s.aggregated[raw.bouncerName][raw.origin][raw.name]; !ok { - s.aggregated[raw.bouncerName][raw.origin][raw.name] = make(map[string]int64) - } + s.oldestTS = oldestTS + aggOverTime := s.newAggregationOverTime(rawMetrics) + s.aggOverIPType = s.newAggregationOverIPType(aggOverTime) + s.aggOverOrigin = s.newAggregationOverOrigin(s.aggOverIPType) - if _, ok := s.aggregated[raw.bouncerName][raw.origin][raw.name][raw.unit]; !ok { - s.aggregated[raw.bouncerName][raw.origin][raw.name][raw.unit] = 0 - } + return nil +} - 
s.aggregated[raw.bouncerName][raw.origin][raw.name][raw.unit] += int64(raw.value) +// return true if the metric is a gauge and should not be aggregated +func (*statBouncer) isGauge(name string) bool { + return name == "active_decisions" || strings.HasSuffix(name, "_gauge") +} - if _, ok := s.aggregatedAllOrigin[raw.bouncerName]; !ok { - s.aggregatedAllOrigin[raw.bouncerName] = make(map[string]map[string]int64) - } +// formatMetricName returns the metric name to display in the table header +func (*statBouncer) formatMetricName(name string) string { + return strings.TrimSuffix(name, "_gauge") +} - if _, ok := s.aggregatedAllOrigin[raw.bouncerName][raw.name]; !ok { - s.aggregatedAllOrigin[raw.bouncerName][raw.name] = make(map[string]int64) - } +// formatMetricOrigin returns the origin to display in the table rows +// (for example, some users don't know what capi is) +func (*statBouncer) formatMetricOrigin(origin string) string { + switch origin { + case "CAPI": + return origin + " (community blocklist)" + case "cscli": + return origin + " (manual decisions)" + case "crowdsec": + return origin + " (security engine)" + default: + return origin + } +} + +func (s *statBouncer) newAggregationOverTime(rawMetrics []bouncerMetricItem) aggregationOverTime { + ret := aggregationOverTime{} - if _, ok := s.aggregatedAllOrigin[raw.bouncerName][raw.name][raw.unit]; !ok { - s.aggregatedAllOrigin[raw.bouncerName][raw.name][raw.unit] = 0 + for _, raw := range rawMetrics { + ret.add(raw.bouncerName, raw.origin, raw.name, raw.unit, raw.ipType, raw.value, s.isGauge(raw.name)) + } + + return ret +} + +func (*statBouncer) newAggregationOverIPType(aggMetrics aggregationOverTime) aggregationOverIPType { + ret := aggregationOverIPType{} + + for bouncerName := range aggMetrics { + for origin := range aggMetrics[bouncerName] { + for name := range aggMetrics[bouncerName][origin] { + for unit := range aggMetrics[bouncerName][origin][name] { + for ipType := range 
aggMetrics[bouncerName][origin][name][unit] { + value := aggMetrics[bouncerName][origin][name][unit][ipType] + ret.add(bouncerName, origin, name, unit, value) + } + } + } } + } - s.aggregatedAllOrigin[raw.bouncerName][raw.name][raw.unit] += int64(raw.value) + return ret +} + +func (*statBouncer) newAggregationOverOrigin(aggMetrics aggregationOverIPType) aggregationOverOrigin { + ret := aggregationOverOrigin{} + + for bouncerName := range aggMetrics { + for origin := range aggMetrics[bouncerName] { + for name := range aggMetrics[bouncerName][origin] { + for unit := range aggMetrics[bouncerName][origin][name] { + val := aggMetrics[bouncerName][origin][name][unit] + ret.add(bouncerName, name, unit, val) + } + } + } } + + return ret } // bouncerTable displays a table of metrics for a single bouncer func (s *statBouncer) bouncerTable(out io.Writer, bouncerName string, wantColor string, noUnit bool) { - columns := make(map[string]map[string]bool) + columns := make(map[string]map[string]struct{}) - for _, item := range s.rawMetrics { - if item.bouncerName != bouncerName { - continue - } + bouncerData, ok := s.aggOverOrigin[bouncerName] + if !ok { + // no metrics for this bouncer, skip. how did we get here ? + // anyway we can't honor the "showEmpty" flag in this case, + // we don't even have the table headers + return + } + + for metricName, units := range bouncerData { // build a map of the metric names and units, to display dynamic columns - if _, ok := columns[item.name]; !ok { - columns[item.name] = make(map[string]bool) + columns[metricName] = make(map[string]struct{}) + for unit := range units { + columns[metricName][unit] = struct{}{} } - - columns[item.name][item.unit] = true } - // no metrics for this bouncer, skip. how did we get here ? 
- // anyway we can't honor the "showEmpty" flag in this case, - // we don't heven have the table headers - if len(columns) == 0 { return } @@ -238,11 +360,11 @@ func (s *statBouncer) bouncerTable(out io.Writer, bouncerName string, wantColor for _, unit := range maptools.SortedKeys(columns[name]) { colNum += 1 - header1 = append(header1, name) + header1 = append(header1, s.formatMetricName(name)) // we don't add "s" to random words - if knownPlurals[unit] != "" { - unit = knownPlurals[unit] + if plural, ok := knownPlurals[unit]; ok { + unit = plural } header2 = append(header2, unit) @@ -264,7 +386,7 @@ func (s *statBouncer) bouncerTable(out io.Writer, bouncerName string, wantColor // sort all the ranges for stable output - for _, origin := range maptools.SortedKeys(s.aggregated[bouncerName]) { + for _, origin := range maptools.SortedKeys(s.aggOverIPType[bouncerName]) { if origin == "" { // if the metric has no origin (i.e. processed bytes/packets) // we don't display it in the table body but it still gets aggreagted @@ -272,21 +394,15 @@ func (s *statBouncer) bouncerTable(out io.Writer, bouncerName string, wantColor continue } - metrics := s.aggregated[bouncerName][origin] - - // some users don't know what capi is - if origin == "CAPI" { - origin += " (community blocklist)" - } + metrics := s.aggOverIPType[bouncerName][origin] - row := table.Row{origin} + row := table.Row{s.formatMetricOrigin(origin)} for _, name := range maptools.SortedKeys(columns) { for _, unit := range maptools.SortedKeys(columns[name]) { valStr := "-" - val, ok := metrics[name][unit] - if ok { + if val, ok := metrics[name][unit]; ok { valStr = formatNumber(val, !noUnit) } @@ -299,7 +415,7 @@ func (s *statBouncer) bouncerTable(out io.Writer, bouncerName string, wantColor numRows += 1 } - totals := s.aggregatedAllOrigin[bouncerName] + totals := s.aggOverOrigin[bouncerName] if numRows == 0 { t.Style().Options.SeparateFooter = false @@ -319,27 +435,20 @@ func (s *statBouncer) bouncerTable(out 
io.Writer, bouncerName string, wantColor title = fmt.Sprintf("%s (%s)", title, bouncerName) if s.oldestTS != nil { - // if we change this to .Local() beware of tests + // if you change this to .Local() beware of tests title = fmt.Sprintf("%s since %s", title, s.oldestTS[bouncerName].String()) } - title += ":" - // don't use SetTitle() because it draws the title inside table box - io.WriteString(out, title+"\n") - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, title+":\n") + io.WriteString(out, t.Render()+"\n") // empty line between tables io.WriteString(out, "\n") } // Table displays a table of metrics for each bouncer func (s *statBouncer) Table(out io.Writer, wantColor string, noUnit bool, _ bool) { - bouncerNames := make(map[string]bool) - for _, item := range s.rawMetrics { - bouncerNames[item.bouncerName] = true - } - - for _, bouncerName := range maptools.SortedKeys(bouncerNames) { + for _, bouncerName := range maptools.SortedKeys(s.aggOverOrigin) { s.bouncerTable(out, bouncerName, wantColor, noUnit) } } diff --git a/cmd/crowdsec/crowdsec.go b/cmd/crowdsec/crowdsec.go index 2be8a84fec0..5aafc6b0dfe 100644 --- a/cmd/crowdsec/crowdsec.go +++ b/cmd/crowdsec/crowdsec.go @@ -140,6 +140,19 @@ func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.H }) outputWg.Wait() + mp := NewMetricsProvider( + apiClient, + lpMetricsDefaultInterval, + log.WithField("service", "lpmetrics"), + []string{}, + datasources, + hub, + ) + + lpMetricsTomb.Go(func() error { + return mp.Run(context.Background(), &lpMetricsTomb) + }) + if cConfig.Prometheus != nil && cConfig.Prometheus.Enabled { aggregated := false if cConfig.Prometheus.Level == configuration.CFG_METRICS_AGGREGATE { diff --git a/cmd/crowdsec/lpmetrics.go b/cmd/crowdsec/lpmetrics.go new file mode 100644 index 00000000000..0fd27054071 --- /dev/null +++ b/cmd/crowdsec/lpmetrics.go @@ -0,0 +1,182 @@ +package main + +import ( + "context" + "errors" + "net/http" + "time" + + 
"github.com/sirupsen/logrus" + + "gopkg.in/tomb.v2" + + "github.com/crowdsecurity/go-cs-lib/ptr" + "github.com/crowdsecurity/go-cs-lib/trace" + "github.com/crowdsecurity/go-cs-lib/version" + + "github.com/crowdsecurity/crowdsec/pkg/acquisition" + "github.com/crowdsecurity/crowdsec/pkg/apiclient" + "github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/crowdsecurity/crowdsec/pkg/fflag" + "github.com/crowdsecurity/crowdsec/pkg/models" +) + +const lpMetricsDefaultInterval = 30 * time.Minute + +// MetricsProvider collects metrics from the LP and sends them to the LAPI +type MetricsProvider struct { + apic *apiclient.ApiClient + interval time.Duration + static staticMetrics + logger *logrus.Entry +} + +type staticMetrics struct { + osName string + osVersion string + startupTS int64 + featureFlags []string + consoleOptions []string + datasourceMap map[string]int64 + hubState models.HubItems +} + +func getHubState(hub *cwhub.Hub) models.HubItems { + ret := models.HubItems{} + + for _, itemType := range cwhub.ItemTypes { + ret[itemType] = []models.HubItem{} + items, _ := hub.GetInstalledItemsByType(itemType) + cwhub.SortItemSlice(items) + + for _, item := range items { + status := "official" + if item.State.IsLocal() { + status = "custom" + } + if item.State.Tainted { + status = "tainted" + } + ret[itemType] = append(ret[itemType], models.HubItem{ + Name: item.Name, + Status: status, + Version: item.Version, + }) + } + } + + return ret +} + +// newStaticMetrics is called when the process starts, or reloads the configuration +func newStaticMetrics(consoleOptions []string, datasources []acquisition.DataSource, hub *cwhub.Hub) staticMetrics { + datasourceMap := map[string]int64{} + + for _, ds := range datasources { + datasourceMap[ds.GetName()] += 1 + } + + osName, osVersion := version.DetectOS() + + return staticMetrics{ + osName: osName, + osVersion: osVersion, + startupTS: time.Now().UTC().Unix(), + featureFlags: fflag.Crowdsec.GetEnabledFeatures(), + consoleOptions: 
consoleOptions, + datasourceMap: datasourceMap, + hubState: getHubState(hub), + } +} + +func NewMetricsProvider(apic *apiclient.ApiClient, interval time.Duration, logger *logrus.Entry, + consoleOptions []string, datasources []acquisition.DataSource, hub *cwhub.Hub) *MetricsProvider { + return &MetricsProvider{ + apic: apic, + interval: interval, + logger: logger, + static: newStaticMetrics(consoleOptions, datasources, hub), + } +} + +func (m *MetricsProvider) metricsPayload() *models.AllMetrics { + os := &models.OSversion{ + Name: ptr.Of(m.static.osName), + Version: ptr.Of(m.static.osVersion), + } + + base := models.BaseMetrics{ + UtcStartupTimestamp: ptr.Of(m.static.startupTS), + Os: os, + Version: ptr.Of(version.String()), + FeatureFlags: m.static.featureFlags, + Metrics: make([]*models.DetailedMetrics, 0), + } + + met := &models.LogProcessorsMetrics{ + BaseMetrics: base, + Datasources: m.static.datasourceMap, + HubItems: m.static.hubState, + } + + met.Metrics = append(met.Metrics, &models.DetailedMetrics{ + Meta: &models.MetricsMeta{ + UtcNowTimestamp: ptr.Of(time.Now().Unix()), + WindowSizeSeconds: ptr.Of(int64(m.interval.Seconds())), + }, + Items: make([]*models.MetricsDetailItem, 0), + }) + + return &models.AllMetrics{ + LogProcessors: []*models.LogProcessorsMetrics{met}, + } +} + +func (m *MetricsProvider) Run(ctx context.Context, myTomb *tomb.Tomb) error { + defer trace.CatchPanic("crowdsec/MetricsProvider.Run") + + if m.interval == time.Duration(0) { + return nil + } + + met := m.metricsPayload() + + ticker := time.NewTicker(1) // Send on start + + for { + select { + case <-ticker.C: + ctxTime, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + + _, resp, err := m.apic.UsageMetrics.Add(ctxTime, met) + switch { + case errors.Is(err, context.DeadlineExceeded): + m.logger.Warnf("timeout sending lp metrics") + ticker.Reset(m.interval) + continue + case err != nil && resp != nil && resp.Response.StatusCode == http.StatusNotFound: + 
m.logger.Warnf("metrics endpoint not found, older LAPI?") + ticker.Reset(m.interval) + continue + case err != nil: + m.logger.Warnf("failed to send lp metrics: %s", err) + ticker.Reset(m.interval) + continue + } + + if resp.Response.StatusCode != http.StatusCreated { + m.logger.Warnf("failed to send lp metrics: %s", resp.Response.Status) + ticker.Reset(m.interval) + continue + } + + ticker.Reset(m.interval) + + m.logger.Tracef("lp usage metrics sent") + case <-myTomb.Dying(): + ticker.Stop() + return nil + } + } +} diff --git a/cmd/crowdsec/main.go b/cmd/crowdsec/main.go index 26e39eb069c..18416e044e7 100644 --- a/cmd/crowdsec/main.go +++ b/cmd/crowdsec/main.go @@ -29,28 +29,29 @@ import ( ) var ( - /*tombs for the parser, buckets and outputs.*/ - acquisTomb tomb.Tomb - parsersTomb tomb.Tomb - bucketsTomb tomb.Tomb - outputsTomb tomb.Tomb - apiTomb tomb.Tomb - crowdsecTomb tomb.Tomb - pluginTomb tomb.Tomb + // tombs for the parser, buckets and outputs. + acquisTomb tomb.Tomb + parsersTomb tomb.Tomb + bucketsTomb tomb.Tomb + outputsTomb tomb.Tomb + apiTomb tomb.Tomb + crowdsecTomb tomb.Tomb + pluginTomb tomb.Tomb + lpMetricsTomb tomb.Tomb flags *Flags - /*the state of acquisition*/ + // the state of acquisition dataSources []acquisition.DataSource - /*the state of the buckets*/ + // the state of the buckets holders []leakybucket.BucketFactory buckets *leakybucket.Buckets inputLineChan chan types.Event inputEventChan chan types.Event outputEventChan chan types.Event // the buckets init returns its own chan that is used for multiplexing - /*settings*/ - lastProcessedItem time.Time /*keep track of last item timestamp in time-machine. it is used to GC buckets when we dump them.*/ + // settings + lastProcessedItem time.Time // keep track of last item timestamp in time-machine. it is used to GC buckets when we dump them. 
pluginBroker csplugin.PluginBroker ) @@ -307,7 +308,7 @@ func LoadConfig(configFile string, disableAgent bool, disableAPI bool, quiet boo if cConfig.API != nil && cConfig.API.Server != nil { cConfig.API.Server.OnlineClient = nil } - /*if the api is disabled as well, just read file and exit, don't daemonize*/ + // if the api is disabled as well, just read file and exit, don't daemonize if cConfig.DisableAPI { cConfig.Common.Daemonize = false } diff --git a/cmd/crowdsec/serve.go b/cmd/crowdsec/serve.go index 5fb7b86f181..f1a658e9512 100644 --- a/cmd/crowdsec/serve.go +++ b/cmd/crowdsec/serve.go @@ -60,6 +60,7 @@ func reloadHandler(sig os.Signal) (*csconfig.Config, error) { apiTomb = tomb.Tomb{} crowdsecTomb = tomb.Tomb{} pluginTomb = tomb.Tomb{} + lpMetricsTomb = tomb.Tomb{} cConfig, err := LoadConfig(flags.ConfigFile, flags.DisableAgent, flags.DisableAPI, false) if err != nil { @@ -179,6 +180,15 @@ func ShutdownCrowdsecRoutines() error { log.Warningf("Outputs didn't finish in time, some events may have not been flushed") } + lpMetricsTomb.Kill(nil) + + if err := lpMetricsTomb.Wait(); err != nil { + log.Warningf("Metrics returned error : %s", err) + reterr = err + } + + log.Debugf("metrics are done") + // He's dead, Jim. 
crowdsecTomb.Kill(nil) @@ -322,6 +332,7 @@ func Serve(cConfig *csconfig.Config, agentReady chan bool) error { apiTomb = tomb.Tomb{} crowdsecTomb = tomb.Tomb{} pluginTomb = tomb.Tomb{} + lpMetricsTomb = tomb.Tomb{} ctx := context.TODO() diff --git a/go.mod b/go.mod index f36bbcd996d..af9d7550b94 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,6 @@ require ( github.com/Masterminds/sprig/v3 v3.2.3 github.com/agext/levenshtein v1.2.3 github.com/alexliesenfeld/health v0.8.0 - github.com/antonmedv/expr v1.15.3 github.com/appleboy/gin-jwt/v2 v2.9.2 github.com/aws/aws-lambda-go v1.47.0 github.com/aws/aws-sdk-go v1.52.0 @@ -33,6 +32,7 @@ require ( github.com/dghubble/sling v1.4.2 github.com/docker/docker v24.0.9+incompatible github.com/docker/go-connections v0.4.0 + github.com/expr-lang/expr v1.16.9 github.com/fatih/color v1.16.0 github.com/fsnotify/fsnotify v1.7.0 github.com/gin-gonic/gin v1.9.1 @@ -111,7 +111,6 @@ require ( github.com/creack/pty v1.1.18 // indirect github.com/docker/distribution v2.8.2+incompatible // indirect github.com/docker/go-units v0.5.0 // indirect - github.com/expr-lang/expr v1.16.9 // indirect github.com/gabriel-vasile/mimetype v1.4.3 // indirect github.com/gin-contrib/sse v0.1.0 // indirect github.com/go-logr/logr v1.2.4 // indirect diff --git a/go.sum b/go.sum index d4cc2651f0f..282f10d6367 100644 --- a/go.sum +++ b/go.sum @@ -39,8 +39,6 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5 github.com/alexliesenfeld/health v0.8.0 h1:lCV0i+ZJPTbqP7LfKG7p3qZBl5VhelwUFCIVWl77fgk= github.com/alexliesenfeld/health v0.8.0/go.mod h1:TfNP0f+9WQVWMQRzvMUjlws4ceXKEL3WR+6Hp95HUFc= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= -github.com/antonmedv/expr v1.15.3 h1:q3hOJZNvLvhqE8OHBs1cFRdbXFNKuA+bHmRaI+AmRmI= -github.com/antonmedv/expr v1.15.3/go.mod h1:0E/6TxnOlRNp81GMzX9QfDPAmHo2Phg00y4JUv1ihsE= github.com/apparentlymart/go-textseg/v13 v13.0.0 
h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw= github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo= github.com/appleboy/gin-jwt/v2 v2.9.2 h1:GeS3lm9mb9HMmj7+GNjYUtpp3V1DAQ1TkUFa5poiZ7Y= diff --git a/pkg/apiclient/client.go b/pkg/apiclient/client.go index b702829efd3..3abd42cf009 100644 --- a/pkg/apiclient/client.go +++ b/pkg/apiclient/client.go @@ -39,6 +39,7 @@ type ApiClient struct { Metrics *MetricsService Signal *SignalService HeartBeat *HeartBeatService + UsageMetrics *UsageMetricsService } func (a *ApiClient) GetClient() *http.Client { @@ -108,6 +109,7 @@ func NewClient(config *Config) (*ApiClient, error) { c.Signal = (*SignalService)(&c.common) c.DecisionDelete = (*DecisionDeleteService)(&c.common) c.HeartBeat = (*HeartBeatService)(&c.common) + c.UsageMetrics = (*UsageMetricsService)(&c.common) return c, nil } @@ -144,6 +146,7 @@ func NewDefaultClient(URL *url.URL, prefix string, userAgent string, client *htt c.Signal = (*SignalService)(&c.common) c.DecisionDelete = (*DecisionDeleteService)(&c.common) c.HeartBeat = (*HeartBeatService)(&c.common) + c.UsageMetrics = (*UsageMetricsService)(&c.common) return c, nil } diff --git a/pkg/apiclient/client_test.go b/pkg/apiclient/client_test.go index 2adba170584..dd09811924f 100644 --- a/pkg/apiclient/client_test.go +++ b/pkg/apiclient/client_test.go @@ -348,5 +348,5 @@ func TestNewClientBadAnswer(t *testing.T) { URL: apiURL, VersionPrefix: "v1", }, &http.Client{}) - cstest.RequireErrorContains(t, err, "invalid body: invalid character 'b' looking for beginning of value") + cstest.RequireErrorContains(t, err, "invalid body: bad") } diff --git a/pkg/apiclient/resperr.go b/pkg/apiclient/resperr.go index ff954a73609..e8f12ee9f4e 100644 --- a/pkg/apiclient/resperr.go +++ b/pkg/apiclient/resperr.go @@ -34,12 +34,18 @@ func CheckResponse(r *http.Response) error { data, err := io.ReadAll(r.Body) if err != nil || len(data) == 0 { - ret.Message = ptr.Of(fmt.Sprintf("http code 
%d, no error message", r.StatusCode)) + ret.Message = ptr.Of(fmt.Sprintf("http code %d, no response body", r.StatusCode)) return ret } - if err := json.Unmarshal(data, ret); err != nil { - return fmt.Errorf("http code %d, invalid body: %w", r.StatusCode, err) + switch r.StatusCode { + case http.StatusUnprocessableEntity: + ret.Message = ptr.Of(fmt.Sprintf("http code %d, invalid request: %s", r.StatusCode, string(data))) + default: + if err := json.Unmarshal(data, ret); err != nil { + ret.Message = ptr.Of(fmt.Sprintf("http code %d, invalid body: %s", r.StatusCode, string(data))) + return ret + } } return ret diff --git a/pkg/apiclient/usagemetrics.go b/pkg/apiclient/usagemetrics.go new file mode 100644 index 00000000000..1d822bb5c1e --- /dev/null +++ b/pkg/apiclient/usagemetrics.go @@ -0,0 +1,29 @@ +package apiclient + +import ( + "context" + "fmt" + "net/http" + + "github.com/crowdsecurity/crowdsec/pkg/models" +) + +type UsageMetricsService service + +func (s *UsageMetricsService) Add(ctx context.Context, metrics *models.AllMetrics) (interface{}, *Response, error) { + u := fmt.Sprintf("%s/usage-metrics", s.client.URLPrefix) + + req, err := s.client.NewRequest(http.MethodPost, u, &metrics) + if err != nil { + return nil, nil, err + } + + var response interface{} + + resp, err := s.client.Do(ctx, req, &response) + if err != nil { + return nil, resp, err + } + + return &response, resp, nil +} diff --git a/pkg/apiserver/apic.go b/pkg/apiserver/apic.go index 68dc94367e2..284d0acdabf 100644 --- a/pkg/apiserver/apic.go +++ b/pkg/apiserver/apic.go @@ -35,26 +35,30 @@ import ( const ( // delta values must be smaller than the interval - pullIntervalDefault = time.Hour * 2 - pullIntervalDelta = 5 * time.Minute - pushIntervalDefault = time.Second * 10 - pushIntervalDelta = time.Second * 7 - metricsIntervalDefault = time.Minute * 30 - metricsIntervalDelta = time.Minute * 15 + pullIntervalDefault = time.Hour * 2 + pullIntervalDelta = 5 * time.Minute + pushIntervalDefault = 
time.Second * 10 + pushIntervalDelta = time.Second * 7 + metricsIntervalDefault = time.Minute * 30 + metricsIntervalDelta = time.Minute * 15 + usageMetricsInterval = time.Minute * 30 + usageMetricsIntervalFirst = time.Minute * 15 ) type apic struct { // when changing the intervals in tests, always set *First too // or they can be negative - pullInterval time.Duration - pullIntervalFirst time.Duration - pushInterval time.Duration - pushIntervalFirst time.Duration - metricsInterval time.Duration - metricsIntervalFirst time.Duration - dbClient *database.Client - apiClient *apiclient.ApiClient - AlertsAddChan chan []*models.Alert + pullInterval time.Duration + pullIntervalFirst time.Duration + pushInterval time.Duration + pushIntervalFirst time.Duration + metricsInterval time.Duration + metricsIntervalFirst time.Duration + usageMetricsInterval time.Duration + usageMetricsIntervalFirst time.Duration + dbClient *database.Client + apiClient *apiclient.ApiClient + AlertsAddChan chan []*models.Alert mu sync.Mutex pushTomb tomb.Tomb @@ -175,24 +179,26 @@ func NewAPIC(config *csconfig.OnlineApiClientCfg, dbClient *database.Client, con var err error ret := &apic{ - AlertsAddChan: make(chan []*models.Alert), - dbClient: dbClient, - mu: sync.Mutex{}, - startup: true, - credentials: config.Credentials, - pullTomb: tomb.Tomb{}, - pushTomb: tomb.Tomb{}, - metricsTomb: tomb.Tomb{}, - scenarioList: make([]string, 0), - consoleConfig: consoleConfig, - pullInterval: pullIntervalDefault, - pullIntervalFirst: randomDuration(pullIntervalDefault, pullIntervalDelta), - pushInterval: pushIntervalDefault, - pushIntervalFirst: randomDuration(pushIntervalDefault, pushIntervalDelta), - metricsInterval: metricsIntervalDefault, - metricsIntervalFirst: randomDuration(metricsIntervalDefault, metricsIntervalDelta), - isPulling: make(chan bool, 1), - whitelists: apicWhitelist, + AlertsAddChan: make(chan []*models.Alert), + dbClient: dbClient, + mu: sync.Mutex{}, + startup: true, + credentials: 
config.Credentials, + pullTomb: tomb.Tomb{}, + pushTomb: tomb.Tomb{}, + metricsTomb: tomb.Tomb{}, + scenarioList: make([]string, 0), + consoleConfig: consoleConfig, + pullInterval: pullIntervalDefault, + pullIntervalFirst: randomDuration(pullIntervalDefault, pullIntervalDelta), + pushInterval: pushIntervalDefault, + pushIntervalFirst: randomDuration(pushIntervalDefault, pushIntervalDelta), + metricsInterval: metricsIntervalDefault, + metricsIntervalFirst: randomDuration(metricsIntervalDefault, metricsIntervalDelta), + usageMetricsInterval: usageMetricsInterval, + usageMetricsIntervalFirst: randomDuration(usageMetricsInterval, usageMetricsIntervalFirst), + isPulling: make(chan bool, 1), + whitelists: apicWhitelist, } password := strfmt.Password(config.Credentials.Password) diff --git a/pkg/apiserver/apic_metrics.go b/pkg/apiserver/apic_metrics.go index b8e23629e1e..54640afc2d0 100644 --- a/pkg/apiserver/apic_metrics.go +++ b/pkg/apiserver/apic_metrics.go @@ -2,7 +2,10 @@ package apiserver import ( "context" + "encoding/json" + "net/http" "slices" + "strings" "time" log "github.com/sirupsen/logrus" @@ -11,9 +14,170 @@ import ( "github.com/crowdsecurity/go-cs-lib/trace" "github.com/crowdsecurity/go-cs-lib/version" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/fflag" "github.com/crowdsecurity/crowdsec/pkg/models" ) +type dbPayload struct { + Metrics []*models.DetailedMetrics `json:"metrics"` +} + +func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { + allMetrics := &models.AllMetrics{} + metricsIds := make([]int, 0) + + lps, err := a.dbClient.ListMachines() + if err != nil { + return nil, nil, err + } + + bouncers, err := a.dbClient.ListBouncers() + if err != nil { + return nil, nil, err + } + + for _, bouncer := range bouncers { + dbMetrics, err := a.dbClient.GetBouncerUsageMetricsByName(bouncer.Name) + if err != nil { + log.Errorf("unable to get bouncer usage metrics: %s", err) + continue + } + + 
rcMetrics := models.RemediationComponentsMetrics{} + + rcMetrics.Os = &models.OSversion{ + Name: ptr.Of(bouncer.Osname), + Version: ptr.Of(bouncer.Osversion), + } + rcMetrics.Type = bouncer.Type + rcMetrics.FeatureFlags = strings.Split(bouncer.Featureflags, ",") + rcMetrics.Version = ptr.Of(bouncer.Version) + rcMetrics.Name = bouncer.Name + rcMetrics.LastPull = bouncer.LastPull.UTC().Unix() + + rcMetrics.Metrics = make([]*models.DetailedMetrics, 0) + + // Might seem weird, but we duplicate the bouncers if we have multiple unsent metrics + for _, dbMetric := range dbMetrics { + dbPayload := &dbPayload{} + // Append no matter what, if we cannot unmarshal, there's no way we'll be able to fix it automatically + metricsIds = append(metricsIds, dbMetric.ID) + + err := json.Unmarshal([]byte(dbMetric.Payload), dbPayload) + if err != nil { + log.Errorf("unable to unmarshal bouncer metric (%s)", err) + continue + } + + rcMetrics.Metrics = append(rcMetrics.Metrics, dbPayload.Metrics...) + } + + allMetrics.RemediationComponents = append(allMetrics.RemediationComponents, &rcMetrics) + } + + for _, lp := range lps { + dbMetrics, err := a.dbClient.GetLPUsageMetricsByMachineID(lp.MachineId) + if err != nil { + log.Errorf("unable to get LP usage metrics: %s", err) + continue + } + + lpMetrics := models.LogProcessorsMetrics{} + + lpMetrics.Os = &models.OSversion{ + Name: ptr.Of(lp.Osname), + Version: ptr.Of(lp.Osversion), + } + lpMetrics.FeatureFlags = strings.Split(lp.Featureflags, ",") + lpMetrics.Version = ptr.Of(lp.Version) + lpMetrics.Name = lp.MachineId + lpMetrics.LastPush = lp.LastPush.UTC().Unix() + lpMetrics.LastUpdate = lp.UpdatedAt.UTC().Unix() + + lpMetrics.Datasources = lp.Datasources + + if lp.Hubstate != nil { + // must carry over the hub state even if nothing is installed + hubItems := models.HubItems{} + for itemType, items := range lp.Hubstate { + hubItems[itemType] = []models.HubItem{} + for _, item := range items { + hubItems[itemType] = 
append(hubItems[itemType], models.HubItem{ + Name: item.Name, + Status: item.Status, + Version: item.Version, + }) + } + + lpMetrics.HubItems = hubItems + } + } else { + lpMetrics.HubItems = models.HubItems{} + } + + lpMetrics.Metrics = make([]*models.DetailedMetrics, 0) + + for _, dbMetric := range dbMetrics { + dbPayload := &dbPayload{} + // Append no matter what, if we cannot unmarshal, there's no way we'll be able to fix it automatically + metricsIds = append(metricsIds, dbMetric.ID) + + err := json.Unmarshal([]byte(dbMetric.Payload), dbPayload) + if err != nil { + log.Errorf("unable to unmarshal log processor metric (%s)", err) + continue + } + + lpMetrics.Metrics = append(lpMetrics.Metrics, dbPayload.Metrics...) + } + + allMetrics.LogProcessors = append(allMetrics.LogProcessors, &lpMetrics) + } + + // FIXME: all of this should only be done once on startup/reload + consoleOptions := strings.Join(csconfig.GetConfig().API.Server.ConsoleConfig.EnabledOptions(), ",") + allMetrics.Lapi = &models.LapiMetrics{ + ConsoleOptions: models.ConsoleOptions{ + consoleOptions, + }, + } + + osName, osVersion := version.DetectOS() + + allMetrics.Lapi.Os = &models.OSversion{ + Name: ptr.Of(osName), + Version: ptr.Of(osVersion), + } + allMetrics.Lapi.Version = ptr.Of(version.String()) + allMetrics.Lapi.FeatureFlags = fflag.Crowdsec.GetEnabledFeatures() + + allMetrics.Lapi.Metrics = make([]*models.DetailedMetrics, 0) + + allMetrics.Lapi.Metrics = append(allMetrics.Lapi.Metrics, &models.DetailedMetrics{ + Meta: &models.MetricsMeta{ + UtcNowTimestamp: ptr.Of(time.Now().UTC().Unix()), + WindowSizeSeconds: ptr.Of(int64(a.metricsInterval.Seconds())), + }, + Items: make([]*models.MetricsDetailItem, 0), + }) + + // Force an actual slice to avoid non existing fields in the json + if allMetrics.RemediationComponents == nil { + allMetrics.RemediationComponents = make([]*models.RemediationComponentsMetrics, 0) + } + + if allMetrics.LogProcessors == nil { + allMetrics.LogProcessors = 
make([]*models.LogProcessorsMetrics, 0) + } + + return allMetrics, metricsIds, nil +} + +func (a *apic) MarkUsageMetricsAsSent(ids []int) error { + return a.dbClient.MarkUsageMetricsAsSent(ids) +} + func (a *apic) GetMetrics() (*models.Metrics, error) { machines, err := a.dbClient.ListMachines() if err != nil { @@ -160,3 +324,51 @@ func (a *apic) SendMetrics(stop chan (bool)) { } } } + +func (a *apic) SendUsageMetrics() { + defer trace.CatchPanic("lapi/usageMetricsToAPIC") + + firstRun := true + + ticker := time.NewTicker(a.usageMetricsIntervalFirst) + + for { + select { + case <-a.metricsTomb.Dying(): + // The normal metrics routine also kills push/pull tombs, does that make sense ? + ticker.Stop() + return + case <-ticker.C: + if firstRun { + firstRun = false + + ticker.Reset(a.usageMetricsInterval) + } + + metrics, metricsId, err := a.GetUsageMetrics() + if err != nil { + log.Errorf("unable to get usage metrics: %s", err) + continue + } + + _, resp, err := a.apiClient.UsageMetrics.Add(context.Background(), metrics) + if err != nil { + log.Errorf("unable to send usage metrics: %s", err) + + if resp.Response.StatusCode >= http.StatusBadRequest && resp.Response.StatusCode != http.StatusUnprocessableEntity { + // In case of 422, mark the metrics as sent anyway, the API did not like what we sent, + // and it's unlikely we'll be able to fix it + continue + } + } + + err = a.MarkUsageMetricsAsSent(metricsId) + if err != nil { + log.Errorf("unable to mark usage metrics as sent: %s", err) + continue + } + + log.Infof("Sent %d usage metrics", len(metricsId)) + } + } +} diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go index c6074801d7e..bd0b5d39bf4 100644 --- a/pkg/apiserver/apiserver.go +++ b/pkg/apiserver/apiserver.go @@ -367,6 +367,11 @@ func (s *APIServer) Run(apiReady chan bool) error { s.apic.SendMetrics(make(chan bool)) return nil }) + + s.apic.metricsTomb.Go(func() error { + s.apic.SendUsageMetrics() + return nil + }) } 
s.httpServerTomb.Go(func() error { @@ -375,7 +380,7 @@ func (s *APIServer) Run(apiReady chan bool) error { if err := s.httpServerTomb.Wait(); err != nil { return fmt.Errorf("local API server stopped with error: %w", err) - } + } return nil } diff --git a/pkg/apiserver/controllers/controller.go b/pkg/apiserver/controllers/controller.go index 51f359244be..8175f431384 100644 --- a/pkg/apiserver/controllers/controller.go +++ b/pkg/apiserver/controllers/controller.go @@ -4,12 +4,13 @@ import ( "context" "net" "net/http" + "strings" "github.com/alexliesenfeld/health" "github.com/gin-gonic/gin" log "github.com/sirupsen/logrus" - "github.com/crowdsecurity/crowdsec/pkg/apiserver/controllers/v1" + v1 "github.com/crowdsecurity/crowdsec/pkg/apiserver/controllers/v1" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/csplugin" "github.com/crowdsecurity/crowdsec/pkg/database" @@ -59,6 +60,23 @@ func serveHealth() http.HandlerFunc { return health.NewHandler(checker) } +func eitherAuthMiddleware(jwtMiddleware gin.HandlerFunc, apiKeyMiddleware gin.HandlerFunc) gin.HandlerFunc { + return func(c *gin.Context) { + switch { + case c.GetHeader("X-Api-Key") != "": + apiKeyMiddleware(c) + case c.GetHeader("Authorization") != "": + jwtMiddleware(c) + // uh no auth header. is this TLS with mutual authentication? 
+ case strings.HasPrefix(c.Request.UserAgent(), "crowdsec/"): + // guess log processors by sniffing user-agent + jwtMiddleware(c) + default: + apiKeyMiddleware(c) + } + } +} + func (c *Controller) NewV1() error { var err error @@ -117,6 +135,12 @@ func (c *Controller) NewV1() error { apiKeyAuth.HEAD("/decisions/stream", c.HandlerV1.StreamDecision) } + eitherAuth := groupV1.Group("") + eitherAuth.Use(eitherAuthMiddleware(c.HandlerV1.Middlewares.JWT.Middleware.MiddlewareFunc(), c.HandlerV1.Middlewares.APIKey.MiddlewareFunc())) + { + eitherAuth.POST("/usage-metrics", c.HandlerV1.UsageMetrics) + } + return nil } diff --git a/pkg/apiserver/controllers/v1/errors.go b/pkg/apiserver/controllers/v1/errors.go index 9004528e1b1..d661de44b0e 100644 --- a/pkg/apiserver/controllers/v1/errors.go +++ b/pkg/apiserver/controllers/v1/errors.go @@ -3,6 +3,7 @@ package v1 import ( "errors" "net/http" + "strings" "github.com/gin-gonic/gin" @@ -37,3 +38,32 @@ func (c *Controller) HandleDBErrors(gctx *gin.Context, err error) { return } } + +// collapseRepeatedPrefix collapses repeated occurrences of a given prefix in the text +func collapseRepeatedPrefix(text string, prefix string) string { + count := 0 + for strings.HasPrefix(text, prefix) { + count++ + text = strings.TrimPrefix(text, prefix) + } + + if count > 0 { + return prefix + text + } + + return text +} + +// RepeatedPrefixError wraps an error and removes the repeating prefix from its message +type RepeatedPrefixError struct { + OriginalError error + Prefix string +} + +func (e RepeatedPrefixError) Error() string { + return collapseRepeatedPrefix(e.OriginalError.Error(), e.Prefix) +} + +func (e RepeatedPrefixError) Unwrap() error { + return e.OriginalError +} diff --git a/pkg/apiserver/controllers/v1/errors_test.go b/pkg/apiserver/controllers/v1/errors_test.go new file mode 100644 index 00000000000..89c561f83bd --- /dev/null +++ b/pkg/apiserver/controllers/v1/errors_test.go @@ -0,0 +1,57 @@ +package v1 + +import ( + "errors" + 
"testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestCollapseRepeatedPrefix(t *testing.T) { + tests := []struct { + input string + prefix string + want string + }{ + { + input: "aaabbbcccaaa", + prefix: "aaa", + want: "aaabbbcccaaa", + }, { + input: "hellohellohello world", + prefix: "hello", + want: "hello world", + }, { + input: "ababababxyz", + prefix: "ab", + want: "abxyz", + }, { + input: "xyzxyzxyzxyzxyz", + prefix: "xyz", + want: "xyz", + }, { + input: "123123123456", + prefix: "456", + want: "123123123456", + }, + } + + for _, tt := range tests { + t.Run(tt.input, func(t *testing.T) { + assert.Equal(t, tt.want, collapseRepeatedPrefix(tt.input, tt.prefix)) + }) + } +} + +func TestRepeatedPrefixError(t *testing.T) { + originalErr := errors.New("hellohellohello world") + wrappedErr := RepeatedPrefixError{OriginalError: originalErr, Prefix: "hello"} + + want := "hello world" + + assert.Equal(t, want, wrappedErr.Error()) + + assert.Equal(t, originalErr, errors.Unwrap(wrappedErr)) + require.ErrorIs(t, wrappedErr, originalErr) +} diff --git a/pkg/apiserver/controllers/v1/usagemetrics.go b/pkg/apiserver/controllers/v1/usagemetrics.go new file mode 100644 index 00000000000..74f27bb6cf4 --- /dev/null +++ b/pkg/apiserver/controllers/v1/usagemetrics.go @@ -0,0 +1,204 @@ +package v1 + +import ( + "encoding/json" + "errors" + "net/http" + "time" + + "github.com/gin-gonic/gin" + "github.com/go-openapi/strfmt" + log "github.com/sirupsen/logrus" + + "github.com/crowdsecurity/go-cs-lib/ptr" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/metric" + "github.com/crowdsecurity/crowdsec/pkg/models" +) + +// updateBaseMetrics updates the base metrics for a machine or bouncer +func (c *Controller) updateBaseMetrics(machineID string, bouncer *ent.Bouncer, baseMetrics models.BaseMetrics, hubItems models.HubItems, datasources map[string]int64) error { + switch { + case 
machineID != "": + c.DBClient.MachineUpdateBaseMetrics(machineID, baseMetrics, hubItems, datasources) + case bouncer != nil: + c.DBClient.BouncerUpdateBaseMetrics(bouncer.Name, bouncer.Type, baseMetrics) + default: + return errors.New("no machineID or bouncerName set") + } + + return nil +} + +// UsageMetrics receives metrics from log processors and remediation components +func (c *Controller) UsageMetrics(gctx *gin.Context) { + var input models.AllMetrics + + logger := log.WithField("func", "UsageMetrics") + + // parse the payload + + if err := gctx.ShouldBindJSON(&input); err != nil { + logger.Errorf("Failed to bind json: %s", err) + gctx.JSON(http.StatusBadRequest, gin.H{"message": err.Error()}) + + return + } + + if err := input.Validate(strfmt.Default); err != nil { + // work around a nuisance in the generated code + cleanErr := RepeatedPrefixError{ + OriginalError: err, + Prefix: "validation failure list:\n", + } + logger.Errorf("Failed to validate usage metrics: %s", cleanErr) + gctx.JSON(http.StatusUnprocessableEntity, gin.H{"message": cleanErr.Error()}) + + return + } + + var ( + generatedType metric.GeneratedType + generatedBy string + ) + + bouncer, _ := getBouncerFromContext(gctx) + if bouncer != nil { + logger.Tracef("Received usage metris for bouncer: %s", bouncer.Name) + + generatedType = metric.GeneratedTypeRC + generatedBy = bouncer.Name + } + + machineID, _ := getMachineIDFromContext(gctx) + if machineID != "" { + logger.Tracef("Received usage metrics for log processor: %s", machineID) + + generatedType = metric.GeneratedTypeLP + generatedBy = machineID + } + + if generatedBy == "" { + // how did we get here? 
+ logger.Error("No machineID or bouncer in request context after authentication") + gctx.JSON(http.StatusInternalServerError, gin.H{"message": "No machineID or bouncer in request context after authentication"}) + + return + } + + if machineID != "" && bouncer != nil { + logger.Errorf("Payload has both machineID and bouncer") + gctx.JSON(http.StatusBadRequest, gin.H{"message": "Payload has both LP and RC data"}) + + return + } + + var ( + payload map[string]any + baseMetrics models.BaseMetrics + hubItems models.HubItems + datasources map[string]int64 + ) + + switch len(input.LogProcessors) { + case 0: + if machineID != "" { + logger.Errorf("Missing log processor data") + gctx.JSON(http.StatusBadRequest, gin.H{"message": "Missing log processor data"}) + + return + } + case 1: + // the final slice can't have more than one item, + // guaranteed by the swagger schema + item0 := input.LogProcessors[0] + + err := item0.Validate(strfmt.Default) + if err != nil { + logger.Errorf("Failed to validate log processor data: %s", err) + gctx.JSON(http.StatusUnprocessableEntity, gin.H{"message": err.Error()}) + + return + } + + payload = map[string]any{ + "metrics": item0.Metrics, + } + baseMetrics = item0.BaseMetrics + hubItems = item0.HubItems + datasources = item0.Datasources + default: + logger.Errorf("Payload has more than one log processor") + // this is not checked in the swagger schema + gctx.JSON(http.StatusBadRequest, gin.H{"message": "Payload has more than one log processor"}) + + return + } + + switch len(input.RemediationComponents) { + case 0: + if bouncer != nil { + logger.Errorf("Missing remediation component data") + gctx.JSON(http.StatusBadRequest, gin.H{"message": "Missing remediation component data"}) + + return + } + case 1: + item0 := input.RemediationComponents[0] + + err := item0.Validate(strfmt.Default) + if err != nil { + logger.Errorf("Failed to validate remediation component data: %s", err) + gctx.JSON(http.StatusUnprocessableEntity, gin.H{"message": 
err.Error()}) + + return + } + + payload = map[string]any{ + "type": item0.Type, + "metrics": item0.Metrics, + } + baseMetrics = item0.BaseMetrics + default: + gctx.JSON(http.StatusBadRequest, gin.H{"message": "Payload has more than one remediation component"}) + return + } + + if baseMetrics.Os == nil { + baseMetrics.Os = &models.OSversion{ + Name: ptr.Of(""), + Version: ptr.Of(""), + } + } + + err := c.updateBaseMetrics(machineID, bouncer, baseMetrics, hubItems, datasources) + if err != nil { + logger.Errorf("Failed to update base metrics: %s", err) + c.HandleDBErrors(gctx, err) + + return + } + + jsonPayload, err := json.Marshal(payload) + if err != nil { + logger.Errorf("Failed to marshal usage metrics: %s", err) + c.HandleDBErrors(gctx, err) + + return + } + + receivedAt := time.Now().UTC() + + if _, err := c.DBClient.CreateMetric(generatedType, generatedBy, receivedAt, string(jsonPayload)); err != nil { + logger.Error(err) + c.HandleDBErrors(gctx, err) + + return + } + + // if CreateMetrics() returned nil, the metric was already there, we're good + // and don't split hair about 201 vs 200/204 + + gctx.Status(http.StatusCreated) +} diff --git a/pkg/apiserver/usage_metrics_test.go b/pkg/apiserver/usage_metrics_test.go new file mode 100644 index 00000000000..41dd0ccdc2c --- /dev/null +++ b/pkg/apiserver/usage_metrics_test.go @@ -0,0 +1,384 @@ +package apiserver + +import ( + "context" + "net/http" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/crowdsecurity/crowdsec/pkg/database" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/metric" +) + +func TestLPMetrics(t *testing.T) { + tests := []struct { + name string + body string + expectedStatusCode int + expectedResponse string + expectedMetricsCount int + expectedOSName string + expectedOSVersion string + expectedFeatureFlags string + authType string + }{ + { + name: "empty metrics for LP", + body: `{ + }`, + expectedStatusCode: 400, + expectedResponse: "Missing log processor 
data", + authType: PASSWORD, + }, + { + name: "basic metrics with empty dynamic metrics for LP", + body: ` +{ + "log_processors": [ + { + "version": "1.42", + "os": {"name":"foo", "version": "42"}, + "utc_startup_timestamp": 42, + "metrics": [], + "feature_flags": ["a", "b", "c"], + "datasources": {"file": 42}, + "hub_items": {} + } + ] +}`, + expectedStatusCode: 201, + expectedMetricsCount: 1, + expectedResponse: "", + expectedOSName: "foo", + expectedOSVersion: "42", + expectedFeatureFlags: "a,b,c", + authType: PASSWORD, + }, + { + name: "basic metrics with dynamic metrics for LP", + body: ` +{ + "log_processors": [ + { + "version": "1.42", + "os": {"name":"foo", "version": "42"}, + "utc_startup_timestamp": 42, + "metrics": [{"meta":{"utc_now_timestamp":42, "window_size_seconds": 42}, "items": [{"name": "foo", "value": 42, "unit": "bla"}] }, {"meta":{"utc_now_timestamp":43, "window_size_seconds": 42}, "items": [{"name": "foo", "value": 42, "unit": "bla"}] }], + "feature_flags": ["a", "b", "c"], + "datasources": {"file": 42}, + "hub_items": {} + } + ] +}`, + expectedStatusCode: 201, + expectedMetricsCount: 1, + expectedResponse: "", + expectedOSName: "foo", + expectedOSVersion: "42", + expectedFeatureFlags: "a,b,c", + authType: PASSWORD, + }, + { + name: "wrong auth type for LP", + body: ` +{ + "log_processors": [ + { + "version": "1.42", + "os": {"name":"foo", "version": "42"}, + "utc_startup_timestamp": 42, + "metrics": [], + "feature_flags": ["a", "b", "c"], + "datasources": {"file": 42}, + "hub_items": {} + } + ] +}`, + expectedStatusCode: 400, + expectedResponse: "Missing remediation component data", + authType: APIKEY, + }, + { + name: "missing OS field for LP", + body: ` +{ + "log_processors": [ + { + "version": "1.42", + "utc_startup_timestamp": 42, + "metrics": [], + "feature_flags": ["a", "b", "c"], + "datasources": {"file": 42}, + "hub_items": {} + } + ] +}`, + expectedStatusCode: 201, + expectedResponse: "", + expectedMetricsCount: 1, + 
expectedFeatureFlags: "a,b,c", + authType: PASSWORD, + }, + { + name: "missing datasources for LP", + body: ` +{ + "log_processors": [ + { + "version": "1.42", + "os": {"name":"foo", "version": "42"}, + "utc_startup_timestamp": 42, + "metrics": [], + "feature_flags": ["a", "b", "c"], + "hub_items": {} + } + ] +}`, + expectedStatusCode: 422, + expectedResponse: "log_processors.0.datasources in body is required", + authType: PASSWORD, + }, + { + name: "missing feature flags for LP", + body: ` +{ + "log_processors": [ + { + "version": "1.42", + "os": {"name":"foo", "version": "42"}, + "utc_startup_timestamp": 42, + "metrics": [], + "datasources": {"file": 42}, + "hub_items": {} + } + ] +}`, + expectedStatusCode: 201, + expectedMetricsCount: 1, + expectedOSName: "foo", + expectedOSVersion: "42", + authType: PASSWORD, + }, + { + name: "missing OS name", + body: ` +{ + "log_processors": [ + { + "version": "1.42", + "os": {"version": "42"}, + "utc_startup_timestamp": 42, + "metrics": [], + "feature_flags": ["a", "b", "c"], + "datasources": {"file": 42}, + "hub_items": {} + } + ] +}`, + expectedStatusCode: 422, + expectedResponse: "log_processors.0.os.name in body is required", + authType: PASSWORD, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + lapi := SetupLAPITest(t) + + dbClient, err := database.NewClient(context.Background(), lapi.DBConfig) + if err != nil { + t.Fatalf("unable to create database client: %s", err) + } + + w := lapi.RecordResponse(t, http.MethodPost, "/v1/usage-metrics", strings.NewReader(tt.body), tt.authType) + + assert.Equal(t, tt.expectedStatusCode, w.Code) + assert.Contains(t, w.Body.String(), tt.expectedResponse) + + machine, _ := dbClient.QueryMachineByID("test") + metrics, _ := dbClient.GetLPUsageMetricsByMachineID("test") + + assert.Len(t, metrics, tt.expectedMetricsCount) + assert.Equal(t, tt.expectedOSName, machine.Osname) + assert.Equal(t, tt.expectedOSVersion, machine.Osversion) + assert.Equal(t, 
tt.expectedFeatureFlags, machine.Featureflags) + + if len(metrics) > 0 { + assert.Equal(t, "test", metrics[0].GeneratedBy) + assert.Equal(t, metric.GeneratedType("LP"), metrics[0].GeneratedType) + } + }) + } +} + +func TestRCMetrics(t *testing.T) { + tests := []struct { + name string + body string + expectedStatusCode int + expectedResponse string + expectedMetricsCount int + expectedOSName string + expectedOSVersion string + expectedFeatureFlags string + authType string + }{ + { + name: "empty metrics for RC", + body: `{ + }`, + expectedStatusCode: 400, + expectedResponse: "Missing remediation component data", + authType: APIKEY, + }, + { + name: "basic metrics with empty dynamic metrics for RC", + body: ` +{ + "remediation_components": [ + { + "version": "1.42", + "os": {"name":"foo", "version": "42"}, + "utc_startup_timestamp": 42, + "metrics": [], + "feature_flags": ["a", "b", "c"] + } + ] +}`, + expectedStatusCode: 201, + expectedMetricsCount: 1, + expectedResponse: "", + expectedOSName: "foo", + expectedOSVersion: "42", + expectedFeatureFlags: "a,b,c", + authType: APIKEY, + }, + { + name: "basic metrics with dynamic metrics for RC", + body: ` +{ + "remediation_components": [ + { + "version": "1.42", + "os": {"name":"foo", "version": "42"}, + "utc_startup_timestamp": 42, + "metrics": [{"meta":{"utc_now_timestamp":42, "window_size_seconds": 42}, "items": [{"name": "foo", "value": 42, "unit": "bla"}] }, {"meta":{"utc_now_timestamp":43, "window_size_seconds": 42}, "items": [{"name": "foo", "value": 42, "unit": "bla"}] }], + "feature_flags": ["a", "b", "c"] + } + ] +}`, + expectedStatusCode: 201, + expectedMetricsCount: 1, + expectedResponse: "", + expectedOSName: "foo", + expectedOSVersion: "42", + expectedFeatureFlags: "a,b,c", + authType: APIKEY, + }, + { + name: "wrong auth type for RC", + body: ` +{ + "remediation_components": [ + { + "version": "1.42", + "os": {"name":"foo", "version": "42"}, + "utc_startup_timestamp": 42, + "metrics": [], + "feature_flags": 
["a", "b", "c"] + } + ] +}`, + expectedStatusCode: 400, + expectedResponse: "Missing log processor data", + authType: PASSWORD, + }, + { + name: "missing OS field for RC", + body: ` +{ + "remediation_components": [ + { + "version": "1.42", + "utc_startup_timestamp": 42, + "metrics": [], + "feature_flags": ["a", "b", "c"] + } + ] +}`, + expectedStatusCode: 201, + expectedResponse: "", + expectedMetricsCount: 1, + expectedFeatureFlags: "a,b,c", + authType: APIKEY, + }, + { + name: "missing feature flags for RC", + body: ` +{ + "remediation_components": [ + { + "version": "1.42", + "os": {"name":"foo", "version": "42"}, + "utc_startup_timestamp": 42, + "metrics": [] + } + ] +}`, + expectedStatusCode: 201, + expectedMetricsCount: 1, + expectedOSName: "foo", + expectedOSVersion: "42", + authType: APIKEY, + }, + { + name: "missing OS name", + body: ` +{ + "remediation_components": [ + { + "version": "1.42", + "os": {"version": "42"}, + "utc_startup_timestamp": 42, + "metrics": [], + "feature_flags": ["a", "b", "c"] + } + ] +}`, + expectedStatusCode: 422, + expectedResponse: "remediation_components.0.os.name in body is required", + authType: APIKEY, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + lapi := SetupLAPITest(t) + + dbClient, err := database.NewClient(context.Background(), lapi.DBConfig) + if err != nil { + t.Fatalf("unable to create database client: %s", err) + } + + w := lapi.RecordResponse(t, http.MethodPost, "/v1/usage-metrics", strings.NewReader(tt.body), tt.authType) + + assert.Equal(t, tt.expectedStatusCode, w.Code) + assert.Contains(t, w.Body.String(), tt.expectedResponse) + + bouncer, _ := dbClient.SelectBouncerByName("test") + metrics, _ := dbClient.GetBouncerUsageMetricsByName("test") + + assert.Len(t, metrics, tt.expectedMetricsCount) + assert.Equal(t, tt.expectedOSName, bouncer.Osname) + assert.Equal(t, tt.expectedOSVersion, bouncer.Osversion) + assert.Equal(t, tt.expectedFeatureFlags, bouncer.Featureflags) + + if 
len(metrics) > 0 { + assert.Equal(t, "test", metrics[0].GeneratedBy) + assert.Equal(t, metric.GeneratedType("RC"), metrics[0].GeneratedType) + } + }) + } +} diff --git a/pkg/csconfig/crowdsec_service_test.go b/pkg/csconfig/crowdsec_service_test.go index 2f41beaf55e..7570b63011e 100644 --- a/pkg/csconfig/crowdsec_service_test.go +++ b/pkg/csconfig/crowdsec_service_test.go @@ -61,9 +61,9 @@ func TestLoadCrowdsec(t *testing.T) { AcquisitionFiles: []string{acquisFullPath}, SimulationFilePath: "./testdata/simulation.yaml", // context is loaded in pkg/alertcontext -// ContextToSend: map[string][]string{ -// "source_ip": {"evt.Parsed.source_ip"}, -// }, + // ContextToSend: map[string][]string{ + // "source_ip": {"evt.Parsed.source_ip"}, + // }, SimulationConfig: &SimulationConfig{ Simulation: ptr.Of(false), }, @@ -100,9 +100,9 @@ func TestLoadCrowdsec(t *testing.T) { ConsoleContextValueLength: 0, AcquisitionFiles: []string{acquisFullPath, acquisInDirFullPath}, // context is loaded in pkg/alertcontext -// ContextToSend: map[string][]string{ -// "source_ip": {"evt.Parsed.source_ip"}, -// }, + // ContextToSend: map[string][]string{ + // "source_ip": {"evt.Parsed.source_ip"}, + // }, SimulationFilePath: "./testdata/simulation.yaml", SimulationConfig: &SimulationConfig{ Simulation: ptr.Of(false), @@ -139,9 +139,9 @@ func TestLoadCrowdsec(t *testing.T) { AcquisitionFiles: []string{}, SimulationFilePath: "", // context is loaded in pkg/alertcontext -// ContextToSend: map[string][]string{ -// "source_ip": {"evt.Parsed.source_ip"}, -// }, + // ContextToSend: map[string][]string{ + // "source_ip": {"evt.Parsed.source_ip"}, + // }, SimulationConfig: &SimulationConfig{ Simulation: ptr.Of(false), }, @@ -184,6 +184,7 @@ func TestLoadCrowdsec(t *testing.T) { t.Run(tc.name, func(t *testing.T) { err := tc.input.LoadCrowdsec() cstest.RequireErrorContains(t, err, tc.expectedErr) + if tc.expectedErr != "" { return } diff --git a/pkg/csconfig/database.go b/pkg/csconfig/database.go index 
a24eb9e13c3..4ca582cf576 100644 --- a/pkg/csconfig/database.go +++ b/pkg/csconfig/database.go @@ -50,9 +50,10 @@ type AuthGCCfg struct { type FlushDBCfg struct { MaxItems *int `yaml:"max_items,omitempty"` // We could unmarshal as time.Duration, but alert filters right now are a map of strings - MaxAge *string `yaml:"max_age,omitempty"` - BouncersGC *AuthGCCfg `yaml:"bouncers_autodelete,omitempty"` - AgentsGC *AuthGCCfg `yaml:"agents_autodelete,omitempty"` + MaxAge *string `yaml:"max_age,omitempty"` + BouncersGC *AuthGCCfg `yaml:"bouncers_autodelete,omitempty"` + AgentsGC *AuthGCCfg `yaml:"agents_autodelete,omitempty"` + MetricsMaxAge *time.Duration `yaml:"metrics_max_age,omitempty"` } func (c *Config) LoadDBConfig(inCli bool) error { @@ -80,9 +81,9 @@ func (c *Config) LoadDBConfig(inCli bool) error { case err != nil: log.Warnf("unable to determine if database is on network filesystem: %s", err) log.Warning( - "You are using sqlite without WAL, this can have a performance impact. " + - "If you do not store the database in a network share, set db_config.use_wal to true. " + - "Set explicitly to false to disable this warning.") + "You are using sqlite without WAL, this can have a performance impact. " + + "If you do not store the database in a network share, set db_config.use_wal to true. 
" + + "Set explicitly to false to disable this warning.") case isNetwork: log.Debugf("database is on network filesystem (%s), setting useWal to false", fsType) c.DbConfig.UseWal = ptr.Of(false) diff --git a/pkg/database/bouncers.go b/pkg/database/bouncers.go index 03a3227301d..ff750e63c59 100644 --- a/pkg/database/bouncers.go +++ b/pkg/database/bouncers.go @@ -2,14 +2,36 @@ package database import ( "fmt" + "strings" "time" "github.com/pkg/errors" "github.com/crowdsecurity/crowdsec/pkg/database/ent" "github.com/crowdsecurity/crowdsec/pkg/database/ent/bouncer" + "github.com/crowdsecurity/crowdsec/pkg/models" ) +func (c *Client) BouncerUpdateBaseMetrics(bouncerName string, bouncerType string, baseMetrics models.BaseMetrics) error { + os := baseMetrics.Os + features := strings.Join(baseMetrics.FeatureFlags, ",") + + _, err := c.Ent.Bouncer. + Update(). + Where(bouncer.NameEQ(bouncerName)). + SetNillableVersion(baseMetrics.Version). + SetOsname(*os.Name). + SetOsversion(*os.Version). + SetFeatureflags(features). + SetType(bouncerType). + Save(c.CTX) + if err != nil { + return fmt.Errorf("unable to update base bouncer metrics in database: %w", err) + } + + return nil +} + func (c *Client) SelectBouncer(apiKeyHash string) (*ent.Bouncer, error) { result, err := c.Ent.Bouncer.Query().Where(bouncer.APIKeyEQ(apiKeyHash)).First(c.CTX) if err != nil { diff --git a/pkg/database/ent/metric.go b/pkg/database/ent/metric.go index 236d54da25d..47f3b4df4e5 100644 --- a/pkg/database/ent/metric.go +++ b/pkg/database/ent/metric.go @@ -22,8 +22,8 @@ type Metric struct { // Source of the metrics: machine id, bouncer name... // It must come from the auth middleware. 
GeneratedBy string `json:"generated_by,omitempty"` - // When the metrics are collected/calculated at the source - CollectedAt time.Time `json:"collected_at,omitempty"` + // When the metrics are received by LAPI + ReceivedAt time.Time `json:"received_at,omitempty"` // When the metrics are sent to the console PushedAt *time.Time `json:"pushed_at,omitempty"` // The actual metrics (item0) @@ -40,7 +40,7 @@ func (*Metric) scanValues(columns []string) ([]any, error) { values[i] = new(sql.NullInt64) case metric.FieldGeneratedType, metric.FieldGeneratedBy, metric.FieldPayload: values[i] = new(sql.NullString) - case metric.FieldCollectedAt, metric.FieldPushedAt: + case metric.FieldReceivedAt, metric.FieldPushedAt: values[i] = new(sql.NullTime) default: values[i] = new(sql.UnknownType) @@ -75,11 +75,11 @@ func (m *Metric) assignValues(columns []string, values []any) error { } else if value.Valid { m.GeneratedBy = value.String } - case metric.FieldCollectedAt: + case metric.FieldReceivedAt: if value, ok := values[i].(*sql.NullTime); !ok { - return fmt.Errorf("unexpected type %T for field collected_at", values[i]) + return fmt.Errorf("unexpected type %T for field received_at", values[i]) } else if value.Valid { - m.CollectedAt = value.Time + m.ReceivedAt = value.Time } case metric.FieldPushedAt: if value, ok := values[i].(*sql.NullTime); !ok { @@ -136,8 +136,8 @@ func (m *Metric) String() string { builder.WriteString("generated_by=") builder.WriteString(m.GeneratedBy) builder.WriteString(", ") - builder.WriteString("collected_at=") - builder.WriteString(m.CollectedAt.Format(time.ANSIC)) + builder.WriteString("received_at=") + builder.WriteString(m.ReceivedAt.Format(time.ANSIC)) builder.WriteString(", ") if v := m.PushedAt; v != nil { builder.WriteString("pushed_at=") diff --git a/pkg/database/ent/metric/metric.go b/pkg/database/ent/metric/metric.go index 879f1006d64..78e88982220 100644 --- a/pkg/database/ent/metric/metric.go +++ b/pkg/database/ent/metric/metric.go @@ -17,8 
+17,8 @@ const ( FieldGeneratedType = "generated_type" // FieldGeneratedBy holds the string denoting the generated_by field in the database. FieldGeneratedBy = "generated_by" - // FieldCollectedAt holds the string denoting the collected_at field in the database. - FieldCollectedAt = "collected_at" + // FieldReceivedAt holds the string denoting the received_at field in the database. + FieldReceivedAt = "received_at" // FieldPushedAt holds the string denoting the pushed_at field in the database. FieldPushedAt = "pushed_at" // FieldPayload holds the string denoting the payload field in the database. @@ -32,7 +32,7 @@ var Columns = []string{ FieldID, FieldGeneratedType, FieldGeneratedBy, - FieldCollectedAt, + FieldReceivedAt, FieldPushedAt, FieldPayload, } @@ -88,9 +88,9 @@ func ByGeneratedBy(opts ...sql.OrderTermOption) OrderOption { return sql.OrderByField(FieldGeneratedBy, opts...).ToFunc() } -// ByCollectedAt orders the results by the collected_at field. -func ByCollectedAt(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldCollectedAt, opts...).ToFunc() +// ByReceivedAt orders the results by the received_at field. +func ByReceivedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldReceivedAt, opts...).ToFunc() } // ByPushedAt orders the results by the pushed_at field. diff --git a/pkg/database/ent/metric/where.go b/pkg/database/ent/metric/where.go index e49f80f3411..72bd9d93cd7 100644 --- a/pkg/database/ent/metric/where.go +++ b/pkg/database/ent/metric/where.go @@ -59,9 +59,9 @@ func GeneratedBy(v string) predicate.Metric { return predicate.Metric(sql.FieldEQ(FieldGeneratedBy, v)) } -// CollectedAt applies equality check predicate on the "collected_at" field. It's identical to CollectedAtEQ. -func CollectedAt(v time.Time) predicate.Metric { - return predicate.Metric(sql.FieldEQ(FieldCollectedAt, v)) +// ReceivedAt applies equality check predicate on the "received_at" field. It's identical to ReceivedAtEQ. 
+func ReceivedAt(v time.Time) predicate.Metric { + return predicate.Metric(sql.FieldEQ(FieldReceivedAt, v)) } // PushedAt applies equality check predicate on the "pushed_at" field. It's identical to PushedAtEQ. @@ -159,44 +159,44 @@ func GeneratedByContainsFold(v string) predicate.Metric { return predicate.Metric(sql.FieldContainsFold(FieldGeneratedBy, v)) } -// CollectedAtEQ applies the EQ predicate on the "collected_at" field. -func CollectedAtEQ(v time.Time) predicate.Metric { - return predicate.Metric(sql.FieldEQ(FieldCollectedAt, v)) +// ReceivedAtEQ applies the EQ predicate on the "received_at" field. +func ReceivedAtEQ(v time.Time) predicate.Metric { + return predicate.Metric(sql.FieldEQ(FieldReceivedAt, v)) } -// CollectedAtNEQ applies the NEQ predicate on the "collected_at" field. -func CollectedAtNEQ(v time.Time) predicate.Metric { - return predicate.Metric(sql.FieldNEQ(FieldCollectedAt, v)) +// ReceivedAtNEQ applies the NEQ predicate on the "received_at" field. +func ReceivedAtNEQ(v time.Time) predicate.Metric { + return predicate.Metric(sql.FieldNEQ(FieldReceivedAt, v)) } -// CollectedAtIn applies the In predicate on the "collected_at" field. -func CollectedAtIn(vs ...time.Time) predicate.Metric { - return predicate.Metric(sql.FieldIn(FieldCollectedAt, vs...)) +// ReceivedAtIn applies the In predicate on the "received_at" field. +func ReceivedAtIn(vs ...time.Time) predicate.Metric { + return predicate.Metric(sql.FieldIn(FieldReceivedAt, vs...)) } -// CollectedAtNotIn applies the NotIn predicate on the "collected_at" field. -func CollectedAtNotIn(vs ...time.Time) predicate.Metric { - return predicate.Metric(sql.FieldNotIn(FieldCollectedAt, vs...)) +// ReceivedAtNotIn applies the NotIn predicate on the "received_at" field. +func ReceivedAtNotIn(vs ...time.Time) predicate.Metric { + return predicate.Metric(sql.FieldNotIn(FieldReceivedAt, vs...)) } -// CollectedAtGT applies the GT predicate on the "collected_at" field. 
-func CollectedAtGT(v time.Time) predicate.Metric { - return predicate.Metric(sql.FieldGT(FieldCollectedAt, v)) +// ReceivedAtGT applies the GT predicate on the "received_at" field. +func ReceivedAtGT(v time.Time) predicate.Metric { + return predicate.Metric(sql.FieldGT(FieldReceivedAt, v)) } -// CollectedAtGTE applies the GTE predicate on the "collected_at" field. -func CollectedAtGTE(v time.Time) predicate.Metric { - return predicate.Metric(sql.FieldGTE(FieldCollectedAt, v)) +// ReceivedAtGTE applies the GTE predicate on the "received_at" field. +func ReceivedAtGTE(v time.Time) predicate.Metric { + return predicate.Metric(sql.FieldGTE(FieldReceivedAt, v)) } -// CollectedAtLT applies the LT predicate on the "collected_at" field. -func CollectedAtLT(v time.Time) predicate.Metric { - return predicate.Metric(sql.FieldLT(FieldCollectedAt, v)) +// ReceivedAtLT applies the LT predicate on the "received_at" field. +func ReceivedAtLT(v time.Time) predicate.Metric { + return predicate.Metric(sql.FieldLT(FieldReceivedAt, v)) } -// CollectedAtLTE applies the LTE predicate on the "collected_at" field. -func CollectedAtLTE(v time.Time) predicate.Metric { - return predicate.Metric(sql.FieldLTE(FieldCollectedAt, v)) +// ReceivedAtLTE applies the LTE predicate on the "received_at" field. +func ReceivedAtLTE(v time.Time) predicate.Metric { + return predicate.Metric(sql.FieldLTE(FieldReceivedAt, v)) } // PushedAtEQ applies the EQ predicate on the "pushed_at" field. diff --git a/pkg/database/ent/metric_create.go b/pkg/database/ent/metric_create.go index 8fa656db427..973cddd41d0 100644 --- a/pkg/database/ent/metric_create.go +++ b/pkg/database/ent/metric_create.go @@ -32,9 +32,9 @@ func (mc *MetricCreate) SetGeneratedBy(s string) *MetricCreate { return mc } -// SetCollectedAt sets the "collected_at" field. -func (mc *MetricCreate) SetCollectedAt(t time.Time) *MetricCreate { - mc.mutation.SetCollectedAt(t) +// SetReceivedAt sets the "received_at" field. 
+func (mc *MetricCreate) SetReceivedAt(t time.Time) *MetricCreate { + mc.mutation.SetReceivedAt(t) return mc } @@ -103,8 +103,8 @@ func (mc *MetricCreate) check() error { if _, ok := mc.mutation.GeneratedBy(); !ok { return &ValidationError{Name: "generated_by", err: errors.New(`ent: missing required field "Metric.generated_by"`)} } - if _, ok := mc.mutation.CollectedAt(); !ok { - return &ValidationError{Name: "collected_at", err: errors.New(`ent: missing required field "Metric.collected_at"`)} + if _, ok := mc.mutation.ReceivedAt(); !ok { + return &ValidationError{Name: "received_at", err: errors.New(`ent: missing required field "Metric.received_at"`)} } if _, ok := mc.mutation.Payload(); !ok { return &ValidationError{Name: "payload", err: errors.New(`ent: missing required field "Metric.payload"`)} @@ -143,9 +143,9 @@ func (mc *MetricCreate) createSpec() (*Metric, *sqlgraph.CreateSpec) { _spec.SetField(metric.FieldGeneratedBy, field.TypeString, value) _node.GeneratedBy = value } - if value, ok := mc.mutation.CollectedAt(); ok { - _spec.SetField(metric.FieldCollectedAt, field.TypeTime, value) - _node.CollectedAt = value + if value, ok := mc.mutation.ReceivedAt(); ok { + _spec.SetField(metric.FieldReceivedAt, field.TypeTime, value) + _node.ReceivedAt = value } if value, ok := mc.mutation.PushedAt(); ok { _spec.SetField(metric.FieldPushedAt, field.TypeTime, value) diff --git a/pkg/database/ent/migrate/schema.go b/pkg/database/ent/migrate/schema.go index 60bf72a486b..986f5bc8c67 100644 --- a/pkg/database/ent/migrate/schema.go +++ b/pkg/database/ent/migrate/schema.go @@ -254,7 +254,7 @@ var ( {Name: "id", Type: field.TypeInt, Increment: true}, {Name: "generated_type", Type: field.TypeEnum, Enums: []string{"LP", "RC"}}, {Name: "generated_by", Type: field.TypeString}, - {Name: "collected_at", Type: field.TypeTime}, + {Name: "received_at", Type: field.TypeTime}, {Name: "pushed_at", Type: field.TypeTime, Nullable: true}, {Name: "payload", Type: field.TypeString, Size: 
2147483647}, } @@ -263,13 +263,6 @@ var ( Name: "metrics", Columns: MetricsColumns, PrimaryKey: []*schema.Column{MetricsColumns[0]}, - Indexes: []*schema.Index{ - { - Name: "metric_generated_type_generated_by_collected_at", - Unique: true, - Columns: []*schema.Column{MetricsColumns[1], MetricsColumns[2], MetricsColumns[3]}, - }, - }, } // Tables holds all the tables in the schema. Tables = []*schema.Table{ diff --git a/pkg/database/ent/mutation.go b/pkg/database/ent/mutation.go index 5b70457c512..5c6596f3db4 100644 --- a/pkg/database/ent/mutation.go +++ b/pkg/database/ent/mutation.go @@ -8640,7 +8640,7 @@ type MetricMutation struct { id *int generated_type *metric.GeneratedType generated_by *string - collected_at *time.Time + received_at *time.Time pushed_at *time.Time payload *string clearedFields map[string]struct{} @@ -8819,40 +8819,40 @@ func (m *MetricMutation) ResetGeneratedBy() { m.generated_by = nil } -// SetCollectedAt sets the "collected_at" field. -func (m *MetricMutation) SetCollectedAt(t time.Time) { - m.collected_at = &t +// SetReceivedAt sets the "received_at" field. +func (m *MetricMutation) SetReceivedAt(t time.Time) { + m.received_at = &t } -// CollectedAt returns the value of the "collected_at" field in the mutation. -func (m *MetricMutation) CollectedAt() (r time.Time, exists bool) { - v := m.collected_at +// ReceivedAt returns the value of the "received_at" field in the mutation. +func (m *MetricMutation) ReceivedAt() (r time.Time, exists bool) { + v := m.received_at if v == nil { return } return *v, true } -// OldCollectedAt returns the old "collected_at" field's value of the Metric entity. +// OldReceivedAt returns the old "received_at" field's value of the Metric entity. // If the Metric object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
-func (m *MetricMutation) OldCollectedAt(ctx context.Context) (v time.Time, err error) { +func (m *MetricMutation) OldReceivedAt(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldCollectedAt is only allowed on UpdateOne operations") + return v, errors.New("OldReceivedAt is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldCollectedAt requires an ID field in the mutation") + return v, errors.New("OldReceivedAt requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldCollectedAt: %w", err) + return v, fmt.Errorf("querying old value for OldReceivedAt: %w", err) } - return oldValue.CollectedAt, nil + return oldValue.ReceivedAt, nil } -// ResetCollectedAt resets all changes to the "collected_at" field. -func (m *MetricMutation) ResetCollectedAt() { - m.collected_at = nil +// ResetReceivedAt resets all changes to the "received_at" field. +func (m *MetricMutation) ResetReceivedAt() { + m.received_at = nil } // SetPushedAt sets the "pushed_at" field. 
@@ -8981,8 +8981,8 @@ func (m *MetricMutation) Fields() []string { if m.generated_by != nil { fields = append(fields, metric.FieldGeneratedBy) } - if m.collected_at != nil { - fields = append(fields, metric.FieldCollectedAt) + if m.received_at != nil { + fields = append(fields, metric.FieldReceivedAt) } if m.pushed_at != nil { fields = append(fields, metric.FieldPushedAt) @@ -9002,8 +9002,8 @@ func (m *MetricMutation) Field(name string) (ent.Value, bool) { return m.GeneratedType() case metric.FieldGeneratedBy: return m.GeneratedBy() - case metric.FieldCollectedAt: - return m.CollectedAt() + case metric.FieldReceivedAt: + return m.ReceivedAt() case metric.FieldPushedAt: return m.PushedAt() case metric.FieldPayload: @@ -9021,8 +9021,8 @@ func (m *MetricMutation) OldField(ctx context.Context, name string) (ent.Value, return m.OldGeneratedType(ctx) case metric.FieldGeneratedBy: return m.OldGeneratedBy(ctx) - case metric.FieldCollectedAt: - return m.OldCollectedAt(ctx) + case metric.FieldReceivedAt: + return m.OldReceivedAt(ctx) case metric.FieldPushedAt: return m.OldPushedAt(ctx) case metric.FieldPayload: @@ -9050,12 +9050,12 @@ func (m *MetricMutation) SetField(name string, value ent.Value) error { } m.SetGeneratedBy(v) return nil - case metric.FieldCollectedAt: + case metric.FieldReceivedAt: v, ok := value.(time.Time) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetCollectedAt(v) + m.SetReceivedAt(v) return nil case metric.FieldPushedAt: v, ok := value.(time.Time) @@ -9135,8 +9135,8 @@ func (m *MetricMutation) ResetField(name string) error { case metric.FieldGeneratedBy: m.ResetGeneratedBy() return nil - case metric.FieldCollectedAt: - m.ResetCollectedAt() + case metric.FieldReceivedAt: + m.ResetReceivedAt() return nil case metric.FieldPushedAt: m.ResetPushedAt() diff --git a/pkg/database/ent/schema/metric.go b/pkg/database/ent/schema/metric.go index b47da78bdf3..319c67b7aa7 100644 --- a/pkg/database/ent/schema/metric.go +++ 
b/pkg/database/ent/schema/metric.go @@ -3,7 +3,6 @@ package schema import ( "entgo.io/ent" "entgo.io/ent/schema/field" - "entgo.io/ent/schema/index" ) // Metric is actually a set of metrics collected by a device @@ -21,9 +20,9 @@ func (Metric) Fields() []ent.Field { field.String("generated_by"). Immutable(). Comment("Source of the metrics: machine id, bouncer name...\nIt must come from the auth middleware."), - field.Time("collected_at"). + field.Time("received_at"). Immutable(). - Comment("When the metrics are collected/calculated at the source"), + Comment("When the metrics are received by LAPI"), field.Time("pushed_at"). Nillable(). Optional(). @@ -33,11 +32,3 @@ func (Metric) Fields() []ent.Field { Comment("The actual metrics (item0)"), } } - -func (Metric) Indexes() []ent.Index { - return []ent.Index{ - // Don't store the same metrics multiple times. - index.Fields("generated_type", "generated_by", "collected_at"). - Unique(), - } -} diff --git a/pkg/database/flush.go b/pkg/database/flush.go index 5a1f0bea5bf..5d53d10c942 100644 --- a/pkg/database/flush.go +++ b/pkg/database/flush.go @@ -8,15 +8,24 @@ import ( "github.com/go-co-op/gocron" log "github.com/sirupsen/logrus" + "github.com/crowdsecurity/go-cs-lib/ptr" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" "github.com/crowdsecurity/crowdsec/pkg/database/ent/bouncer" "github.com/crowdsecurity/crowdsec/pkg/database/ent/decision" "github.com/crowdsecurity/crowdsec/pkg/database/ent/event" "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/metric" "github.com/crowdsecurity/crowdsec/pkg/types" ) +const ( + // how long to keep metrics in the local database + defaultMetricsMaxAge = 7 * 24 * time.Hour + flushInterval = 1 * time.Minute +) + func (c *Client) StartFlushScheduler(config *csconfig.FlushDBCfg) (*gocron.Scheduler, error) { maxItems := 0 maxAge := "" @@ -91,17 +100,46 @@ func (c 
*Client) StartFlushScheduler(config *csconfig.FlushDBCfg) (*gocron.Sched } } - baJob, err := scheduler.Every(1).Minute().Do(c.FlushAgentsAndBouncers, config.AgentsGC, config.BouncersGC) + baJob, err := scheduler.Every(flushInterval).Do(c.FlushAgentsAndBouncers, config.AgentsGC, config.BouncersGC) if err != nil { return nil, fmt.Errorf("while starting FlushAgentsAndBouncers scheduler: %w", err) } baJob.SingletonMode() + + metricsJob, err := scheduler.Every(flushInterval).Do(c.flushMetrics, config.MetricsMaxAge) + if err != nil { + return nil, fmt.Errorf("while starting flushMetrics scheduler: %w", err) + } + + metricsJob.SingletonMode() + scheduler.StartAsync() return scheduler, nil } +// flushMetrics deletes metrics older than maxAge, regardless if they have been pushed to CAPI or not +func (c *Client) flushMetrics(maxAge *time.Duration) { + if maxAge == nil { + maxAge = ptr.Of(defaultMetricsMaxAge) + } + + c.Log.Debugf("flushing metrics older than %s", maxAge) + + deleted, err := c.Ent.Metric.Delete().Where( + metric.ReceivedAtLTE(time.Now().UTC().Add(-*maxAge)), + ).Exec(c.CTX) + if err != nil { + c.Log.Errorf("while flushing metrics: %s", err) + return + } + + if deleted > 0 { + c.Log.Debugf("flushed %d metrics snapshots", deleted) + } +} + func (c *Client) FlushOrphans() { /* While it has only been linked to some very corner-case bug : https://github.com/crowdsecurity/crowdsec/issues/778 */ /* We want to take care of orphaned events for which the parent alert/decision has been deleted */ @@ -117,7 +155,6 @@ func (c *Client) FlushOrphans() { eventsCount, err = c.Ent.Decision.Delete().Where( decision.Not(decision.HasOwner())).Where(decision.UntilLTE(time.Now().UTC())).Exec(c.CTX) - if err != nil { c.Log.Warningf("error while deleting orphan decisions: %s", err) return @@ -138,7 +175,6 @@ func (c *Client) flushBouncers(authType string, duration *time.Duration) { ).Where( bouncer.AuthTypeEQ(authType), ).Exec(c.CTX) - if err != nil { c.Log.Errorf("while 
auto-deleting expired bouncers (%s): %s", authType, err) return @@ -159,7 +195,6 @@ func (c *Client) flushAgents(authType string, duration *time.Duration) { machine.Not(machine.HasAlerts()), machine.AuthTypeEQ(authType), ).Exec(c.CTX) - if err != nil { c.Log.Errorf("while auto-deleting expired machines (%s): %s", authType, err) return @@ -253,7 +288,6 @@ func (c *Client) FlushAlerts(MaxAge string, MaxItems int) error { if maxid > 0 { // This may lead to orphan alerts (at least on MySQL), but the next time the flush job will run, they will be deleted deletedByNbItem, err = c.Ent.Alert.Delete().Where(alert.IDLT(maxid)).Exec(c.CTX) - if err != nil { c.Log.Errorf("FlushAlerts: Could not delete alerts: %s", err) return fmt.Errorf("could not delete alerts: %w", err) diff --git a/pkg/database/machines.go b/pkg/database/machines.go index 18fd32fdd84..21349b8b687 100644 --- a/pkg/database/machines.go +++ b/pkg/database/machines.go @@ -2,6 +2,7 @@ package database import ( "fmt" + "strings" "time" "github.com/go-openapi/strfmt" @@ -10,6 +11,8 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/database/ent" "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/schema" + "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -18,6 +21,48 @@ const ( CapiListsMachineID = types.ListOrigin ) +func (c *Client) MachineUpdateBaseMetrics(machineID string, baseMetrics models.BaseMetrics, hubItems models.HubItems, datasources map[string]int64) error { + os := baseMetrics.Os + features := strings.Join(baseMetrics.FeatureFlags, ",") + + var heartbeat time.Time + + if baseMetrics.Metrics == nil || len(baseMetrics.Metrics) == 0 { + heartbeat = time.Now().UTC() + } else { + heartbeat = time.Unix(*baseMetrics.Metrics[0].Meta.UtcNowTimestamp, 0) + } + + hubState := map[string][]schema.ItemState{} + for itemType, items := range hubItems { + hubState[itemType] = []schema.ItemState{} + for _, 
item := range items { + hubState[itemType] = append(hubState[itemType], schema.ItemState{ + Name: item.Name, + Status: item.Status, + Version: item.Version, + }) + } + } + + _, err := c.Ent.Machine. + Update(). + Where(machine.MachineIdEQ(machineID)). + SetNillableVersion(baseMetrics.Version). + SetOsname(*os.Name). + SetOsversion(*os.Version). + SetFeatureflags(features). + SetLastHeartbeat(heartbeat). + SetHubstate(hubState). + SetDatasources(datasources). + Save(c.CTX) + if err != nil { + return fmt.Errorf("unable to update base machine metrics in database: %w", err) + } + + return nil +} + func (c *Client) CreateMachine(machineID *string, password *strfmt.Password, ipAddress string, isValidated bool, force bool, authType string) (*ent.Machine, error) { hashPassword, err := bcrypt.GenerateFromPassword([]byte(*password), bcrypt.DefaultCost) if err != nil { @@ -158,7 +203,7 @@ func (c *Client) UpdateMachineScenarios(scenarios string, ID int) error { SetScenarios(scenarios). Save(c.CTX) if err != nil { - return fmt.Errorf("unable to update machine in database: %s", err) + return fmt.Errorf("unable to update machine in database: %w", err) } return nil @@ -169,7 +214,7 @@ func (c *Client) UpdateMachineIP(ipAddr string, ID int) error { SetIpAddress(ipAddr). Save(c.CTX) if err != nil { - return fmt.Errorf("unable to update machine IP in database: %s", err) + return fmt.Errorf("unable to update machine IP in database: %w", err) } return nil @@ -180,7 +225,7 @@ func (c *Client) UpdateMachineVersion(ipAddr string, ID int) error { SetVersion(ipAddr). 
Save(c.CTX) if err != nil { - return fmt.Errorf("unable to update machine version in database: %s", err) + return fmt.Errorf("unable to update machine version in database: %w", err) } return nil diff --git a/pkg/database/metrics.go b/pkg/database/metrics.go new file mode 100644 index 00000000000..3bc5e7b5d32 --- /dev/null +++ b/pkg/database/metrics.go @@ -0,0 +1,73 @@ +package database + +import ( + "fmt" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/metric" +) + +func (c *Client) CreateMetric(generatedType metric.GeneratedType, generatedBy string, receivedAt time.Time, payload string) (*ent.Metric, error) { + metric, err := c.Ent.Metric. + Create(). + SetGeneratedType(generatedType). + SetGeneratedBy(generatedBy). + SetReceivedAt(receivedAt). + SetPayload(payload). + Save(c.CTX) + if err != nil { + c.Log.Warningf("CreateMetric: %s", err) + return nil, fmt.Errorf("storing metrics snapshot for '%s' at %s: %w", generatedBy, receivedAt, InsertFail) + } + + return metric, nil +} + +func (c *Client) GetLPUsageMetricsByMachineID(machineId string) ([]*ent.Metric, error) { + metrics, err := c.Ent.Metric.Query(). + Where( + metric.GeneratedTypeEQ(metric.GeneratedTypeLP), + metric.GeneratedByEQ(machineId), + metric.PushedAtIsNil(), + ). + // XXX: do we need to sort? + Order(ent.Desc(metric.FieldReceivedAt)). + All(c.CTX) + if err != nil { + c.Log.Warningf("GetLPUsageMetricsByOrigin: %s", err) + return nil, fmt.Errorf("getting LP usage metrics by origin %s: %w", machineId, err) + } + + return metrics, nil +} + +func (c *Client) GetBouncerUsageMetricsByName(bouncerName string) ([]*ent.Metric, error) { + metrics, err := c.Ent.Metric.Query(). + Where( + metric.GeneratedTypeEQ(metric.GeneratedTypeRC), + metric.GeneratedByEQ(bouncerName), + metric.PushedAtIsNil(), + ). + Order(ent.Desc(metric.FieldReceivedAt)). 
+ All(c.CTX) + if err != nil { + c.Log.Warningf("GetBouncerUsageMetricsByName: %s", err) + return nil, fmt.Errorf("getting bouncer usage metrics by name %s: %w", bouncerName, err) + } + + return metrics, nil +} + +func (c *Client) MarkUsageMetricsAsSent(ids []int) error { + _, err := c.Ent.Metric.Update(). + Where(metric.IDIn(ids...)). + SetPushedAt(time.Now().UTC()). + Save(c.CTX) + if err != nil { + c.Log.Warningf("MarkUsageMetricsAsSent: %s", err) + return fmt.Errorf("marking usage metrics as sent: %w", err) + } + + return nil +} diff --git a/test/bats/08_metrics_bouncer.bats b/test/bats/08_metrics_bouncer.bats index 84a55dc88c1..1851ed0ac14 100644 --- a/test/bats/08_metrics_bouncer.bats +++ b/test/bats/08_metrics_bouncer.bats @@ -15,7 +15,6 @@ setup() { load "../lib/setup.sh" ./instance-data load ./instance-crowdsec start - skip "require the usage_metrics endpoint on apiserver" } teardown() { @@ -75,6 +74,18 @@ teardown() { payload=$(yq -o j '.remediation_components[0].utc_startup_timestamp = 1707399316' <<<"$payload") rune -0 curl-with-key '/v1/usage-metrics' -X POST --data "$payload" refute_output + + payload=$(yq -o j '.remediation_components[0].metrics = [{"meta": {}}]' <<<"$payload") + rune -22 curl-with-key '/v1/usage-metrics' -X POST --data "$payload" + assert_stderr --partial "error: 422" + rune -0 jq -r '.message' <(output) + assert_output - <<-EOT + validation failure list: + remediation_components.0.metrics.0.items in body is required + validation failure list: + remediation_components.0.metrics.0.meta.utc_now_timestamp in body is required + remediation_components.0.metrics.0.meta.window_size_seconds in body is required + EOT } @test "rc usage metrics (good payload)" { @@ -116,7 +127,7 @@ teardown() { rune -0 cscli metrics show bouncers -o json # aggregation is ok -- we are truncating, not rounding, because the float is mandated by swagger. 
# but without labels the origin string is empty - assert_json '{bouncers:{testbouncer:{"": {"foo": {"dogyear": 2, "pound": 5}}}}}' + assert_json '{bouncers:{testbouncer:{"": {foo: {dogyear: 2, pound: 5}}}}}' rune -0 cscli metrics show bouncers assert_output - <<-EOT @@ -137,7 +148,7 @@ teardown() { { "meta": {"utc_now_timestamp": 1707399916, "window_size_seconds":600}, "items":[ - {"name": "active_decisions", "unit": "ip", "value": 51936, "labels": {"ip_type": "ipv4", "origin": "lists:firehol_voipbl"}}, + {"name": "active_decisions", "unit": "ip", "value": 500, "labels": {"ip_type": "ipv4", "origin": "lists:firehol_voipbl"}}, {"name": "active_decisions", "unit": "ip", "value": 1, "labels": {"ip_type": "ipv6", "origin": "cscli"}}, {"name": "dropped", "unit": "byte", "value": 3800, "labels": {"ip_type": "ipv4", "origin": "CAPI"}}, {"name": "dropped", "unit": "byte", "value": 0, "labels": {"ip_type": "ipv4", "origin": "cscli"}}, @@ -191,7 +202,7 @@ teardown() { }, "lists:firehol_voipbl": { "active_decisions": { - "ip": 51936 + "ip": 500 }, "dropped": { "byte": 3847, @@ -219,14 +230,198 @@ teardown() { | cscli (manual decisions) | 1 | 380 | 10 | - | - | | lists:anotherlist | - | 0 | 0 | - | - | | lists:firehol_cruzit_web_attacks | - | 1.03k | 23 | - | - | - | lists:firehol_voipbl | 51.94k | 3.85k | 58 | - | - | + | lists:firehol_voipbl | 500 | 3.85k | 58 | - | - | + +----------------------------------+------------------+---------+---------+---------+-------+ + | Total | 501 | 9.06k | 191 | 2 | 5 | + +----------------------------------+------------------+---------+---------+---------+-------+ + EOT + + # active_decisions is actually a gauge: values should not be aggregated, keep only the latest one + + payload=$(yq -o j ' + .remediation_components[0].metrics = [ + { + "meta": {"utc_now_timestamp": 1707450000, "window_size_seconds":600}, + "items":[ + {"name": "active_decisions", "unit": "ip", "value": 250, "labels": {"ip_type": "ipv4", "origin": 
"lists:firehol_voipbl"}}, + {"name": "active_decisions", "unit": "ip", "value": 10, "labels": {"ip_type": "ipv6", "origin": "cscli"}} + ] + } + ] | + .remediation_components[0].type = "crowdsec-firewall-bouncer" + ' <<<"$payload") + + rune -0 curl-with-key '/v1/usage-metrics' -X POST --data "$payload" + rune -0 cscli metrics show bouncers -o json + assert_json '{ + "bouncers": { + "testbouncer": { + "": { + "foo": { + "dogyear": 2, + "pound": 5 + } + }, + "CAPI": { + "dropped": { + "byte": 3800, + "packet": 100 + } + }, + "cscli": { + "active_decisions": { + "ip": 10 + }, + "dropped": { + "byte": 380, + "packet": 10 + } + }, + "lists:firehol_cruzit_web_attacks": { + "dropped": { + "byte": 1034, + "packet": 23 + } + }, + "lists:firehol_voipbl": { + "active_decisions": { + "ip": 250 + }, + "dropped": { + "byte": 3847, + "packet": 58 + }, + }, + "lists:anotherlist": { + "dropped": { + "byte": 0, + "packet": 0 + } + } + } + } + }' + + rune -0 cscli metrics show bouncers + assert_output - <<-EOT + Bouncer Metrics (testbouncer) since 2024-02-08 13:35:16 +0000 UTC: + +----------------------------------+------------------+-------------------+-----------------+ + | Origin | active_decisions | dropped | foo | + | | IPs | bytes | packets | dogyear | pound | + +----------------------------------+------------------+---------+---------+---------+-------+ + | CAPI (community blocklist) | - | 3.80k | 100 | - | - | + | cscli (manual decisions) | 10 | 380 | 10 | - | - | + | lists:anotherlist | - | 0 | 0 | - | - | + | lists:firehol_cruzit_web_attacks | - | 1.03k | 23 | - | - | + | lists:firehol_voipbl | 250 | 3.85k | 58 | - | - | +----------------------------------+------------------+---------+---------+---------+-------+ - | Total | 51.94k | 9.06k | 191 | 2 | 5 | + | Total | 260 | 9.06k | 191 | 2 | 5 | +----------------------------------+------------------+---------+---------+---------+-------+ EOT +} - # TODO: multiple item lists +@test "rc usage metrics (unknown metrics)" { + # 
new metrics are introduced in a new bouncer version, unknown by this version of cscli: some are gauges, some are not + + API_KEY=$(cscli bouncers add testbouncer -o raw) + export API_KEY + + payload=$(yq -o j <<-EOT + remediation_components: + - version: "v1.0" + utc_startup_timestamp: 1707369316 + log_processors: [] + EOT + ) + + payload=$(yq -o j ' + .remediation_components[0].metrics = [ + { + "meta": {"utc_now_timestamp": 1707460000, "window_size_seconds":600}, + "items":[ + {"name": "ima_gauge", "unit": "second", "value": 30, "labels": {"origin": "cscli"}}, + {"name": "notagauge", "unit": "inch", "value": 15, "labels": {"origin": "cscli"}} + ] + }, { + "meta": {"utc_now_timestamp": 1707450000, "window_size_seconds":600}, + "items":[ + {"name": "ima_gauge", "unit": "second", "value": 20, "labels": {"origin": "cscli"}}, + {"name": "notagauge", "unit": "inch", "value": 10, "labels": {"origin": "cscli"}} + ] + } + ] | + .remediation_components[0].type = "crowdsec-firewall-bouncer" + ' <<<"$payload") + + rune -0 curl-with-key '/v1/usage-metrics' -X POST --data "$payload" + + rune -0 cscli metrics show bouncers -o json + assert_json '{bouncers: {testbouncer: {cscli: {ima_gauge: {second: 30}, notagauge: {inch: 25}}}}}' + rune -0 cscli metrics show bouncers + assert_output - <<-EOT + Bouncer Metrics (testbouncer) since 2024-02-09 03:40:00 +0000 UTC: + +--------------------------+--------+-----------+ + | Origin | ima | notagauge | + | | second | inch | + +--------------------------+--------+-----------+ + | cscli (manual decisions) | 30 | 25 | + +--------------------------+--------+-----------+ + | Total | 30 | 25 | + +--------------------------+--------+-----------+ + EOT +} + +@test "rc usage metrics (ipv4/ipv6)" { + # gauge metrics are not aggregated over time, but they are over ip type + + API_KEY=$(cscli bouncers add testbouncer -o raw) + export API_KEY + + payload=$(yq -o j <<-EOT + remediation_components: + - version: "v1.0" + utc_startup_timestamp: 1707369316 
+ log_processors: [] + EOT + ) + + payload=$(yq -o j ' + .remediation_components[0].metrics = [ + { + "meta": {"utc_now_timestamp": 1707460000, "window_size_seconds":600}, + "items":[ + {"name": "active_decisions", "unit": "ip", "value": 200, "labels": {"ip_type": "ipv4", "origin": "cscli"}}, + {"name": "active_decisions", "unit": "ip", "value": 30, "labels": {"ip_type": "ipv6", "origin": "cscli"}} + ] + }, { + "meta": {"utc_now_timestamp": 1707450000, "window_size_seconds":600}, + "items":[ + {"name": "active_decisions", "unit": "ip", "value": 400, "labels": {"ip_type": "ipv4", "origin": "cscli"}}, + {"name": "active_decisions", "unit": "ip", "value": 50, "labels": {"ip_type": "ipv6", "origin": "cscli"}} + ] + } + ] | + .remediation_components[0].type = "crowdsec-firewall-bouncer" + ' <<<"$payload") + + rune -0 curl-with-key '/v1/usage-metrics' -X POST --data "$payload" + + rune -0 cscli metrics show bouncers -o json + assert_json '{bouncers: {testbouncer: {cscli: {active_decisions: {ip: 230}}}}}' + + rune -0 cscli metrics show bouncers + assert_output - <<-EOT + Bouncer Metrics (testbouncer) since 2024-02-09 03:40:00 +0000 UTC: + +--------------------------+------------------+ + | Origin | active_decisions | + | | IPs | + +--------------------------+------------------+ + | cscli (manual decisions) | 230 | + +--------------------------+------------------+ + | Total | 230 | + +--------------------------+------------------+ + EOT } @test "rc usage metrics (multiple bouncers)" { diff --git a/test/bats/08_metrics_machines.bats b/test/bats/08_metrics_machines.bats index e63078124a9..3b73839e753 100644 --- a/test/bats/08_metrics_machines.bats +++ b/test/bats/08_metrics_machines.bats @@ -15,7 +15,6 @@ setup() { load "../lib/setup.sh" ./instance-data load ./instance-crowdsec start - skip "require the usage_metrics endpoint on apiserver" } teardown() { diff --git a/test/bats/11_bouncers_tls.bats b/test/bats/11_bouncers_tls.bats index 849b3a5b35c..554308ae962 100644 --- 
a/test/bats/11_bouncers_tls.bats +++ b/test/bats/11_bouncers_tls.bats @@ -162,6 +162,35 @@ teardown() { rune cscli bouncers delete localhost@127.0.0.1 } +@test "a bouncer authenticated with TLS can send metrics" { + payload=$(yq -o j <<-EOT + remediation_components: [] + log_processors: [] + EOT + ) + + # with mutual authentication there is no api key, so it's detected as RC if user agent != crowdsec + + rune -22 curl --fail-with-body -sS \ + --cert "$tmpdir/leaf.pem" \ + --key "$tmpdir/leaf-key.pem" \ + --cacert "$tmpdir/bundle.pem" \ + https://localhost:8080/v1/usage-metrics -X POST --data "$payload" + assert_stderr --partial 'error: 400' + assert_json '{message: "Missing remediation component data"}' + + rune -22 curl --fail-with-body -sS \ + --cert "$tmpdir/leaf.pem" \ + --key "$tmpdir/leaf-key.pem" \ + --cacert "$tmpdir/bundle.pem" \ + --user-agent "crowdsec/someversion" \ + https://localhost:8080/v1/usage-metrics -X POST --data "$payload" + assert_stderr --partial 'error: 401' + assert_json '{code:401, message: "cookie token is empty"}' + + rune cscli bouncers delete localhost@127.0.0.1 +} + @test "simulate a bouncer request with an invalid cert" { rune -77 curl --fail-with-body -sS \ --cert "$tmpdir/leaf_invalid.pem" \ From 24bd8bb92cbb045cc4259bbc709a6145e1ee352b Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 24 Jul 2024 10:50:19 +0200 Subject: [PATCH 236/318] perf: retrieve unsorted metrics (#3148) --- pkg/database/metrics.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/pkg/database/metrics.go b/pkg/database/metrics.go index 3bc5e7b5d32..7626c39f6f1 100644 --- a/pkg/database/metrics.go +++ b/pkg/database/metrics.go @@ -31,8 +31,6 @@ func (c *Client) GetLPUsageMetricsByMachineID(machineId string) ([]*ent.Metric, metric.GeneratedByEQ(machineId), metric.PushedAtIsNil(), ). - // XXX: do we need to sort? - Order(ent.Desc(metric.FieldReceivedAt)). 
All(c.CTX) if err != nil { c.Log.Warningf("GetLPUsageMetricsByOrigin: %s", err) @@ -49,7 +47,6 @@ func (c *Client) GetBouncerUsageMetricsByName(bouncerName string) ([]*ent.Metric metric.GeneratedByEQ(bouncerName), metric.PushedAtIsNil(), ). - Order(ent.Desc(metric.FieldReceivedAt)). All(c.CTX) if err != nil { c.Log.Warningf("GetBouncerUsageMetricsByName: %s", err) From a7ec842bce150f0f9738cac5f59d603df7cf90fb Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 24 Jul 2024 11:00:38 +0200 Subject: [PATCH 237/318] docker: symlink all data files to the staging area (#3120) --- docker/docker_start.sh | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/docker/docker_start.sh b/docker/docker_start.sh index 4db421e7c87..0ae8841e029 100755 --- a/docker/docker_start.sh +++ b/docker/docker_start.sh @@ -213,15 +213,16 @@ if [ -n "$CERT_FILE" ] || [ -n "$KEY_FILE" ] ; then export LAPI_KEY_FILE=${LAPI_KEY_FILE:-$KEY_FILE} fi -# Check and prestage databases -for geodb in GeoLite2-ASN.mmdb GeoLite2-City.mmdb; do - # We keep the pre-populated geoip databases in /staging instead of /var, - # because if the data directory is bind-mounted from the host, it will be - # empty and the files will be out of reach, requiring a runtime download. - # We link to them to save about 80Mb compared to cp/mv. - if [ ! -e "/var/lib/crowdsec/data/$geodb" ] && [ -e "/staging/var/lib/crowdsec/data/$geodb" ]; then - mkdir -p /var/lib/crowdsec/data - ln -s "/staging/var/lib/crowdsec/data/$geodb" /var/lib/crowdsec/data/ +# Link the preloaded data files when the data dir is mounted (common case) +# The symlinks can be overridden by hub upgrade +for target in "/staging/var/lib/crowdsec/data"/*; do + fname="$(basename "$target")" + # skip the db and wal files + if [[ $fname == crowdsec.db* ]]; then + continue + fi + if [ ! 
-e "/var/lib/crowdsec/data/$fname" ]; then + ln -s "$target" "/var/lib/crowdsec/data/$fname" fi done From 36d15fedce1540bf957d6dedcb5193783720ce21 Mon Sep 17 00:00:00 2001 From: Manuel Sabban Date: Wed, 24 Jul 2024 12:20:23 +0200 Subject: [PATCH 238/318] Use the new hub api url (#3132) * new hapi url * mock url in tests has to be updated --------- Co-authored-by: marco --- pkg/csconfig/cscli.go | 20 +++++++-------- pkg/cwhub/cwhub_test.go | 18 ++++++------- pkg/cwhub/doc.go | 48 +++++++++++++++++------------------ pkg/cwhub/hub_test.go | 2 +- pkg/cwhub/itemupgrade_test.go | 4 +-- 5 files changed, 45 insertions(+), 47 deletions(-) diff --git a/pkg/csconfig/cscli.go b/pkg/csconfig/cscli.go index 7fff03864ef..9393156c0ed 100644 --- a/pkg/csconfig/cscli.go +++ b/pkg/csconfig/cscli.go @@ -6,18 +6,18 @@ import ( /*cscli specific config, such as hub directory*/ type CscliCfg struct { - Output string `yaml:"output,omitempty"` - Color string `yaml:"color,omitempty"` - HubBranch string `yaml:"hub_branch"` - HubURLTemplate string `yaml:"__hub_url_template__,omitempty"` - SimulationConfig *SimulationConfig `yaml:"-"` - DbConfig *DatabaseCfg `yaml:"-"` - - SimulationFilePath string `yaml:"-"` - PrometheusUrl string `yaml:"prometheus_uri"` + Output string `yaml:"output,omitempty"` + Color string `yaml:"color,omitempty"` + HubBranch string `yaml:"hub_branch"` + HubURLTemplate string `yaml:"__hub_url_template__,omitempty"` + SimulationConfig *SimulationConfig `yaml:"-"` + DbConfig *DatabaseCfg `yaml:"-"` + + SimulationFilePath string `yaml:"-"` + PrometheusUrl string `yaml:"prometheus_uri"` } -const defaultHubURLTemplate = "https://hub-cdn.crowdsec.net/%s/%s" +const defaultHubURLTemplate = "https://cdn-hub.crowdsec.net/crowdsecurity/%s/%s" func (c *Config) loadCSCLI() error { if c.Cscli == nil { diff --git a/pkg/cwhub/cwhub_test.go b/pkg/cwhub/cwhub_test.go index d11ed2b9a95..a4641483622 100644 --- a/pkg/cwhub/cwhub_test.go +++ b/pkg/cwhub/cwhub_test.go @@ -16,7 +16,7 @@ import ( 
"github.com/crowdsecurity/crowdsec/pkg/csconfig" ) -const mockURLTemplate = "https://hub-cdn.crowdsec.net/%s/%s" +const mockURLTemplate = "https://cdn-hub.crowdsec.net/crowdsecurity/%s/%s" /* To test : @@ -143,18 +143,18 @@ func fileToStringX(path string) string { func setResponseByPath() { responseByPath = map[string]string{ - "/master/parsers/s01-parse/crowdsecurity/foobar_parser.yaml": fileToStringX("./testdata/foobar_parser.yaml"), - "/master/parsers/s01-parse/crowdsecurity/foobar_subparser.yaml": fileToStringX("./testdata/foobar_parser.yaml"), - "/master/collections/crowdsecurity/test_collection.yaml": fileToStringX("./testdata/collection_v1.yaml"), - "/master/.index.json": fileToStringX("./testdata/index1.json"), - "/master/scenarios/crowdsecurity/foobar_scenario.yaml": `filter: true + "/crowdsecurity/master/parsers/s01-parse/crowdsecurity/foobar_parser.yaml": fileToStringX("./testdata/foobar_parser.yaml"), + "/crowdsecurity/master/parsers/s01-parse/crowdsecurity/foobar_subparser.yaml": fileToStringX("./testdata/foobar_parser.yaml"), + "/crowdsecurity/master/collections/crowdsecurity/test_collection.yaml": fileToStringX("./testdata/collection_v1.yaml"), + "/crowdsecurity/master/.index.json": fileToStringX("./testdata/index1.json"), + "/crowdsecurity/master/scenarios/crowdsecurity/foobar_scenario.yaml": `filter: true name: crowdsecurity/foobar_scenario`, - "/master/scenarios/crowdsecurity/barfoo_scenario.yaml": `filter: true + "/crowdsecurity/master/scenarios/crowdsecurity/barfoo_scenario.yaml": `filter: true name: crowdsecurity/foobar_scenario`, - "/master/collections/crowdsecurity/foobar_subcollection.yaml": ` + "/crowdsecurity/master/collections/crowdsecurity/foobar_subcollection.yaml": ` blah: blalala qwe: jejwejejw`, - "/master/collections/crowdsecurity/foobar.yaml": ` + "/crowdsecurity/master/collections/crowdsecurity/foobar.yaml": ` blah: blalala qwe: jejwejejw`, } diff --git a/pkg/cwhub/doc.go b/pkg/cwhub/doc.go index 8cbf77ba00f..89d8de0fa8b 100644 
--- a/pkg/cwhub/doc.go +++ b/pkg/cwhub/doc.go @@ -2,10 +2,10 @@ // // # Definitions // -// - A hub ITEM is a file that defines a parser, a scenario, a collection... in the case of a collection, it has dependencies on other hub items. -// - The hub INDEX is a JSON file that contains a tree of available hub items. -// - A REMOTE HUB is an HTTP server that hosts the hub index and the hub items. It can serve from several branches, usually linked to the CrowdSec version. -// - A LOCAL HUB is a directory that contains a copy of the hub index and the downloaded hub items. +// - A hub ITEM is a file that defines a parser, a scenario, a collection... in the case of a collection, it has dependencies on other hub items. +// - The hub INDEX is a JSON file that contains a tree of available hub items. +// - A REMOTE HUB is an HTTP server that hosts the hub index and the hub items. It can serve from several branches, usually linked to the CrowdSec version. +// - A LOCAL HUB is a directory that contains a copy of the hub index and the downloaded hub items. // // Once downloaded, hub items can be installed by linking to them from the configuration directory. 
// If an item is present in the configuration directory but it's not a link to the local hub, it is @@ -17,15 +17,15 @@ // // For the local hub (HubDir = /etc/crowdsec/hub): // -// - /etc/crowdsec/hub/.index.json -// - /etc/crowdsec/hub/parsers/{stage}/{author}/{parser-name}.yaml -// - /etc/crowdsec/hub/scenarios/{author}/{scenario-name}.yaml +// - /etc/crowdsec/hub/.index.json +// - /etc/crowdsec/hub/parsers/{stage}/{author}/{parser-name}.yaml +// - /etc/crowdsec/hub/scenarios/{author}/{scenario-name}.yaml // // For the configuration directory (InstallDir = /etc/crowdsec): // -// - /etc/crowdsec/parsers/{stage}/{parser-name.yaml} -> /etc/crowdsec/hub/parsers/{stage}/{author}/{parser-name}.yaml -// - /etc/crowdsec/scenarios/{scenario-name.yaml} -> /etc/crowdsec/hub/scenarios/{author}/{scenario-name}.yaml -// - /etc/crowdsec/scenarios/local-scenario.yaml +// - /etc/crowdsec/parsers/{stage}/{parser-name.yaml} -> /etc/crowdsec/hub/parsers/{stage}/{author}/{parser-name}.yaml +// - /etc/crowdsec/scenarios/{scenario-name.yaml} -> /etc/crowdsec/hub/scenarios/{author}/{scenario-name}.yaml +// - /etc/crowdsec/scenarios/local-scenario.yaml // // Note that installed items are not grouped by author, this may change in the future if we want to // support items with the same name from different authors. @@ -35,11 +35,10 @@ // Additionally, an item can reference a DATA SET that is installed in a different location than // the item itself. These files are stored in the data directory (InstallDataDir = /var/lib/crowdsec/data). 
// -// - /var/lib/crowdsec/data/http_path_traversal.txt -// - /var/lib/crowdsec/data/jira_cve_2021-26086.txt -// - /var/lib/crowdsec/data/log4j2_cve_2021_44228.txt -// - /var/lib/crowdsec/data/sensitive_data.txt -// +// - /var/lib/crowdsec/data/http_path_traversal.txt +// - /var/lib/crowdsec/data/jira_cve_2021-26086.txt +// - /var/lib/crowdsec/data/log4j2_cve_2021_44228.txt +// - /var/lib/crowdsec/data/sensitive_data.txt // // # Using the package // @@ -87,13 +86,13 @@ // // You can also install items if they have already been downloaded: // -// // install a parser -// force := false -// downloadOnly := false -// err := parser.Install(force, downloadOnly) -// if err != nil { -// return fmt.Errorf("unable to install parser: %w", err) -// } +// // install a parser +// force := false +// downloadOnly := false +// err := parser.Install(force, downloadOnly) +// if err != nil { +// return fmt.Errorf("unable to install parser: %w", err) +// } // // As soon as you try to install an item that is not downloaded or is not up-to-date (meaning its computed hash // does not correspond to the latest version available in the index), a download will be attempted and you'll @@ -101,8 +100,8 @@ // // To provide the remote hub configuration, use the second parameter of NewHub(): // -// remoteHub := cwhub.RemoteHubCfg{ -// URLTemplate: "https://hub-cdn.crowdsec.net/%s/%s", +// remoteHub := cwhub.RemoteHubCfg{ +// URLTemplate: "https://cdn-hub.crowdsec.net/crowdsecurity/%s/%s", // Branch: "master", // IndexPath: ".index.json", // } @@ -124,5 +123,4 @@ // // Note that the command will fail if the hub has already been synced. If you want to do it (ex. after a configuration // change the application is notified with SIGHUP) you have to instantiate a new hub object and dispose of the old one. 
-// package cwhub diff --git a/pkg/cwhub/hub_test.go b/pkg/cwhub/hub_test.go index 3d4ae5793b3..1c2c9ccceca 100644 --- a/pkg/cwhub/hub_test.go +++ b/pkg/cwhub/hub_test.go @@ -65,7 +65,7 @@ func TestUpdateIndex(t *testing.T) { fmt.Println("Test 'bad domain'") hub.remote = &RemoteHubCfg{ - URLTemplate: "https://baddomain/%s/%s", + URLTemplate: "https://baddomain/crowdsecurity/%s/%s", Branch: "master", IndexPath: ".index.json", } diff --git a/pkg/cwhub/itemupgrade_test.go b/pkg/cwhub/itemupgrade_test.go index 5d302db3345..5f9e4d1944e 100644 --- a/pkg/cwhub/itemupgrade_test.go +++ b/pkg/cwhub/itemupgrade_test.go @@ -218,6 +218,6 @@ func assertCollectionDepsInstalled(t *testing.T, hub *Hub, collection string) { } func pushUpdateToCollectionInHub() { - responseByPath["/master/.index.json"] = fileToStringX("./testdata/index2.json") - responseByPath["/master/collections/crowdsecurity/test_collection.yaml"] = fileToStringX("./testdata/collection_v2.yaml") + responseByPath["/crowdsecurity/master/.index.json"] = fileToStringX("./testdata/index2.json") + responseByPath["/crowdsecurity/master/collections/crowdsecurity/test_collection.yaml"] = fileToStringX("./testdata/collection_v2.yaml") } From 20067a85a0df105f921d81c5c26ca6ef1296fb94 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 24 Jul 2024 16:29:38 +0200 Subject: [PATCH 239/318] cscli/hub: don't return error if some file can't be recognized (#3150) In k8s there can be extra directories while mounting config maps, which leads to a failure while parsing the hub state. The PR changes these kind of errors to warnings. 
--- pkg/cwhub/sync.go | 6 ++++-- test/bats/20_hub_items.bats | 7 +++++++ 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/pkg/cwhub/sync.go b/pkg/cwhub/sync.go index fd5d6b81220..38bb376ae3b 100644 --- a/pkg/cwhub/sync.go +++ b/pkg/cwhub/sync.go @@ -1,6 +1,7 @@ package cwhub import ( + "errors" "fmt" "os" "path/filepath" @@ -99,7 +100,7 @@ func (h *Hub) getItemFileInfo(path string, logger *logrus.Logger) (*itemFileInfo if ret.ftype != PARSERS && ret.ftype != POSTOVERFLOWS { if !slices.Contains(ItemTypes, ret.stage) { - return nil, fmt.Errorf("unknown configuration type for file '%s'", path) + return nil, errors.New("unknown configuration type") } ret.ftype = ret.stage @@ -196,7 +197,8 @@ func (h *Hub) itemVisit(path string, f os.DirEntry, err error) error { info, err := h.getItemFileInfo(path, h.logger) if err != nil { - return err + h.logger.Warningf("Ignoring file %s: %s", path, err) + return nil } // non symlinks are local user files or hub files diff --git a/test/bats/20_hub_items.bats b/test/bats/20_hub_items.bats index 1846b3e424c..214d07d927f 100644 --- a/test/bats/20_hub_items.bats +++ b/test/bats/20_hub_items.bats @@ -193,3 +193,10 @@ teardown() { rune -0 jq -c '.tainted' <(output) assert_output 'false' } + +@test "skip files if we can't guess their type" { + rune -0 mkdir -p "$CONFIG_DIR/scenarios/foo" + rune -0 touch "$CONFIG_DIR/scenarios/foo/bar.yaml" + rune -0 cscli hub list + assert_stderr --partial "Ignoring file $CONFIG_DIR/scenarios/foo/bar.yaml: unknown configuration type" +} From 6f5d75c5f13229e603fbcdc725c51665fabc2c92 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 29 Jul 2024 14:13:06 +0200 Subject: [PATCH 240/318] cscli metrics: explicit message "no bouncer metrics found" (#3155) --- cmd/crowdsec-cli/climetrics/statbouncer.go | 9 ++++++++- test/bats/08_metrics_bouncer.bats | 11 ++++++++--- 2 files changed, 16 insertions(+), 4 deletions(-) diff --git 
a/cmd/crowdsec-cli/climetrics/statbouncer.go b/cmd/crowdsec-cli/climetrics/statbouncer.go index 7d80e902961..62e68b6bc41 100644 --- a/cmd/crowdsec-cli/climetrics/statbouncer.go +++ b/cmd/crowdsec-cli/climetrics/statbouncer.go @@ -447,8 +447,15 @@ func (s *statBouncer) bouncerTable(out io.Writer, bouncerName string, wantColor } // Table displays a table of metrics for each bouncer -func (s *statBouncer) Table(out io.Writer, wantColor string, noUnit bool, _ bool) { +func (s *statBouncer) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { + found := false + for _, bouncerName := range maptools.SortedKeys(s.aggOverOrigin) { s.bouncerTable(out, bouncerName, wantColor, noUnit) + found = true + } + + if !found && showEmpty { + io.WriteString(out, "No bouncer metrics found.\n\n") } } diff --git a/test/bats/08_metrics_bouncer.bats b/test/bats/08_metrics_bouncer.bats index 1851ed0ac14..c4dfebbab1d 100644 --- a/test/bats/08_metrics_bouncer.bats +++ b/test/bats/08_metrics_bouncer.bats @@ -23,10 +23,15 @@ teardown() { #---------- -@test "cscli metrics show bouncers" { - # there are no bouncers, so no metrics yet +@test "cscli metrics show bouncers (empty)" { + # this message is given only if we ask explicitly for bouncers + notfound="No bouncer metrics found." + rune -0 cscli metrics show bouncers - refute_output + assert_output "$notfound" + + rune -0 cscli metrics list + refute_output "$notfound" } @test "rc usage metrics (empty payload)" { From 136dba61d97021181ab574dcea90d816735d686d Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 1 Aug 2024 10:55:04 +0200 Subject: [PATCH 241/318] reduce log verbosity, minor CI fixes, lint (#3157) * pkg/cwhub: redundant log messages * CI: fixture output and elapsed time * CI: preload only essential hub items * report full version (including -rc2 etc.) 
with cscli hub update --debug * lint --- cmd/crowdsec-cli/require/branch.go | 6 +- .../modules/cloudwatch/cloudwatch.go | 31 +++++----- pkg/acquisition/modules/docker/docker.go | 17 ++--- .../modules/journalctl/journalctl.go | 11 ++-- pkg/acquisition/modules/kafka/kafka.go | 2 +- .../modules/kubernetesaudit/k8s_audit.go | 11 ++-- .../syslog/internal/parser/rfc3164/parse.go | 28 ++++----- .../syslog/internal/parser/rfc5424/parse.go | 62 +++++++++---------- pkg/acquisition/modules/syslog/syslog.go | 5 +- pkg/appsec/appsec.go | 3 +- pkg/appsec/appsec_rule/appsec_rule.go | 7 ++- pkg/csplugin/notifier.go | 4 +- pkg/csplugin/utils.go | 4 +- pkg/cwhub/iteminstall.go | 1 - pkg/cwhub/itemupgrade.go | 2 +- pkg/exprhelpers/helpers.go | 5 +- test/bin/preload-hub-items | 11 +++- test/lib/config/config-global | 4 +- test/lib/config/config-local | 5 +- 19 files changed, 119 insertions(+), 100 deletions(-) diff --git a/cmd/crowdsec-cli/require/branch.go b/cmd/crowdsec-cli/require/branch.go index 503cb6d2326..09acc0fef8a 100644 --- a/cmd/crowdsec-cli/require/branch.go +++ b/cmd/crowdsec-cli/require/branch.go @@ -12,6 +12,8 @@ import ( log "github.com/sirupsen/logrus" "golang.org/x/mod/semver" + "github.com/crowdsecurity/go-cs-lib/version" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwversion" ) @@ -74,13 +76,13 @@ func chooseBranch(ctx context.Context, cfg *csconfig.Config) string { } if csVersion == latest { - log.Debugf("Latest crowdsec version (%s), using hub branch 'master'", csVersion) + log.Debugf("Latest crowdsec version (%s), using hub branch 'master'", version.String()) return "master" } // if current version is greater than the latest we are in pre-release if semver.Compare(csVersion, latest) == 1 { - log.Debugf("Your current crowdsec version seems to be a pre-release (%s), using hub branch 'master'", csVersion) + log.Debugf("Your current crowdsec version seems to be a pre-release (%s), using hub branch 'master'", 
version.String()) return "master" } diff --git a/pkg/acquisition/modules/cloudwatch/cloudwatch.go b/pkg/acquisition/modules/cloudwatch/cloudwatch.go index 1859bbf0f84..1a78ae6fa7a 100644 --- a/pkg/acquisition/modules/cloudwatch/cloudwatch.go +++ b/pkg/acquisition/modules/cloudwatch/cloudwatch.go @@ -2,6 +2,7 @@ package cloudwatchacquisition import ( "context" + "errors" "fmt" "net/url" "os" @@ -111,7 +112,7 @@ func (cw *CloudwatchSource) UnmarshalConfig(yamlConfig []byte) error { } if len(cw.Config.GroupName) == 0 { - return fmt.Errorf("group_name is mandatory for CloudwatchSource") + return errors.New("group_name is mandatory for CloudwatchSource") } if cw.Config.Mode == "" { @@ -189,7 +190,7 @@ func (cw *CloudwatchSource) Configure(yamlConfig []byte, logger *log.Entry, Metr } else { if cw.Config.AwsRegion == nil { cw.logger.Errorf("aws_region is not specified, specify it or aws_config_dir") - return fmt.Errorf("aws_region is not specified, specify it or aws_config_dir") + return errors.New("aws_region is not specified, specify it or aws_config_dir") } os.Setenv("AWS_REGION", *cw.Config.AwsRegion) } @@ -228,7 +229,7 @@ func (cw *CloudwatchSource) newClient() error { } if sess == nil { - return fmt.Errorf("failed to create aws session") + return errors.New("failed to create aws session") } if v := os.Getenv("AWS_ENDPOINT_FORCE"); v != "" { cw.logger.Debugf("[testing] overloading endpoint with %s", v) @@ -237,7 +238,7 @@ func (cw *CloudwatchSource) newClient() error { cw.cwClient = cloudwatchlogs.New(sess) } if cw.cwClient == nil { - return fmt.Errorf("failed to create cloudwatch client") + return errors.New("failed to create cloudwatch client") } return nil } @@ -516,7 +517,7 @@ func (cw *CloudwatchSource) TailLogStream(cfg *LogStreamTailConfig, outChan chan } case <-cfg.t.Dying(): cfg.logger.Infof("logstream tail stopping") - return fmt.Errorf("killed") + return errors.New("killed") } } } @@ -527,11 +528,11 @@ func (cw *CloudwatchSource) ConfigureByDSN(dsn string, 
labels map[string]string, dsn = strings.TrimPrefix(dsn, cw.GetName()+"://") args := strings.Split(dsn, "?") if len(args) != 2 { - return fmt.Errorf("query is mandatory (at least start_date and end_date or backlog)") + return errors.New("query is mandatory (at least start_date and end_date or backlog)") } frags := strings.Split(args[0], ":") if len(frags) != 2 { - return fmt.Errorf("cloudwatch path must contain group and stream : /my/group/name:stream/name") + return errors.New("cloudwatch path must contain group and stream : /my/group/name:stream/name") } cw.Config.GroupName = frags[0] cw.Config.StreamName = &frags[1] @@ -547,7 +548,7 @@ func (cw *CloudwatchSource) ConfigureByDSN(dsn string, labels map[string]string, switch k { case "log_level": if len(v) != 1 { - return fmt.Errorf("expected zero or one value for 'log_level'") + return errors.New("expected zero or one value for 'log_level'") } lvl, err := log.ParseLevel(v[0]) if err != nil { @@ -557,14 +558,14 @@ func (cw *CloudwatchSource) ConfigureByDSN(dsn string, labels map[string]string, case "profile": if len(v) != 1 { - return fmt.Errorf("expected zero or one value for 'profile'") + return errors.New("expected zero or one value for 'profile'") } awsprof := v[0] cw.Config.AwsProfile = &awsprof cw.logger.Debugf("profile set to '%s'", *cw.Config.AwsProfile) case "start_date": if len(v) != 1 { - return fmt.Errorf("expected zero or one argument for 'start_date'") + return errors.New("expected zero or one argument for 'start_date'") } //let's reuse our parser helper so that a ton of date formats are supported strdate, startDate := parser.GenDateParse(v[0]) @@ -572,7 +573,7 @@ func (cw *CloudwatchSource) ConfigureByDSN(dsn string, labels map[string]string, cw.Config.StartTime = &startDate case "end_date": if len(v) != 1 { - return fmt.Errorf("expected zero or one argument for 'end_date'") + return errors.New("expected zero or one argument for 'end_date'") } //let's reuse our parser helper so that a ton of date 
formats are supported strdate, endDate := parser.GenDateParse(v[0]) @@ -580,7 +581,7 @@ func (cw *CloudwatchSource) ConfigureByDSN(dsn string, labels map[string]string, cw.Config.EndTime = &endDate case "backlog": if len(v) != 1 { - return fmt.Errorf("expected zero or one argument for 'backlog'") + return errors.New("expected zero or one argument for 'backlog'") } //let's reuse our parser helper so that a ton of date formats are supported duration, err := time.ParseDuration(v[0]) @@ -605,10 +606,10 @@ func (cw *CloudwatchSource) ConfigureByDSN(dsn string, labels map[string]string, } if cw.Config.StreamName == nil || cw.Config.GroupName == "" { - return fmt.Errorf("missing stream or group name") + return errors.New("missing stream or group name") } if cw.Config.StartTime == nil || cw.Config.EndTime == nil { - return fmt.Errorf("start_date and end_date or backlog are mandatory in one-shot mode") + return errors.New("start_date and end_date or backlog are mandatory in one-shot mode") } cw.Config.Mode = configuration.CAT_MODE @@ -699,7 +700,7 @@ func cwLogToEvent(log *cloudwatchlogs.OutputLogEvent, cfg *LogStreamTailConfig) l := types.Line{} evt := types.Event{} if log.Message == nil { - return evt, fmt.Errorf("nil message") + return evt, errors.New("nil message") } msg := *log.Message if cfg.PrependCloudwatchTimestamp != nil && *cfg.PrependCloudwatchTimestamp { diff --git a/pkg/acquisition/modules/docker/docker.go b/pkg/acquisition/modules/docker/docker.go index 857d7e7af78..9a6e13feee4 100644 --- a/pkg/acquisition/modules/docker/docker.go +++ b/pkg/acquisition/modules/docker/docker.go @@ -3,6 +3,7 @@ package dockeracquisition import ( "bufio" "context" + "errors" "fmt" "net/url" "regexp" @@ -88,11 +89,11 @@ func (d *DockerSource) UnmarshalConfig(yamlConfig []byte) error { } if len(d.Config.ContainerName) == 0 && len(d.Config.ContainerID) == 0 && len(d.Config.ContainerIDRegexp) == 0 && len(d.Config.ContainerNameRegexp) == 0 && !d.Config.UseContainerLabels { - return 
fmt.Errorf("no containers names or containers ID configuration provided") + return errors.New("no containers names or containers ID configuration provided") } if d.Config.UseContainerLabels && (len(d.Config.ContainerName) > 0 || len(d.Config.ContainerID) > 0 || len(d.Config.ContainerIDRegexp) > 0 || len(d.Config.ContainerNameRegexp) > 0) { - return fmt.Errorf("use_container_labels and container_name, container_id, container_id_regexp, container_name_regexp are mutually exclusive") + return errors.New("use_container_labels and container_name, container_id, container_id_regexp, container_name_regexp are mutually exclusive") } d.CheckIntervalDuration, err = time.ParseDuration(d.Config.CheckInterval) @@ -225,7 +226,7 @@ func (d *DockerSource) ConfigureByDSN(dsn string, labels map[string]string, logg switch k { case "log_level": if len(v) != 1 { - return fmt.Errorf("only one 'log_level' parameters is required, not many") + return errors.New("only one 'log_level' parameters is required, not many") } lvl, err := log.ParseLevel(v[0]) if err != nil { @@ -234,17 +235,17 @@ func (d *DockerSource) ConfigureByDSN(dsn string, labels map[string]string, logg d.logger.Logger.SetLevel(lvl) case "until": if len(v) != 1 { - return fmt.Errorf("only one 'until' parameters is required, not many") + return errors.New("only one 'until' parameters is required, not many") } d.containerLogsOptions.Until = v[0] case "since": if len(v) != 1 { - return fmt.Errorf("only one 'since' parameters is required, not many") + return errors.New("only one 'since' parameters is required, not many") } d.containerLogsOptions.Since = v[0] case "follow_stdout": if len(v) != 1 { - return fmt.Errorf("only one 'follow_stdout' parameters is required, not many") + return errors.New("only one 'follow_stdout' parameters is required, not many") } followStdout, err := strconv.ParseBool(v[0]) if err != nil { @@ -254,7 +255,7 @@ func (d *DockerSource) ConfigureByDSN(dsn string, labels map[string]string, logg 
d.containerLogsOptions.ShowStdout = followStdout case "follow_stderr": if len(v) != 1 { - return fmt.Errorf("only one 'follow_stderr' parameters is required, not many") + return errors.New("only one 'follow_stderr' parameters is required, not many") } followStdErr, err := strconv.ParseBool(v[0]) if err != nil { @@ -264,7 +265,7 @@ func (d *DockerSource) ConfigureByDSN(dsn string, labels map[string]string, logg d.containerLogsOptions.ShowStderr = followStdErr case "docker_host": if len(v) != 1 { - return fmt.Errorf("only one 'docker_host' parameters is required, not many") + return errors.New("only one 'docker_host' parameters is required, not many") } if err := client.WithHost(v[0])(dockerClient); err != nil { return err diff --git a/pkg/acquisition/modules/journalctl/journalctl.go b/pkg/acquisition/modules/journalctl/journalctl.go index e8bb5a3edd5..762dfe9ba12 100644 --- a/pkg/acquisition/modules/journalctl/journalctl.go +++ b/pkg/acquisition/modules/journalctl/journalctl.go @@ -3,6 +3,7 @@ package journalctlacquisition import ( "bufio" "context" + "errors" "fmt" "net/url" "os/exec" @@ -98,7 +99,7 @@ func (j *JournalCtlSource) runJournalCtl(out chan types.Event, t *tomb.Tomb) err if stdoutscanner == nil { cancel() cmd.Wait() - return fmt.Errorf("failed to create stdout scanner") + return errors.New("failed to create stdout scanner") } stderrScanner := bufio.NewScanner(stderr) @@ -106,7 +107,7 @@ func (j *JournalCtlSource) runJournalCtl(out chan types.Event, t *tomb.Tomb) err if stderrScanner == nil { cancel() cmd.Wait() - return fmt.Errorf("failed to create stderr scanner") + return errors.New("failed to create stderr scanner") } t.Go(func() error { return readLine(stdoutscanner, stdoutChan, errChan) @@ -189,7 +190,7 @@ func (j *JournalCtlSource) UnmarshalConfig(yamlConfig []byte) error { } if len(j.config.Filters) == 0 { - return fmt.Errorf("journalctl_filter is required") + return errors.New("journalctl_filter is required") } j.args = append(args, 
j.config.Filters...) j.src = fmt.Sprintf("journalctl-%s", strings.Join(j.config.Filters, ".")) @@ -223,7 +224,7 @@ func (j *JournalCtlSource) ConfigureByDSN(dsn string, labels map[string]string, qs := strings.TrimPrefix(dsn, "journalctl://") if len(qs) == 0 { - return fmt.Errorf("empty journalctl:// DSN") + return errors.New("empty journalctl:// DSN") } params, err := url.ParseQuery(qs) @@ -236,7 +237,7 @@ func (j *JournalCtlSource) ConfigureByDSN(dsn string, labels map[string]string, j.config.Filters = append(j.config.Filters, value...) case "log_level": if len(value) != 1 { - return fmt.Errorf("expected zero or one value for 'log_level'") + return errors.New("expected zero or one value for 'log_level'") } lvl, err := log.ParseLevel(value[0]) if err != nil { diff --git a/pkg/acquisition/modules/kafka/kafka.go b/pkg/acquisition/modules/kafka/kafka.go index f64bb1df306..ca0a7556fca 100644 --- a/pkg/acquisition/modules/kafka/kafka.go +++ b/pkg/acquisition/modules/kafka/kafka.go @@ -278,7 +278,7 @@ func (kc *KafkaConfiguration) NewReader(dialer *kafka.Dialer, logger *log.Entry) ErrorLogger: kafka.LoggerFunc(logger.Errorf), } if kc.GroupID != "" && kc.Partition != 0 { - return &kafka.Reader{}, fmt.Errorf("cannot specify both group_id and partition") + return &kafka.Reader{}, errors.New("cannot specify both group_id and partition") } if kc.GroupID != "" { rConf.GroupID = kc.GroupID diff --git a/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go b/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go index 7d27f9e0390..e48a074b764 100644 --- a/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go +++ b/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go @@ -3,6 +3,7 @@ package kubernetesauditacquisition import ( "context" "encoding/json" + "errors" "fmt" "io" "net/http" @@ -73,15 +74,15 @@ func (ka *KubernetesAuditSource) UnmarshalConfig(yamlConfig []byte) error { ka.config = k8sConfig if ka.config.ListenAddr == "" { - return fmt.Errorf("listen_addr cannot be empty") + 
return errors.New("listen_addr cannot be empty") } if ka.config.ListenPort == 0 { - return fmt.Errorf("listen_port cannot be empty") + return errors.New("listen_port cannot be empty") } if ka.config.WebhookPath == "" { - return fmt.Errorf("webhook_path cannot be empty") + return errors.New("webhook_path cannot be empty") } if ka.config.WebhookPath[0] != '/' { @@ -119,7 +120,7 @@ func (ka *KubernetesAuditSource) Configure(config []byte, logger *log.Entry, Met } func (ka *KubernetesAuditSource) ConfigureByDSN(dsn string, labels map[string]string, logger *log.Entry, uuid string) error { - return fmt.Errorf("k8s-audit datasource does not support command-line acquisition") + return errors.New("k8s-audit datasource does not support command-line acquisition") } func (ka *KubernetesAuditSource) GetMode() string { @@ -131,7 +132,7 @@ func (ka *KubernetesAuditSource) GetName() string { } func (ka *KubernetesAuditSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error { - return fmt.Errorf("k8s-audit datasource does not support one-shot acquisition") + return errors.New("k8s-audit datasource does not support one-shot acquisition") } func (ka *KubernetesAuditSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { diff --git a/pkg/acquisition/modules/syslog/internal/parser/rfc3164/parse.go b/pkg/acquisition/modules/syslog/internal/parser/rfc3164/parse.go index 3b59a806b8b..66d842ed519 100644 --- a/pkg/acquisition/modules/syslog/internal/parser/rfc3164/parse.go +++ b/pkg/acquisition/modules/syslog/internal/parser/rfc3164/parse.go @@ -1,7 +1,7 @@ package rfc3164 import ( - "fmt" + "errors" "time" "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/syslog/internal/parser/utils" @@ -52,7 +52,7 @@ func (r *RFC3164) parsePRI() error { pri := 0 if r.buf[r.position] != '<' { - return fmt.Errorf("PRI must start with '<'") + return errors.New("PRI must start with '<'") } r.position++ @@ -64,18 +64,18 @@ func (r *RFC3164) parsePRI() error { break } if 
c < '0' || c > '9' { - return fmt.Errorf("PRI must be a number") + return errors.New("PRI must be a number") } pri = pri*10 + int(c-'0') r.position++ } if pri > 999 { - return fmt.Errorf("PRI must be up to 3 characters long") + return errors.New("PRI must be up to 3 characters long") } if r.position == r.len && r.buf[r.position-1] != '>' { - return fmt.Errorf("PRI must end with '>'") + return errors.New("PRI must end with '>'") } r.PRI = pri @@ -98,7 +98,7 @@ func (r *RFC3164) parseTimestamp() error { } } if !validTs { - return fmt.Errorf("timestamp is not valid") + return errors.New("timestamp is not valid") } if r.useCurrentYear { if r.Timestamp.Year() == 0 { @@ -122,11 +122,11 @@ func (r *RFC3164) parseHostname() error { } if r.strictHostname { if !utils.IsValidHostnameOrIP(string(hostname)) { - return fmt.Errorf("hostname is not valid") + return errors.New("hostname is not valid") } } if len(hostname) == 0 { - return fmt.Errorf("hostname is empty") + return errors.New("hostname is empty") } r.Hostname = string(hostname) return nil @@ -147,7 +147,7 @@ func (r *RFC3164) parseTag() error { r.position++ } if len(tag) == 0 { - return fmt.Errorf("tag is empty") + return errors.New("tag is empty") } r.Tag = string(tag) @@ -167,7 +167,7 @@ func (r *RFC3164) parseTag() error { break } if c < '0' || c > '9' { - return fmt.Errorf("pid inside tag must be a number") + return errors.New("pid inside tag must be a number") } tmpPid = append(tmpPid, c) r.position++ @@ -175,7 +175,7 @@ func (r *RFC3164) parseTag() error { } if hasPid && !pidEnd { - return fmt.Errorf("pid inside tag must be closed with ']'") + return errors.New("pid inside tag must be closed with ']'") } if hasPid { @@ -191,7 +191,7 @@ func (r *RFC3164) parseMessage() error { } if r.position == r.len { - return fmt.Errorf("message is empty") + return errors.New("message is empty") } c := r.buf[r.position] @@ -202,7 +202,7 @@ func (r *RFC3164) parseMessage() error { for { if r.position >= r.len { - return 
fmt.Errorf("message is empty") + return errors.New("message is empty") } c := r.buf[r.position] if c != ' ' { @@ -219,7 +219,7 @@ func (r *RFC3164) parseMessage() error { func (r *RFC3164) Parse(message []byte) error { r.len = len(message) if r.len == 0 { - return fmt.Errorf("message is empty") + return errors.New("message is empty") } r.buf = message diff --git a/pkg/acquisition/modules/syslog/internal/parser/rfc5424/parse.go b/pkg/acquisition/modules/syslog/internal/parser/rfc5424/parse.go index 8b71a77e2e3..639e91e1224 100644 --- a/pkg/acquisition/modules/syslog/internal/parser/rfc5424/parse.go +++ b/pkg/acquisition/modules/syslog/internal/parser/rfc5424/parse.go @@ -1,7 +1,7 @@ package rfc5424 import ( - "fmt" + "errors" "time" "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/syslog/internal/parser/utils" @@ -52,7 +52,7 @@ func (r *RFC5424) parsePRI() error { pri := 0 if r.buf[r.position] != '<' { - return fmt.Errorf("PRI must start with '<'") + return errors.New("PRI must start with '<'") } r.position++ @@ -64,18 +64,18 @@ func (r *RFC5424) parsePRI() error { break } if c < '0' || c > '9' { - return fmt.Errorf("PRI must be a number") + return errors.New("PRI must be a number") } pri = pri*10 + int(c-'0') r.position++ } if pri > 999 { - return fmt.Errorf("PRI must be up to 3 characters long") + return errors.New("PRI must be up to 3 characters long") } if r.position == r.len && r.buf[r.position-1] != '>' { - return fmt.Errorf("PRI must end with '>'") + return errors.New("PRI must end with '>'") } r.PRI = pri @@ -84,11 +84,11 @@ func (r *RFC5424) parsePRI() error { func (r *RFC5424) parseVersion() error { if r.buf[r.position] != '1' { - return fmt.Errorf("version must be 1") + return errors.New("version must be 1") } r.position += 2 if r.position >= r.len { - return fmt.Errorf("version must be followed by a space") + return errors.New("version must be followed by a space") } return nil } @@ -113,17 +113,17 @@ func (r *RFC5424) parseTimestamp() error { 
} if len(timestamp) == 0 { - return fmt.Errorf("timestamp is empty") + return errors.New("timestamp is empty") } if r.position == r.len { - return fmt.Errorf("EOL after timestamp") + return errors.New("EOL after timestamp") } date, err := time.Parse(VALID_TIMESTAMP, string(timestamp)) if err != nil { - return fmt.Errorf("timestamp is not valid") + return errors.New("timestamp is not valid") } r.Timestamp = date @@ -131,7 +131,7 @@ func (r *RFC5424) parseTimestamp() error { r.position++ if r.position >= r.len { - return fmt.Errorf("EOL after timestamp") + return errors.New("EOL after timestamp") } return nil @@ -156,11 +156,11 @@ func (r *RFC5424) parseHostname() error { } if r.strictHostname { if !utils.IsValidHostnameOrIP(string(hostname)) { - return fmt.Errorf("hostname is not valid") + return errors.New("hostname is not valid") } } if len(hostname) == 0 { - return fmt.Errorf("hostname is empty") + return errors.New("hostname is empty") } r.Hostname = string(hostname) return nil @@ -185,11 +185,11 @@ func (r *RFC5424) parseAppName() error { } if len(appname) == 0 { - return fmt.Errorf("appname is empty") + return errors.New("appname is empty") } if len(appname) > 48 { - return fmt.Errorf("appname is too long") + return errors.New("appname is too long") } r.Tag = string(appname) @@ -215,11 +215,11 @@ func (r *RFC5424) parseProcID() error { } if len(procid) == 0 { - return fmt.Errorf("procid is empty") + return errors.New("procid is empty") } if len(procid) > 128 { - return fmt.Errorf("procid is too long") + return errors.New("procid is too long") } r.PID = string(procid) @@ -245,11 +245,11 @@ func (r *RFC5424) parseMsgID() error { } if len(msgid) == 0 { - return fmt.Errorf("msgid is empty") + return errors.New("msgid is empty") } if len(msgid) > 32 { - return fmt.Errorf("msgid is too long") + return errors.New("msgid is too long") } r.MsgID = string(msgid) @@ -263,7 +263,7 @@ func (r *RFC5424) parseStructuredData() error { return nil } if r.buf[r.position] != '[' 
{ - return fmt.Errorf("structured data must start with '[' or be '-'") + return errors.New("structured data must start with '[' or be '-'") } prev := byte(0) for r.position < r.len { @@ -281,14 +281,14 @@ func (r *RFC5424) parseStructuredData() error { } r.position++ if !done { - return fmt.Errorf("structured data must end with ']'") + return errors.New("structured data must end with ']'") } return nil } func (r *RFC5424) parseMessage() error { if r.position == r.len { - return fmt.Errorf("message is empty") + return errors.New("message is empty") } message := []byte{} @@ -305,7 +305,7 @@ func (r *RFC5424) parseMessage() error { func (r *RFC5424) Parse(message []byte) error { r.len = len(message) if r.len == 0 { - return fmt.Errorf("syslog line is empty") + return errors.New("syslog line is empty") } r.buf = message @@ -315,7 +315,7 @@ func (r *RFC5424) Parse(message []byte) error { } if r.position >= r.len { - return fmt.Errorf("EOL after PRI") + return errors.New("EOL after PRI") } err = r.parseVersion() @@ -324,7 +324,7 @@ func (r *RFC5424) Parse(message []byte) error { } if r.position >= r.len { - return fmt.Errorf("EOL after Version") + return errors.New("EOL after Version") } err = r.parseTimestamp() @@ -333,7 +333,7 @@ func (r *RFC5424) Parse(message []byte) error { } if r.position >= r.len { - return fmt.Errorf("EOL after Timestamp") + return errors.New("EOL after Timestamp") } err = r.parseHostname() @@ -342,7 +342,7 @@ func (r *RFC5424) Parse(message []byte) error { } if r.position >= r.len { - return fmt.Errorf("EOL after hostname") + return errors.New("EOL after hostname") } err = r.parseAppName() @@ -351,7 +351,7 @@ func (r *RFC5424) Parse(message []byte) error { } if r.position >= r.len { - return fmt.Errorf("EOL after appname") + return errors.New("EOL after appname") } err = r.parseProcID() @@ -360,7 +360,7 @@ func (r *RFC5424) Parse(message []byte) error { } if r.position >= r.len { - return fmt.Errorf("EOL after ProcID") + return errors.New("EOL 
after ProcID") } err = r.parseMsgID() @@ -369,7 +369,7 @@ func (r *RFC5424) Parse(message []byte) error { } if r.position >= r.len { - return fmt.Errorf("EOL after MSGID") + return errors.New("EOL after MSGID") } err = r.parseStructuredData() @@ -378,7 +378,7 @@ func (r *RFC5424) Parse(message []byte) error { } if r.position >= r.len { - return fmt.Errorf("EOL after SD") + return errors.New("EOL after SD") } err = r.parseMessage() diff --git a/pkg/acquisition/modules/syslog/syslog.go b/pkg/acquisition/modules/syslog/syslog.go index 47940c3294c..06c32e62f77 100644 --- a/pkg/acquisition/modules/syslog/syslog.go +++ b/pkg/acquisition/modules/syslog/syslog.go @@ -1,6 +1,7 @@ package syslogacquisition import ( + "errors" "fmt" "net" "strings" @@ -79,11 +80,11 @@ func (s *SyslogSource) GetAggregMetrics() []prometheus.Collector { } func (s *SyslogSource) ConfigureByDSN(dsn string, labels map[string]string, logger *log.Entry, uuid string) error { - return fmt.Errorf("syslog datasource does not support one shot acquisition") + return errors.New("syslog datasource does not support one shot acquisition") } func (s *SyslogSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error { - return fmt.Errorf("syslog datasource does not support one shot acquisition") + return errors.New("syslog datasource does not support one shot acquisition") } func validatePort(port int) bool { diff --git a/pkg/appsec/appsec.go b/pkg/appsec/appsec.go index 7c61f2a8dfd..96f977b4738 100644 --- a/pkg/appsec/appsec.go +++ b/pkg/appsec/appsec.go @@ -1,6 +1,7 @@ package appsec import ( + "errors" "fmt" "net/http" "os" @@ -164,7 +165,7 @@ func (wc *AppsecConfig) LoadByPath(file string) error { } if wc.Name == "" { - return fmt.Errorf("name cannot be empty") + return errors.New("name cannot be empty") } if wc.LogLevel == nil { lvl := wc.Logger.Logger.GetLevel() diff --git a/pkg/appsec/appsec_rule/appsec_rule.go b/pkg/appsec/appsec_rule/appsec_rule.go index 289405ef161..136d8b11cb7 100644 --- 
a/pkg/appsec/appsec_rule/appsec_rule.go +++ b/pkg/appsec/appsec_rule/appsec_rule.go @@ -1,6 +1,7 @@ package appsec_rule import ( + "errors" "fmt" ) @@ -48,15 +49,15 @@ type CustomRule struct { func (v *CustomRule) Convert(ruleType string, appsecRuleName string) (string, []uint32, error) { if v.Zones == nil && v.And == nil && v.Or == nil { - return "", nil, fmt.Errorf("no zones defined") + return "", nil, errors.New("no zones defined") } if v.Match.Type == "" && v.And == nil && v.Or == nil { - return "", nil, fmt.Errorf("no match type defined") + return "", nil, errors.New("no match type defined") } if v.Match.Value == "" && v.And == nil && v.Or == nil { - return "", nil, fmt.Errorf("no match value defined") + return "", nil, errors.New("no match value defined") } switch ruleType { diff --git a/pkg/csplugin/notifier.go b/pkg/csplugin/notifier.go index a4f5bbc0ed8..2b5d57fbcff 100644 --- a/pkg/csplugin/notifier.go +++ b/pkg/csplugin/notifier.go @@ -2,7 +2,7 @@ package csplugin import ( "context" - "fmt" + "errors" plugin "github.com/hashicorp/go-plugin" "google.golang.org/grpc" @@ -35,7 +35,7 @@ func (m *GRPCClient) Notify(ctx context.Context, notification *protobufs.Notific return &protobufs.Empty{}, err case <-ctx.Done(): - return &protobufs.Empty{}, fmt.Errorf("timeout exceeded") + return &protobufs.Empty{}, errors.New("timeout exceeded") } } diff --git a/pkg/csplugin/utils.go b/pkg/csplugin/utils.go index 216a079d457..2e7f0c80528 100644 --- a/pkg/csplugin/utils.go +++ b/pkg/csplugin/utils.go @@ -51,7 +51,7 @@ func getUID(username string) (uint32, error) { return 0, err } if uid < 0 || uid > math.MaxInt32 { - return 0, fmt.Errorf("out of bound uid") + return 0, errors.New("out of bound uid") } return uint32(uid), nil } @@ -66,7 +66,7 @@ func getGID(groupname string) (uint32, error) { return 0, err } if gid < 0 || gid > math.MaxInt32 { - return 0, fmt.Errorf("out of bound gid") + return 0, errors.New("out of bound gid") } return uint32(gid), nil } diff --git 
a/pkg/cwhub/iteminstall.go b/pkg/cwhub/iteminstall.go index 6a16ad0a65f..7fe8ab59228 100644 --- a/pkg/cwhub/iteminstall.go +++ b/pkg/cwhub/iteminstall.go @@ -55,7 +55,6 @@ func (i *Item) Install(ctx context.Context, force bool, downloadOnly bool) error } if downloadOnly && downloaded { - i.hub.logger.Infof("Downloaded %s", i.Name) return nil } diff --git a/pkg/cwhub/itemupgrade.go b/pkg/cwhub/itemupgrade.go index 1aebb3caf29..441948c9168 100644 --- a/pkg/cwhub/itemupgrade.go +++ b/pkg/cwhub/itemupgrade.go @@ -171,7 +171,7 @@ func (i *Item) FetchContentTo(ctx context.Context, destPath string) (bool, strin downloaded, err := d.Download(ctx, url) if err != nil { - return false, "", fmt.Errorf("while downloading %s to %s: %w", i.Name, url, err) + return false, "", err } return downloaded, url, nil diff --git a/pkg/exprhelpers/helpers.go b/pkg/exprhelpers/helpers.go index b4324f8bbab..17ce468f623 100644 --- a/pkg/exprhelpers/helpers.go +++ b/pkg/exprhelpers/helpers.go @@ -3,6 +3,7 @@ package exprhelpers import ( "bufio" "encoding/base64" + "errors" "fmt" "math" "net" @@ -772,7 +773,7 @@ func ParseKV(params ...any) (any, error) { matches := keyValuePattern.FindAllStringSubmatch(blob, -1) if matches == nil { log.Errorf("could not find any key/value pair in line") - return nil, fmt.Errorf("invalid input format") + return nil, errors.New("invalid input format") } if _, ok := target[prefix]; !ok { target[prefix] = make(map[string]string) @@ -780,7 +781,7 @@ func ParseKV(params ...any) (any, error) { _, ok := target[prefix].(map[string]string) if !ok { log.Errorf("ParseKV: target is not a map[string]string") - return nil, fmt.Errorf("target is not a map[string]string") + return nil, errors.New("target is not a map[string]string") } } for _, match := range matches { diff --git a/test/bin/preload-hub-items b/test/bin/preload-hub-items index 319544d843d..79e20efbea2 100755 --- a/test/bin/preload-hub-items +++ b/test/bin/preload-hub-items @@ -11,6 +11,8 @@ THIS_DIR=$(CDPATH= cd 
-- "$(dirname -- "$0")" && pwd) echo "Pre-downloading Hub content..." +start=$(date +%s%N) + types=$("$CSCLI" hub types -o raw) for itemtype in $types; do @@ -19,9 +21,12 @@ for itemtype in $types; do #shellcheck disable=SC2086 "$CSCLI" "$itemtype" install \ $ALL_ITEMS \ - --download-only \ - --error + --download-only fi done -echo " done." \ No newline at end of file +elapsed=$((($(date +%s%N) - start)/1000000)) +# bash only does integer arithmetic, we could use bc or have some fun with sed +elapsed=$(echo "$elapsed" | sed -e 's/...$/.&/;t' -e 's/.$/.0&/') + +echo " done in $elapsed secs." diff --git a/test/lib/config/config-global b/test/lib/config/config-global index f77fb3e27bc..9b2b71c1dd1 100755 --- a/test/lib/config/config-global +++ b/test/lib/config/config-global @@ -70,7 +70,9 @@ make_init_data() { ./instance-db config-yaml ./instance-db setup - ./bin/preload-hub-items + # preload some content and data files + "$CSCLI" collections install crowdsecurity/linux --download-only + # sub-items did not respect --download-only ./bin/remove-all-hub-items # when installed packages are always using sqlite, so no need to regenerate diff --git a/test/lib/config/config-local b/test/lib/config/config-local index 76bd4c3fbce..f80c26bc652 100755 --- a/test/lib/config/config-local +++ b/test/lib/config/config-local @@ -116,7 +116,10 @@ make_init_data() { "$CSCLI" --warning hub update - ./bin/preload-hub-items + # preload some content and data files + "$CSCLI" collections install crowdsecurity/linux --download-only + # sub-items did not respect --download-only + ./bin/remove-all-hub-items # force TCP, the default would be unix socket "$CSCLI" --warning machines add githubciXXXXXXXXXXXXXXXXXXXXXXXX --url http://127.0.0.1:8080 --auto --force From 6bd4096a3eea0df42e3d82f69a77d06ceef705e1 Mon Sep 17 00:00:00 2001 From: "Thibault \"bui\" Koechlin" Date: Mon, 5 Aug 2024 12:04:07 +0200 Subject: [PATCH 242/318] fix #1742 : Allow QueryCAPIStatus to return as well enrollment status 
(#3159) * fix #1742 : Allow QueryCAPIStatus to return as well enrollment status --------- Co-authored-by: marco --- cmd/crowdsec-cli/capi.go | 72 +++++++++++++++++++++++++++---------- cmd/crowdsec-cli/support.go | 10 ++++-- 2 files changed, 62 insertions(+), 20 deletions(-) diff --git a/cmd/crowdsec-cli/capi.go b/cmd/crowdsec-cli/capi.go index 6933d355071..1888aa3545a 100644 --- a/cmd/crowdsec-cli/capi.go +++ b/cmd/crowdsec-cli/capi.go @@ -148,28 +148,53 @@ func (cli *cliCapi) newRegisterCmd() *cobra.Command { return cmd } -// QueryCAPIStatus checks if the Local API is reachable, and if the credentials are correct -func QueryCAPIStatus(hub *cwhub.Hub, credURL string, login string, password string) error { +// QueryCAPIStatus checks if the Local API is reachable, and if the credentials are correct. It then checks if the instance is enrolle in the console. +func QueryCAPIStatus(hub *cwhub.Hub, credURL string, login string, password string) (bool, bool, error) { + apiURL, err := url.Parse(credURL) if err != nil { - return fmt.Errorf("parsing api url: %w", err) + return false, false, fmt.Errorf("parsing api url: %w", err) } scenarios, err := hub.GetInstalledNamesByType(cwhub.SCENARIOS) if err != nil { - return fmt.Errorf("failed to get scenarios: %w", err) + return false, false, fmt.Errorf("failed to get scenarios: %w", err) } if len(scenarios) == 0 { - return errors.New("no scenarios installed, abort") + return false, false, errors.New("no scenarios installed, abort") } - client, err := apiclient.NewDefaultClient(apiURL, - CAPIURLPrefix, - cwversion.UserAgent(), - nil) + passwd := strfmt.Password(password) + + client, err := apiclient.NewClient(&apiclient.Config{ + MachineID: login, + Password: passwd, + Scenarios: scenarios, + UserAgent: cwversion.UserAgent(), + URL: apiURL, + //I don't believe papi is neede to check enrollement + //PapiURL: papiURL, + VersionPrefix: "v3", + UpdateScenario: func() ([]string, error) { + l_scenarios, err := 
hub.GetInstalledNamesByType(cwhub.SCENARIOS) + if err != nil { + return nil, err + } + appsecRules, err := hub.GetInstalledNamesByType(cwhub.APPSEC_RULES) + if err != nil { + return nil, err + } + ret := make([]string, 0, len(l_scenarios)+len(appsecRules)) + ret = append(ret, l_scenarios...) + ret = append(ret, appsecRules...) + + return ret, nil + }, + }) + if err != nil { - return fmt.Errorf("init default client: %w", err) + return false, false, fmt.Errorf("new client api: %w", err) } pw := strfmt.Password(password) @@ -180,12 +205,18 @@ func QueryCAPIStatus(hub *cwhub.Hub, credURL string, login string, password stri Scenarios: scenarios, } - _, _, err = client.Auth.AuthenticateWatcher(context.Background(), t) + authResp, _, err := client.Auth.AuthenticateWatcher(context.Background(), t) if err != nil { - return err + return false, false, err } - return nil + client.GetClient().Transport.(*apiclient.JWTTransport).Token = authResp.Token + + if client.IsEnrolled() { + return true, true, nil + } + return true, false, nil + } func (cli *cliCapi) status() error { @@ -205,12 +236,17 @@ func (cli *cliCapi) status() error { log.Infof("Loaded credentials from %s", cfg.API.Server.OnlineClient.CredentialsFilePath) log.Infof("Trying to authenticate with username %s on %s", cred.Login, cred.URL) - if err := QueryCAPIStatus(hub, cred.URL, cred.Login, cred.Password); err != nil { - return fmt.Errorf("failed to authenticate to Central API (CAPI): %w", err) - } - - log.Info("You can successfully interact with Central API (CAPI)") + auth, enrolled, err := QueryCAPIStatus(hub, cred.URL, cred.Login, cred.Password) + if err != nil { + return fmt.Errorf("CAPI: failed to authenticate to Central API (CAPI): %s", err) + } + if auth { + log.Info("You can successfully interact with Central API (CAPI)") + } + if enrolled { + log.Info("Your instance is enrolled in the console") + } return nil } diff --git a/cmd/crowdsec-cli/support.go b/cmd/crowdsec-cli/support.go index 
324be2710fd..ef14f90df17 100644 --- a/cmd/crowdsec-cli/support.go +++ b/cmd/crowdsec-cli/support.go @@ -260,11 +260,17 @@ func (cli *cliSupport) dumpCAPIStatus(zw *zip.Writer, hub *cwhub.Hub) error { fmt.Fprintf(out, "CAPI URL: %s\n", cred.URL) fmt.Fprintf(out, "CAPI username: %s\n", cred.Login) - if err := QueryCAPIStatus(hub, cred.URL, cred.Login, cred.Password); err != nil { + auth, enrolled, err := QueryCAPIStatus(hub, cred.URL, cred.Login, cred.Password) + if err != nil { return fmt.Errorf("could not authenticate to Central API (CAPI): %w", err) } + if auth { + fmt.Fprintln(out, "You can successfully interact with Central API (CAPI)") + } - fmt.Fprintln(out, "You can successfully interact with Central API (CAPI)") + if enrolled { + fmt.Fprintln(out, "Your instance is enrolled in the console") + } cli.writeToZip(zw, SUPPORT_CAPI_STATUS_PATH, time.Now(), out) From 1bc3b0870b8b9ca20c5027750fef1a6599a28bc7 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 7 Aug 2024 12:45:54 +0200 Subject: [PATCH 243/318] command "cscli doc --target /path/to/dir" (#3169) * command "cscli doc --target /path/to/dir" * typos and improved messages * CI: remove obsolete parameters for golangi-lint action * lint --- .github/workflows/go-tests-windows.yml | 4 ---- .github/workflows/go-tests.yml | 4 ---- .golangci.yml | 6 +----- cmd/crowdsec-cli/dashboard.go | 11 +++++----- cmd/crowdsec-cli/doc.go | 20 ++++++++++++++----- .../wineventlog/wineventlog_windows.go | 4 ++-- pkg/csplugin/utils_windows.go | 5 +++-- pkg/cwhub/iteminstall.go | 4 ++-- test/bats/01_cscli.bats | 14 +++++++++---- test/bats/20_hub_collections.bats | 2 +- test/bats/20_hub_parsers.bats | 2 +- test/bats/20_hub_postoverflows.bats | 2 +- test/bats/20_hub_scenarios.bats | 2 +- 13 files changed, 43 insertions(+), 37 deletions(-) diff --git a/.github/workflows/go-tests-windows.yml b/.github/workflows/go-tests-windows.yml index 9d8051821c8..b99291f06b7 100644 --- 
a/.github/workflows/go-tests-windows.yml +++ b/.github/workflows/go-tests-windows.yml @@ -60,7 +60,3 @@ jobs: version: v1.59 args: --issues-exit-code=1 --timeout 10m only-new-issues: false - # the cache is already managed above, enabling it here - # gives errors when extracting - skip-pkg-cache: true - skip-build-cache: true diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index ab45bd88717..a7c34adfe50 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -161,7 +161,3 @@ jobs: version: v1.59 args: --issues-exit-code=1 --timeout 10m only-new-issues: false - # the cache is already managed above, enabling it here - # gives errors when extracting - skip-pkg-cache: true - skip-build-cache: true diff --git a/.golangci.yml b/.golangci.yml index 2ac39793731..fb1dab623c1 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -271,7 +271,7 @@ linters: # - dogsled # Checks assignments with too many blank identifiers (e.g. x, _, _, _, := f()) - - errchkjson # Checks types passed to the json encoding functions. Reports unsupported types and reports occations, where the check for the returned error can be omitted. + - errchkjson # Checks types passed to the json encoding functions. Reports unsupported types and reports occasions, where the check for the returned error can be omitted. - exhaustive # check exhaustiveness of enum switch statements - gci # Gci control golang package import order and make it always deterministic. 
- godot # Check if comments end in a period @@ -387,10 +387,6 @@ issues: - perfsprint text: "fmt.Sprintf can be replaced .*" - - linters: - - perfsprint - text: "fmt.Errorf can be replaced with errors.New" - # # Will fix, easy but some neurons required # diff --git a/cmd/crowdsec-cli/dashboard.go b/cmd/crowdsec-cli/dashboard.go index beff06d478a..96a2fa38cb7 100644 --- a/cmd/crowdsec-cli/dashboard.go +++ b/cmd/crowdsec-cli/dashboard.go @@ -3,6 +3,7 @@ package main import ( + "errors" "fmt" "math" "os" @@ -277,7 +278,7 @@ cscli dashboard remove --force return fmt.Errorf("unable to ask to force: %s", err) } if !answer { - return fmt.Errorf("user stated no to continue") + return errors.New("user stated no to continue") } } if metabase.IsContainerExist(metabaseContainerID) { @@ -289,7 +290,7 @@ cscli dashboard remove --force if err == nil { // if group exist, remove it groupDelCmd, err := exec.LookPath("groupdel") if err != nil { - return fmt.Errorf("unable to find 'groupdel' command, can't continue") + return errors.New("unable to find 'groupdel' command, can't continue") } groupDel := &exec.Cmd{Path: groupDelCmd, Args: []string{groupDelCmd, crowdsecGroup}} @@ -366,7 +367,7 @@ func checkSystemMemory(forceYes *bool) error { } if !answer { - return fmt.Errorf("user stated no to continue") + return errors.New("user stated no to continue") } return nil @@ -399,7 +400,7 @@ func disclaimer(forceYes *bool) error { } if !answer { - return fmt.Errorf("user stated no to responsibilities") + return errors.New("user stated no to responsibilities") } return nil @@ -435,7 +436,7 @@ func checkGroups(forceYes *bool) (*user.Group, error) { groupAddCmd, err := exec.LookPath("groupadd") if err != nil { - return dockerGroup, fmt.Errorf("unable to find 'groupadd' command, can't continue") + return dockerGroup, errors.New("unable to find 'groupadd' command, can't continue") } groupAdd := &exec.Cmd{Path: groupAddCmd, Args: []string{groupAddCmd, crowdsecGroup}} diff --git 
a/cmd/crowdsec-cli/doc.go b/cmd/crowdsec-cli/doc.go index db1e642115e..f68d535db03 100644 --- a/cmd/crowdsec-cli/doc.go +++ b/cmd/crowdsec-cli/doc.go @@ -16,20 +16,30 @@ func NewCLIDoc() *cliDoc { } func (cli cliDoc) NewCommand(rootCmd *cobra.Command) *cobra.Command { + var target string + + const defaultTarget = "./doc" + cmd := &cobra.Command{ Use: "doc", - Short: "Generate the documentation in `./doc/`. Directory must exist.", - Args: cobra.ExactArgs(0), + Short: "Generate the documentation related to cscli commands. Target directory must exist.", + Args: cobra.NoArgs, Hidden: true, DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, _ []string) error { - if err := doc.GenMarkdownTreeCustom(rootCmd, "./doc/", cli.filePrepender, cli.linkHandler); err != nil { - return fmt.Errorf("failed to generate cobra doc: %w", err) + RunE: func(_ *cobra.Command, args []string) error { + if err := doc.GenMarkdownTreeCustom(rootCmd, target, cli.filePrepender, cli.linkHandler); err != nil { + return fmt.Errorf("failed to generate cscli documentation: %w", err) } + + fmt.Println("Documentation generated in", target) + return nil }, } + flags := cmd.Flags() + flags.StringVar(&target, "target", defaultTarget, "The target directory where the documentation will be generated") + return cmd } diff --git a/pkg/acquisition/modules/wineventlog/wineventlog_windows.go b/pkg/acquisition/modules/wineventlog/wineventlog_windows.go index 8adbf1e53c5..c6b10b7c38c 100644 --- a/pkg/acquisition/modules/wineventlog/wineventlog_windows.go +++ b/pkg/acquisition/modules/wineventlog/wineventlog_windows.go @@ -246,11 +246,11 @@ func (w *WinEventLogSource) UnmarshalConfig(yamlConfig []byte) error { } if w.config.EventChannel != "" && w.config.XPathQuery != "" { - return fmt.Errorf("event_channel and xpath_query are mutually exclusive") + return errors.New("event_channel and xpath_query are mutually exclusive") } if w.config.EventChannel == "" && w.config.XPathQuery == "" { - return 
fmt.Errorf("event_channel or xpath_query must be set") + return errors.New("event_channel or xpath_query must be set") } w.config.Mode = configuration.TAIL_MODE diff --git a/pkg/csplugin/utils_windows.go b/pkg/csplugin/utils_windows.go index 2870a2addb5..8d4956ceeeb 100644 --- a/pkg/csplugin/utils_windows.go +++ b/pkg/csplugin/utils_windows.go @@ -3,6 +3,7 @@ package csplugin import ( + "errors" "fmt" "os" "os/exec" @@ -77,14 +78,14 @@ func CheckPerms(path string) error { return fmt.Errorf("while getting owner security info: %w", err) } if !sd.IsValid() { - return fmt.Errorf("security descriptor is invalid") + return errors.New("security descriptor is invalid") } owner, _, err := sd.Owner() if err != nil { return fmt.Errorf("while getting owner: %w", err) } if !owner.IsValid() { - return fmt.Errorf("owner is invalid") + return errors.New("owner is invalid") } if !owner.Equals(systemSid) && !owner.Equals(currentUserSid) && !owner.Equals(adminSid) { diff --git a/pkg/cwhub/iteminstall.go b/pkg/cwhub/iteminstall.go index 7fe8ab59228..912897d0d7e 100644 --- a/pkg/cwhub/iteminstall.go +++ b/pkg/cwhub/iteminstall.go @@ -9,11 +9,11 @@ import ( func (i *Item) enable() error { if i.State.Installed { if i.State.Tainted { - return fmt.Errorf("%s is tainted, won't enable unless --force", i.Name) + return fmt.Errorf("%s is tainted, won't overwrite unless --force", i.Name) } if i.State.IsLocal() { - return fmt.Errorf("%s is local, won't enable", i.Name) + return fmt.Errorf("%s is local, won't overwrite", i.Name) } // if it's a collection, check sub-items even if the collection file itself is up-to-date diff --git a/test/bats/01_cscli.bats b/test/bats/01_cscli.bats index 792274cc4f4..27cfe53212b 100644 --- a/test/bats/01_cscli.bats +++ b/test/bats/01_cscli.bats @@ -366,16 +366,14 @@ teardown() { } @test "cscli doc" { - # generating documentation requires a directory named "doc" - cd "$BATS_TEST_TMPDIR" rune -1 cscli doc refute_output - assert_stderr --regexp 'failed to generate 
cobra doc: open doc/.*: no such file or directory' + assert_stderr --regexp 'failed to generate cscli documentation: open doc/.*: no such file or directory' mkdir -p doc rune -0 cscli doc - refute_output + assert_output "Documentation generated in ./doc" refute_stderr assert_file_exists "doc/cscli.md" assert_file_not_exist "doc/cscli_setup.md" @@ -385,6 +383,14 @@ teardown() { export CROWDSEC_FEATURE_CSCLI_SETUP="true" rune -0 cscli doc assert_file_exists "doc/cscli_setup.md" + + # specify a target directory + mkdir -p "$BATS_TEST_TMPDIR/doc2" + rune -0 cscli doc --target "$BATS_TEST_TMPDIR/doc2" + assert_output "Documentation generated in $BATS_TEST_TMPDIR/doc2" + refute_stderr + assert_file_exists "$BATS_TEST_TMPDIR/doc2/cscli_setup.md" + } @test "feature.yaml for subcommands" { diff --git a/test/bats/20_hub_collections.bats b/test/bats/20_hub_collections.bats index 1381fe8e55a..6822339ae40 100644 --- a/test/bats/20_hub_collections.bats +++ b/test/bats/20_hub_collections.bats @@ -177,7 +177,7 @@ teardown() { echo "dirty" >"$CONFIG_DIR/collections/sshd.yaml" rune -1 cscli collections install crowdsecurity/sshd - assert_stderr --partial "error while installing 'crowdsecurity/sshd': while enabling crowdsecurity/sshd: crowdsecurity/sshd is tainted, won't enable unless --force" + assert_stderr --partial "error while installing 'crowdsecurity/sshd': while enabling crowdsecurity/sshd: crowdsecurity/sshd is tainted, won't overwrite unless --force" rune -0 cscli collections install crowdsecurity/sshd --force assert_stderr --partial "Enabled crowdsecurity/sshd" diff --git a/test/bats/20_hub_parsers.bats b/test/bats/20_hub_parsers.bats index 214463f9cfc..791b1a2177f 100644 --- a/test/bats/20_hub_parsers.bats +++ b/test/bats/20_hub_parsers.bats @@ -177,7 +177,7 @@ teardown() { echo "dirty" >"$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" rune -1 cscli parsers install crowdsecurity/whitelists - assert_stderr --partial "error while installing 'crowdsecurity/whitelists': while 
enabling crowdsecurity/whitelists: crowdsecurity/whitelists is tainted, won't enable unless --force" + assert_stderr --partial "error while installing 'crowdsecurity/whitelists': while enabling crowdsecurity/whitelists: crowdsecurity/whitelists is tainted, won't overwrite unless --force" rune -0 cscli parsers install crowdsecurity/whitelists --force assert_stderr --partial "Enabled crowdsecurity/whitelists" diff --git a/test/bats/20_hub_postoverflows.bats b/test/bats/20_hub_postoverflows.bats index 5123966a44e..37337b08caa 100644 --- a/test/bats/20_hub_postoverflows.bats +++ b/test/bats/20_hub_postoverflows.bats @@ -177,7 +177,7 @@ teardown() { echo "dirty" >"$CONFIG_DIR/postoverflows/s00-enrich/rdns.yaml" rune -1 cscli postoverflows install crowdsecurity/rdns - assert_stderr --partial "error while installing 'crowdsecurity/rdns': while enabling crowdsecurity/rdns: crowdsecurity/rdns is tainted, won't enable unless --force" + assert_stderr --partial "error while installing 'crowdsecurity/rdns': while enabling crowdsecurity/rdns: crowdsecurity/rdns is tainted, won't overwrite unless --force" rune -0 cscli postoverflows install crowdsecurity/rdns --force assert_stderr --partial "Enabled crowdsecurity/rdns" diff --git a/test/bats/20_hub_scenarios.bats b/test/bats/20_hub_scenarios.bats index 4e4b28e7703..3ab3d944c93 100644 --- a/test/bats/20_hub_scenarios.bats +++ b/test/bats/20_hub_scenarios.bats @@ -178,7 +178,7 @@ teardown() { echo "dirty" >"$CONFIG_DIR/scenarios/ssh-bf.yaml" rune -1 cscli scenarios install crowdsecurity/ssh-bf - assert_stderr --partial "error while installing 'crowdsecurity/ssh-bf': while enabling crowdsecurity/ssh-bf: crowdsecurity/ssh-bf is tainted, won't enable unless --force" + assert_stderr --partial "error while installing 'crowdsecurity/ssh-bf': while enabling crowdsecurity/ssh-bf: crowdsecurity/ssh-bf is tainted, won't overwrite unless --force" rune -0 cscli scenarios install crowdsecurity/ssh-bf --force assert_stderr --partial "Enabled 
crowdsecurity/ssh-bf" From 3532e872d350d224e1fd247cfbfe6116620604d5 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 8 Aug 2024 16:46:39 +0200 Subject: [PATCH 244/318] metrics: avoid nil deref with inactive bouncers or malformed response (#3170) * metrics: avoid nil deref with inactive bouncers * log message from API even it if cannot be parsed * fix unit test --- pkg/apiclient/client_test.go | 2 +- pkg/apiclient/resperr.go | 19 +++++++++++++----- pkg/apiserver/apic_metrics.go | 31 +++++++++++++++++++++--------- pkg/apiserver/apic_metrics_test.go | 3 +++ pkg/apiserver/apic_test.go | 14 +++++++++++++- pkg/apiserver/apiserver_test.go | 19 +++++++++--------- 6 files changed, 63 insertions(+), 25 deletions(-) diff --git a/pkg/apiclient/client_test.go b/pkg/apiclient/client_test.go index dd09811924f..bd83e512afc 100644 --- a/pkg/apiclient/client_test.go +++ b/pkg/apiclient/client_test.go @@ -348,5 +348,5 @@ func TestNewClientBadAnswer(t *testing.T) { URL: apiURL, VersionPrefix: "v1", }, &http.Client{}) - cstest.RequireErrorContains(t, err, "invalid body: bad") + cstest.RequireErrorContains(t, err, "API error: http code 401, response: bad") } diff --git a/pkg/apiclient/resperr.go b/pkg/apiclient/resperr.go index e8f12ee9f4e..00689147332 100644 --- a/pkg/apiclient/resperr.go +++ b/pkg/apiclient/resperr.go @@ -16,12 +16,18 @@ type ErrorResponse struct { } func (e *ErrorResponse) Error() string { - err := fmt.Sprintf("API error: %s", *e.Message) + message := ptr.OrEmpty(e.Message) + errors := "" + if len(e.Errors) > 0 { - err += fmt.Sprintf(" (%s)", e.Errors) + errors = fmt.Sprintf(" (%s)", e.Errors) + } + + if message == "" && errors == "" { + errors = "(no errors)" } - return err + return fmt.Sprintf("API error: %s%s", message, errors) } // CheckResponse verifies the API response and builds an appropriate Go error if necessary. 
@@ -42,8 +48,11 @@ func CheckResponse(r *http.Response) error { case http.StatusUnprocessableEntity: ret.Message = ptr.Of(fmt.Sprintf("http code %d, invalid request: %s", r.StatusCode, string(data))) default: - if err := json.Unmarshal(data, ret); err != nil { - ret.Message = ptr.Of(fmt.Sprintf("http code %d, invalid body: %s", r.StatusCode, string(data))) + // try to unmarshal and if there are no 'message' or 'errors' fields, display the body as is, + // the API is following a different convention + err := json.Unmarshal(data, ret) + if err != nil || (ret.Message == nil && len(ret.Errors) == 0) { + ret.Message = ptr.Of(fmt.Sprintf("http code %d, response: %s", r.StatusCode, string(data))) return ret } } diff --git a/pkg/apiserver/apic_metrics.go b/pkg/apiserver/apic_metrics.go index 54640afc2d0..176984f1ad6 100644 --- a/pkg/apiserver/apic_metrics.go +++ b/pkg/apiserver/apic_metrics.go @@ -54,7 +54,11 @@ func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { rcMetrics.FeatureFlags = strings.Split(bouncer.Featureflags, ",") rcMetrics.Version = ptr.Of(bouncer.Version) rcMetrics.Name = bouncer.Name - rcMetrics.LastPull = bouncer.LastPull.UTC().Unix() + + rcMetrics.LastPull = 0 + if bouncer.LastPull != nil { + rcMetrics.LastPull = bouncer.LastPull.UTC().Unix() + } rcMetrics.Metrics = make([]*models.DetailedMetrics, 0) @@ -92,14 +96,19 @@ func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { lpMetrics.FeatureFlags = strings.Split(lp.Featureflags, ",") lpMetrics.Version = ptr.Of(lp.Version) lpMetrics.Name = lp.MachineId - lpMetrics.LastPush = lp.LastPush.UTC().Unix() - lpMetrics.LastUpdate = lp.UpdatedAt.UTC().Unix() + lpMetrics.LastPush = 0 + if lp.LastPush != nil { + lpMetrics.LastPush = lp.LastPush.UTC().Unix() + } + + lpMetrics.LastUpdate = lp.UpdatedAt.UTC().Unix() lpMetrics.Datasources = lp.Datasources + hubItems := models.HubItems{} + if lp.Hubstate != nil { // must carry over the hub state even if nothing is installed - hubItems := 
models.HubItems{} for itemType, items := range lp.Hubstate { hubItems[itemType] = []models.HubItem{} for _, item := range items { @@ -109,13 +118,11 @@ func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { Version: item.Version, }) } - - lpMetrics.HubItems = hubItems } - } else { - lpMetrics.HubItems = models.HubItems{} } + lpMetrics.HubItems = hubItems + lpMetrics.Metrics = make([]*models.DetailedMetrics, 0) for _, dbMetric := range dbMetrics { @@ -203,11 +210,16 @@ func (a *apic) GetMetrics() (*models.Metrics, error) { bouncersInfo := make([]*models.MetricsBouncerInfo, len(bouncers)) for i, bouncer := range bouncers { + lastPull := "" + if bouncer.LastPull != nil { + lastPull = bouncer.LastPull.Format(time.RFC3339) + } + bouncersInfo[i] = &models.MetricsBouncerInfo{ Version: bouncer.Version, CustomName: bouncer.Name, Name: bouncer.Type, - LastPull: bouncer.LastPull.Format(time.RFC3339), + LastPull: lastPull, } } @@ -330,6 +342,7 @@ func (a *apic) SendUsageMetrics() { firstRun := true + log.Debugf("Start sending usage metrics to CrowdSec Central API (interval: %s once, then %s)", a.usageMetricsIntervalFirst, a.usageMetricsInterval) ticker := time.NewTicker(a.usageMetricsIntervalFirst) for { diff --git a/pkg/apiserver/apic_metrics_test.go b/pkg/apiserver/apic_metrics_test.go index 15ad63b391b..d1e48ac90a3 100644 --- a/pkg/apiserver/apic_metrics_test.go +++ b/pkg/apiserver/apic_metrics_test.go @@ -84,8 +84,11 @@ func TestAPICSendMetrics(t *testing.T) { tc.setUp(api) stop := make(chan bool) + httpmock.ZeroCallCounters() + go api.SendMetrics(stop) + time.Sleep(tc.duration) stop <- true diff --git a/pkg/apiserver/apic_test.go b/pkg/apiserver/apic_test.go index a3aa956ed98..546a236251f 100644 --- a/pkg/apiserver/apic_test.go +++ b/pkg/apiserver/apic_test.go @@ -1113,12 +1113,16 @@ func TestAPICPush(t *testing.T) { require.NoError(t, err) api.apiClient = apic + httpmock.RegisterResponder("POST", "http://api.crowdsec.net/api/signals", 
httpmock.NewBytesResponder(200, []byte{})) + go func() { api.AlertsAddChan <- tc.alerts + time.Sleep(time.Second) api.Shutdown() }() + err = api.Push() require.NoError(t, err) assert.Equal(t, tc.expectedCalls, httpmock.GetTotalCallCount()) @@ -1161,7 +1165,9 @@ func TestAPICPull(t *testing.T) { url, err := url.ParseRequestURI("http://api.crowdsec.net/") require.NoError(t, err) httpmock.Activate() + defer httpmock.DeactivateAndReset() + apic, err := apiclient.NewDefaultClient( url, "/api", @@ -1169,7 +1175,9 @@ func TestAPICPull(t *testing.T) { nil, ) require.NoError(t, err) + api.apiClient = apic + httpmock.RegisterNoResponder(httpmock.NewBytesResponder(200, jsonMarshalX( modelscapi.GetDecisionsStreamResponse{ New: modelscapi.GetDecisionsStreamResponseNew{ @@ -1187,14 +1195,18 @@ func TestAPICPull(t *testing.T) { }, ))) tc.setUp() + var buf bytes.Buffer + go func() { logrus.SetOutput(&buf) + if err := api.Pull(); err != nil { panic(err) } }() - //Slightly long because the CI runner for windows are slow, and this can lead to random failure + + // Slightly long because the CI runner for windows are slow, and this can lead to random failure time.Sleep(time.Millisecond * 500) logrus.SetOutput(os.Stderr) assert.Contains(t, buf.String(), tc.logContains) diff --git a/pkg/apiserver/apiserver_test.go b/pkg/apiserver/apiserver_test.go index 26531a592da..b3f619f39c1 100644 --- a/pkg/apiserver/apiserver_test.go +++ b/pkg/apiserver/apiserver_test.go @@ -29,15 +29,16 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) -var testMachineID = "test" -var testPassword = strfmt.Password("test") -var MachineTest = models.WatcherAuthRequest{ - MachineID: &testMachineID, - Password: &testPassword, -} - -var UserAgent = fmt.Sprintf("crowdsec-test/%s", version.Version) -var emptyBody = strings.NewReader("") +var ( + testMachineID = "test" + testPassword = strfmt.Password("test") + MachineTest = models.WatcherAuthRequest{ + MachineID: &testMachineID, + Password: &testPassword, + } + 
UserAgent = fmt.Sprintf("crowdsec-test/%s", version.Version) + emptyBody = strings.NewReader("") +) func LoadTestConfig(t *testing.T) csconfig.Config { config := csconfig.Config{} From 52995b8fa456a03b0761ce9aa2dcbcc2427e62ae Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 12 Aug 2024 10:15:00 +0200 Subject: [PATCH 245/318] prevent nil deref when loading cti config (#3176) * correct variable names * prevent nil deref when loading cti config --- pkg/apiserver/apic.go | 4 ++-- pkg/csconfig/api.go | 5 ++--- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/pkg/apiserver/apic.go b/pkg/apiserver/apic.go index 284d0acdabf..5b850cbff0d 100644 --- a/pkg/apiserver/apic.go +++ b/pkg/apiserver/apic.go @@ -42,7 +42,7 @@ const ( metricsIntervalDefault = time.Minute * 30 metricsIntervalDelta = time.Minute * 15 usageMetricsInterval = time.Minute * 30 - usageMetricsIntervalFirst = time.Minute * 15 + usageMetricsIntervalDelta = time.Minute * 15 ) type apic struct { @@ -196,7 +196,7 @@ func NewAPIC(config *csconfig.OnlineApiClientCfg, dbClient *database.Client, con metricsInterval: metricsIntervalDefault, metricsIntervalFirst: randomDuration(metricsIntervalDefault, metricsIntervalDelta), usageMetricsInterval: usageMetricsInterval, - usageMetricsIntervalFirst: randomDuration(usageMetricsInterval, usageMetricsIntervalFirst), + usageMetricsIntervalFirst: randomDuration(usageMetricsInterval, usageMetricsIntervalDelta), isPulling: make(chan bool, 1), whitelists: apicWhitelist, } diff --git a/pkg/csconfig/api.go b/pkg/csconfig/api.go index fb29111ecd5..a23df957282 100644 --- a/pkg/csconfig/api.go +++ b/pkg/csconfig/api.go @@ -61,7 +61,7 @@ type CTICfg struct { func (a *CTICfg) Load() error { if a.Key == nil { - *a.Enabled = false + a.Enabled = ptr.Of(false) } if a.Key != nil && *a.Key == "" { @@ -69,8 +69,7 @@ func (a *CTICfg) Load() error { } if a.Enabled == nil { - a.Enabled = new(bool) - *a.Enabled = true + a.Enabled = ptr.Of(true) 
} if a.CacheTimeout == nil { From 71a253aea6a8cf8b25ed724f51a88a7859f8c807 Mon Sep 17 00:00:00 2001 From: Laurence Jones Date: Mon, 12 Aug 2024 09:34:45 +0100 Subject: [PATCH 246/318] Remove useragent set by RC (#3167) Fix #3166 --- pkg/appsec/request.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pkg/appsec/request.go b/pkg/appsec/request.go index d0e3632abda..ccd7a9f9cc8 100644 --- a/pkg/appsec/request.go +++ b/pkg/appsec/request.go @@ -331,8 +331,9 @@ func NewParsedRequestFromRequest(r *http.Request, logger *log.Entry) (ParsedRequ originalHTTPRequest.Header.Set("User-Agent", userAgent) r.Header.Set("User-Agent", userAgent) //Override the UA in the original request, as this is what will be used by the waf engine } else { - //If we don't have a forwarded UA, delete the one that was set by the bouncer + //If we don't have a forwarded UA, delete the one that was set by the remediation in both original and incoming originalHTTPRequest.Header.Del("User-Agent") + r.Header.Del("User-Agent") } parsedURL, err := url.Parse(clientURI) From 48e3f51954defbb3592fedb38e04529f94b3e409 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 13 Aug 2024 16:49:27 +0200 Subject: [PATCH 247/318] cscli: add option --ignore-missing to "bouncers delete", "machines delete" (#3177) * cscli: add option --ignore-missing to "bouncers delete", "machines delete" * lint --- cmd/crowdsec-cli/bouncers.go | 20 +++++++++++++++----- cmd/crowdsec-cli/machines.go | 17 ++++++++++++++--- pkg/database/bouncers.go | 10 +++++++++- pkg/database/machines.go | 22 +++++++++++++++------- test/bats/10_bouncers.bats | 8 ++++++++ test/bats/30_machines.bats | 8 ++++++++ 6 files changed, 69 insertions(+), 16 deletions(-) diff --git a/cmd/crowdsec-cli/bouncers.go b/cmd/crowdsec-cli/bouncers.go index 8057cba98c9..d3edcea0db9 100644 --- a/cmd/crowdsec-cli/bouncers.go +++ b/cmd/crowdsec-cli/bouncers.go @@ -344,11 +344,15 @@ func (cli *cliBouncers) 
validBouncerID(cmd *cobra.Command, args []string, toComp return ret, cobra.ShellCompDirectiveNoFileComp } -func (cli *cliBouncers) delete(bouncers []string) error { +func (cli *cliBouncers) delete(bouncers []string, ignoreMissing bool) error { for _, bouncerID := range bouncers { - err := cli.db.DeleteBouncer(bouncerID) - if err != nil { - return fmt.Errorf("unable to delete bouncer '%s': %w", bouncerID, err) + if err := cli.db.DeleteBouncer(bouncerID); err != nil { + var notFoundErr *database.BouncerNotFoundError + if ignoreMissing && errors.As(err, ¬FoundErr) { + return nil + } + + return fmt.Errorf("unable to delete bouncer: %w", err) } log.Infof("bouncer '%s' deleted successfully", bouncerID) @@ -358,18 +362,24 @@ func (cli *cliBouncers) delete(bouncers []string) error { } func (cli *cliBouncers) newDeleteCmd() *cobra.Command { + var ignoreMissing bool + cmd := &cobra.Command{ Use: "delete MyBouncerName", Short: "delete bouncer(s) from the database", + Example: `cscli bouncers delete "bouncer1" "bouncer2"`, Args: cobra.MinimumNArgs(1), Aliases: []string{"remove"}, DisableAutoGenTag: true, ValidArgsFunction: cli.validBouncerID, RunE: func(_ *cobra.Command, args []string) error { - return cli.delete(args) + return cli.delete(args, ignoreMissing) }, } + flags := cmd.Flags() + flags.BoolVar(&ignoreMissing, "ignore-missing", false, "don't print errors if one or more bouncers don't exist") + return cmd } diff --git a/cmd/crowdsec-cli/machines.go b/cmd/crowdsec-cli/machines.go index e50a17f02dc..dcdb1963b49 100644 --- a/cmd/crowdsec-cli/machines.go +++ b/cmd/crowdsec-cli/machines.go @@ -488,10 +488,16 @@ func (cli *cliMachines) validMachineID(cmd *cobra.Command, args []string, toComp return ret, cobra.ShellCompDirectiveNoFileComp } -func (cli *cliMachines) delete(machines []string) error { +func (cli *cliMachines) delete(machines []string, ignoreMissing bool) error { for _, machineID := range machines { if err := cli.db.DeleteWatcher(machineID); err != nil { - 
log.Errorf("unable to delete machine '%s': %s", machineID, err) + var notFoundErr *database.MachineNotFoundError + if ignoreMissing && errors.As(err, ¬FoundErr) { + return nil + } + + log.Errorf("unable to delete machine: %s", err) + return nil } @@ -502,6 +508,8 @@ func (cli *cliMachines) delete(machines []string) error { } func (cli *cliMachines) newDeleteCmd() *cobra.Command { + var ignoreMissing bool + cmd := &cobra.Command{ Use: "delete [machine_name]...", Short: "delete machine(s) by name", @@ -511,10 +519,13 @@ func (cli *cliMachines) newDeleteCmd() *cobra.Command { DisableAutoGenTag: true, ValidArgsFunction: cli.validMachineID, RunE: func(_ *cobra.Command, args []string) error { - return cli.delete(args) + return cli.delete(args, ignoreMissing) }, } + flags := cmd.Flags() + flags.BoolVar(&ignoreMissing, "ignore-missing", false, "don't print errors if one or more machines don't exist") + return cmd } diff --git a/pkg/database/bouncers.go b/pkg/database/bouncers.go index ff750e63c59..f79e9580afe 100644 --- a/pkg/database/bouncers.go +++ b/pkg/database/bouncers.go @@ -12,6 +12,14 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/models" ) +type BouncerNotFoundError struct { + BouncerName string +} + +func (e *BouncerNotFoundError) Error() string { + return fmt.Sprintf("'%s' does not exist", e.BouncerName) +} + func (c *Client) BouncerUpdateBaseMetrics(bouncerName string, bouncerType string, baseMetrics models.BaseMetrics) error { os := baseMetrics.Os features := strings.Join(baseMetrics.FeatureFlags, ",") @@ -88,7 +96,7 @@ func (c *Client) DeleteBouncer(name string) error { } if nbDeleted == 0 { - return errors.New("bouncer doesn't exist") + return &BouncerNotFoundError{BouncerName: name} } return nil diff --git a/pkg/database/machines.go b/pkg/database/machines.go index 21349b8b687..75b0ee5fdaa 100644 --- a/pkg/database/machines.go +++ b/pkg/database/machines.go @@ -21,6 +21,14 @@ const ( CapiListsMachineID = types.ListOrigin ) +type MachineNotFoundError 
struct { + MachineID string +} + +func (e *MachineNotFoundError) Error() string { + return fmt.Sprintf("'%s' does not exist", e.MachineID) +} + func (c *Client) MachineUpdateBaseMetrics(machineID string, baseMetrics models.BaseMetrics, hubItems models.HubItems, datasources map[string]int64) error { os := baseMetrics.Os features := strings.Join(baseMetrics.FeatureFlags, ",") @@ -168,7 +176,7 @@ func (c *Client) DeleteWatcher(name string) error { } if nbDeleted == 0 { - return errors.New("machine doesn't exist") + return &MachineNotFoundError{MachineID: name} } return nil @@ -197,8 +205,8 @@ func (c *Client) UpdateMachineLastHeartBeat(machineID string) error { return nil } -func (c *Client) UpdateMachineScenarios(scenarios string, ID int) error { - _, err := c.Ent.Machine.UpdateOneID(ID). +func (c *Client) UpdateMachineScenarios(scenarios string, id int) error { + _, err := c.Ent.Machine.UpdateOneID(id). SetUpdatedAt(time.Now().UTC()). SetScenarios(scenarios). Save(c.CTX) @@ -209,8 +217,8 @@ func (c *Client) UpdateMachineScenarios(scenarios string, ID int) error { return nil } -func (c *Client) UpdateMachineIP(ipAddr string, ID int) error { - _, err := c.Ent.Machine.UpdateOneID(ID). +func (c *Client) UpdateMachineIP(ipAddr string, id int) error { + _, err := c.Ent.Machine.UpdateOneID(id). SetIpAddress(ipAddr). Save(c.CTX) if err != nil { @@ -220,8 +228,8 @@ func (c *Client) UpdateMachineIP(ipAddr string, ID int) error { return nil } -func (c *Client) UpdateMachineVersion(ipAddr string, ID int) error { - _, err := c.Ent.Machine.UpdateOneID(ID). +func (c *Client) UpdateMachineVersion(ipAddr string, id int) error { + _, err := c.Ent.Machine.UpdateOneID(id). SetVersion(ipAddr). 
Save(c.CTX) if err != nil { diff --git a/test/bats/10_bouncers.bats b/test/bats/10_bouncers.bats index a89c9f9dd65..f99913dcee5 100644 --- a/test/bats/10_bouncers.bats +++ b/test/bats/10_bouncers.bats @@ -60,6 +60,14 @@ teardown() { assert_json '{message:"access forbidden"}' } +@test "delete non-existent bouncer" { + # this is a fatal error, which is not consistent with "machines delete" + rune -1 cscli bouncers delete something + assert_stderr --partial "unable to delete bouncer: 'something' does not exist" + rune -0 cscli bouncers delete something --ignore-missing + refute_stderr +} + @test "bouncers delete has autocompletion" { rune -0 cscli bouncers add foo1 rune -0 cscli bouncers add foo2 diff --git a/test/bats/30_machines.bats b/test/bats/30_machines.bats index f8b63fb3173..d4cce67d0b0 100644 --- a/test/bats/30_machines.bats +++ b/test/bats/30_machines.bats @@ -62,6 +62,14 @@ teardown() { assert_output 1 } +@test "delete non-existent machine" { + # this is not a fatal error, won't halt a script with -e + rune -0 cscli machines delete something + assert_stderr --partial "unable to delete machine: 'something' does not exist" + rune -0 cscli machines delete something --ignore-missing + refute_stderr +} + @test "machines [delete|inspect] has autocompletion" { rune -0 cscli machines add -a -f /dev/null foo1 rune -0 cscli machines add -a -f /dev/null foo2 From 64d2ea0ddb49369a323471f1e4984c502ebe135f Mon Sep 17 00:00:00 2001 From: Alexander Mnich <56564725+a-mnich@users.noreply.github.com> Date: Fri, 16 Aug 2024 21:48:14 +0200 Subject: [PATCH 248/318] add missing ApiKeyAuthorizer to decisions swagger endpoint (#3178) --- pkg/models/localapi_swagger.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pkg/models/localapi_swagger.yaml b/pkg/models/localapi_swagger.yaml index ba14880e7c5..9edfd12b82f 100644 --- a/pkg/models/localapi_swagger.yaml +++ b/pkg/models/localapi_swagger.yaml @@ -160,6 +160,8 @@ paths: description: "400 response" schema: $ref: 
"#/definitions/ErrorResponse" + security: + - APIKeyAuthorizer: [] head: description: Returns information about existing decisions summary: GetDecisions From 2fbc214538615268cf9122f7d9e449b9f96986bc Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 19 Aug 2024 09:54:58 +0200 Subject: [PATCH 249/318] CI: use go 1.22.6 (#3171) --- .github/workflows/bats-hub.yml | 2 +- .github/workflows/bats-mysql.yml | 2 +- .github/workflows/bats-postgres.yml | 2 +- .github/workflows/bats-sqlite-coverage.yml | 2 +- .github/workflows/ci-windows-build-msi.yml | 2 +- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/go-tests-windows.yml | 2 +- .github/workflows/go-tests.yml | 2 +- .github/workflows/publish-tarball-release.yml | 2 +- Dockerfile | 2 +- Dockerfile.debian | 2 +- azure-pipelines.yml | 2 +- 12 files changed, 12 insertions(+), 12 deletions(-) diff --git a/.github/workflows/bats-hub.yml b/.github/workflows/bats-hub.yml index be6fc0b8a73..d1cfb026cdc 100644 --- a/.github/workflows/bats-hub.yml +++ b/.github/workflows/bats-hub.yml @@ -33,7 +33,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.5" + go-version: "1.22.6" - name: "Install bats dependencies" env: diff --git a/.github/workflows/bats-mysql.yml b/.github/workflows/bats-mysql.yml index 3fcc5a6a209..e0b6068e44b 100644 --- a/.github/workflows/bats-mysql.yml +++ b/.github/workflows/bats-mysql.yml @@ -36,7 +36,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.5" + go-version: "1.22.6" - name: "Install bats dependencies" env: diff --git a/.github/workflows/bats-postgres.yml b/.github/workflows/bats-postgres.yml index 76342a47603..0d2012708a0 100644 --- a/.github/workflows/bats-postgres.yml +++ b/.github/workflows/bats-postgres.yml @@ -45,7 +45,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.5" + go-version: "1.22.6" - name: "Install bats dependencies" env: diff --git 
a/.github/workflows/bats-sqlite-coverage.yml b/.github/workflows/bats-sqlite-coverage.yml index 87e60b071d5..3c0ae4785ad 100644 --- a/.github/workflows/bats-sqlite-coverage.yml +++ b/.github/workflows/bats-sqlite-coverage.yml @@ -31,7 +31,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.5" + go-version: "1.22.6" - name: "Install bats dependencies" env: diff --git a/.github/workflows/ci-windows-build-msi.yml b/.github/workflows/ci-windows-build-msi.yml index 9860a171233..03cdb4bd871 100644 --- a/.github/workflows/ci-windows-build-msi.yml +++ b/.github/workflows/ci-windows-build-msi.yml @@ -35,7 +35,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.5" + go-version: "1.22.6" - name: Build run: make windows_installer BUILD_RE2_WASM=1 diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 982ecedbb25..42b52490ea8 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -52,7 +52,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.5" + go-version: "1.22.6" cache-dependency-path: "**/go.sum" # Initializes the CodeQL tools for scanning. 
diff --git a/.github/workflows/go-tests-windows.yml b/.github/workflows/go-tests-windows.yml index b99291f06b7..5a463bab99c 100644 --- a/.github/workflows/go-tests-windows.yml +++ b/.github/workflows/go-tests-windows.yml @@ -34,7 +34,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.5" + go-version: "1.22.6" - name: Build run: | diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index a7c34adfe50..58b8dc61a0d 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -126,7 +126,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.5" + go-version: "1.22.6" - name: Create localstack streams run: | diff --git a/.github/workflows/publish-tarball-release.yml b/.github/workflows/publish-tarball-release.yml index 72211cb5180..2f809a29a9b 100644 --- a/.github/workflows/publish-tarball-release.yml +++ b/.github/workflows/publish-tarball-release.yml @@ -25,7 +25,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.5" + go-version: "1.22.6" - name: Build the binaries run: | diff --git a/Dockerfile b/Dockerfile index b9569065137..d30004e4683 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # vim: set ft=dockerfile: -FROM golang:1.22.5-alpine3.20 AS build +FROM golang:1.22.6-alpine3.20 AS build ARG BUILD_VERSION diff --git a/Dockerfile.debian b/Dockerfile.debian index a256cfa9ab3..4fda8c26599 100644 --- a/Dockerfile.debian +++ b/Dockerfile.debian @@ -1,5 +1,5 @@ # vim: set ft=dockerfile: -FROM golang:1.22.5-bookworm AS build +FROM golang:1.22.6-bookworm AS build ARG BUILD_VERSION diff --git a/azure-pipelines.yml b/azure-pipelines.yml index d73637fff6b..0ceb9e5cffc 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -21,7 +21,7 @@ stages: - task: GoTool@0 displayName: "Install Go" inputs: - version: '1.22.5' + version: '1.22.6' - pwsh: | choco install -y make From 534fb14f7b91d3b098c4ccd214653e5ac058edb4 Mon Sep 17 
00:00:00 2001 From: blotus Date: Mon, 19 Aug 2024 10:07:06 +0200 Subject: [PATCH 250/318] hide geoip related warnings (#3179) --- pkg/parser/enrich_geoip.go | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/pkg/parser/enrich_geoip.go b/pkg/parser/enrich_geoip.go index 58732129af8..1756927bc4b 100644 --- a/pkg/parser/enrich_geoip.go +++ b/pkg/parser/enrich_geoip.go @@ -25,7 +25,7 @@ func IpToRange(field string, p *types.Event, plog *log.Entry) (map[string]string } if r == nil { - plog.Warnf("No range found for ip '%s'", field) + plog.Debugf("No range found for ip '%s'", field) return nil, nil } @@ -49,10 +49,15 @@ func GeoIpASN(field string, p *types.Event, plog *log.Entry) (map[string]string, r, err := exprhelpers.GeoIPASNEnrich(field) if err != nil { - plog.Errorf("Unable to enrich ip '%s'", field) + plog.Debugf("Unable to enrich ip '%s'", field) return nil, nil //nolint:nilerr } + if r == nil { + plog.Debugf("No ASN found for ip '%s'", field) + return nil, nil + } + record, ok := r.(*geoip2.ASN) if !ok { @@ -78,10 +83,15 @@ func GeoIpCity(field string, p *types.Event, plog *log.Entry) (map[string]string r, err := exprhelpers.GeoIPEnrich(field) if err != nil { - plog.Errorf("Unable to enrich ip '%s'", field) + plog.Debugf("Unable to enrich ip '%s'", field) return nil, nil //nolint:nilerr } + if r == nil { + plog.Debugf("No city found for ip '%s'", field) + return nil, nil + } + record, ok := r.(*geoip2.City) if !ok { From 6b9b19b8c1380617bcec2cf36099d80fc5270c79 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 19 Aug 2024 14:09:17 +0200 Subject: [PATCH 251/318] CI: use golangci-lint 1.60 (#3180) * use golangci-lint 1.60 * lint (gosimple/S1009) -> len(nil) == 0 * nerf govet for now --- .github/workflows/go-tests-windows.yml | 2 +- .github/workflows/go-tests.yml | 2 +- .golangci.yml | 1 + cmd/notification-email/main.go | 2 +- pkg/acquisition/modules/appsec/appsec_runner.go | 2 +- 
pkg/apiserver/middlewares/v1/ocsp.go | 2 +- pkg/cwhub/hub.go | 2 +- pkg/database/machines.go | 2 +- 8 files changed, 8 insertions(+), 7 deletions(-) diff --git a/.github/workflows/go-tests-windows.yml b/.github/workflows/go-tests-windows.yml index 5a463bab99c..6e623b26e09 100644 --- a/.github/workflows/go-tests-windows.yml +++ b/.github/workflows/go-tests-windows.yml @@ -57,6 +57,6 @@ jobs: - name: golangci-lint uses: golangci/golangci-lint-action@v6 with: - version: v1.59 + version: v1.60 args: --issues-exit-code=1 --timeout 10m only-new-issues: false diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index 58b8dc61a0d..dfdbd8ab0e9 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -158,6 +158,6 @@ jobs: - name: golangci-lint uses: golangci/golangci-lint-action@v6 with: - version: v1.59 + version: v1.60 args: --issues-exit-code=1 --timeout 10m only-new-issues: false diff --git a/.golangci.yml b/.golangci.yml index fb1dab623c1..f12d7086b62 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -17,6 +17,7 @@ linters-settings: disable: - reflectvaluecompare - fieldalignment + - printf maintidx: # raise this after refactoring diff --git a/cmd/notification-email/main.go b/cmd/notification-email/main.go index 3b535ae7ffa..2707b7fe1af 100644 --- a/cmd/notification-email/main.go +++ b/cmd/notification-email/main.go @@ -81,7 +81,7 @@ func (n *EmailPlugin) Configure(ctx context.Context, config *protobufs.Config) ( return nil, errors.New("SMTP host is not set") } - if d.ReceiverEmails == nil || len(d.ReceiverEmails) == 0 { + if len(d.ReceiverEmails) == 0 { return nil, errors.New("receiver emails are not set") } diff --git a/pkg/acquisition/modules/appsec/appsec_runner.go b/pkg/acquisition/modules/appsec/appsec_runner.go index ed49d6a7b41..de34b62d704 100644 --- a/pkg/acquisition/modules/appsec/appsec_runner.go +++ b/pkg/acquisition/modules/appsec/appsec_runner.go @@ -167,7 +167,7 @@ func (r *AppsecRunner) 
processRequest(tx appsec.ExtendedTransaction, request *ap return nil } - if request.Body != nil && len(request.Body) > 0 { + if len(request.Body) > 0 { in, _, err = request.Tx.WriteRequestBody(request.Body) if err != nil { r.logger.Errorf("unable to write request body : %s", err) diff --git a/pkg/apiserver/middlewares/v1/ocsp.go b/pkg/apiserver/middlewares/v1/ocsp.go index 24557bfda7b..0b6406ad0e7 100644 --- a/pkg/apiserver/middlewares/v1/ocsp.go +++ b/pkg/apiserver/middlewares/v1/ocsp.go @@ -70,7 +70,7 @@ func (oc *OCSPChecker) query(server string, cert *x509.Certificate, issuer *x509 // It returns a boolean indicating if the certificate is revoked and a boolean indicating // if the OCSP check was successful and could be cached. func (oc *OCSPChecker) isRevokedBy(cert *x509.Certificate, issuer *x509.Certificate) (bool, bool) { - if cert.OCSPServer == nil || len(cert.OCSPServer) == 0 { + if len(cert.OCSPServer) == 0 { oc.logger.Infof("TLSAuth: no OCSP Server present in client certificate, skipping OCSP verification") return false, true } diff --git a/pkg/cwhub/hub.go b/pkg/cwhub/hub.go index 1293d6fa235..20a628a493f 100644 --- a/pkg/cwhub/hub.go +++ b/pkg/cwhub/hub.go @@ -153,7 +153,7 @@ func (h *Hub) ItemStats() []string { // Update downloads the latest version of the index and writes it to disk if it changed. It cannot be called after Load() // unless the hub is completely empty. func (h *Hub) Update(ctx context.Context) error { - if h.pathIndex != nil && len(h.pathIndex) > 0 { + if len(h.pathIndex) > 0 { // if this happens, it's a bug. 
return errors.New("cannot update hub after items have been loaded") } diff --git a/pkg/database/machines.go b/pkg/database/machines.go index 75b0ee5fdaa..3c8cbabbfa7 100644 --- a/pkg/database/machines.go +++ b/pkg/database/machines.go @@ -35,7 +35,7 @@ func (c *Client) MachineUpdateBaseMetrics(machineID string, baseMetrics models.B var heartbeat time.Time - if baseMetrics.Metrics == nil || len(baseMetrics.Metrics) == 0 { + if len(baseMetrics.Metrics) == 0 { heartbeat = time.Now().UTC() } else { heartbeat = time.Unix(*baseMetrics.Metrics[0].Meta.UtcNowTimestamp, 0) From 0e567a9a1867f17f007f24366edaf1536acb5e91 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 20 Aug 2024 09:48:36 +0200 Subject: [PATCH 252/318] CI: go - don't pin minor version (always use latest) (#3181) --- .github/workflows/bats-hub.yml | 2 +- .github/workflows/bats-mysql.yml | 2 +- .github/workflows/bats-postgres.yml | 2 +- .github/workflows/bats-sqlite-coverage.yml | 2 +- .github/workflows/ci-windows-build-msi.yml | 2 +- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/go-tests-windows.yml | 2 +- .github/workflows/go-tests.yml | 2 +- .github/workflows/publish-tarball-release.yml | 2 +- Dockerfile | 2 +- Dockerfile.debian | 2 +- azure-pipelines.yml | 2 +- 12 files changed, 12 insertions(+), 12 deletions(-) diff --git a/.github/workflows/bats-hub.yml b/.github/workflows/bats-hub.yml index d1cfb026cdc..473af9b1312 100644 --- a/.github/workflows/bats-hub.yml +++ b/.github/workflows/bats-hub.yml @@ -33,7 +33,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.6" + go-version: "1.22" - name: "Install bats dependencies" env: diff --git a/.github/workflows/bats-mysql.yml b/.github/workflows/bats-mysql.yml index e0b6068e44b..211d856bc34 100644 --- a/.github/workflows/bats-mysql.yml +++ b/.github/workflows/bats-mysql.yml @@ -36,7 +36,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.6" + 
go-version: "1.22" - name: "Install bats dependencies" env: diff --git a/.github/workflows/bats-postgres.yml b/.github/workflows/bats-postgres.yml index 0d2012708a0..aec707f0c03 100644 --- a/.github/workflows/bats-postgres.yml +++ b/.github/workflows/bats-postgres.yml @@ -45,7 +45,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.6" + go-version: "1.22" - name: "Install bats dependencies" env: diff --git a/.github/workflows/bats-sqlite-coverage.yml b/.github/workflows/bats-sqlite-coverage.yml index 3c0ae4785ad..c1ae9077310 100644 --- a/.github/workflows/bats-sqlite-coverage.yml +++ b/.github/workflows/bats-sqlite-coverage.yml @@ -31,7 +31,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.6" + go-version: "1.22" - name: "Install bats dependencies" env: diff --git a/.github/workflows/ci-windows-build-msi.yml b/.github/workflows/ci-windows-build-msi.yml index 03cdb4bd871..a37aa43e2d0 100644 --- a/.github/workflows/ci-windows-build-msi.yml +++ b/.github/workflows/ci-windows-build-msi.yml @@ -35,7 +35,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.6" + go-version: "1.22" - name: Build run: make windows_installer BUILD_RE2_WASM=1 diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 42b52490ea8..2715c6590c3 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -52,7 +52,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.6" + go-version: "1.22" cache-dependency-path: "**/go.sum" # Initializes the CodeQL tools for scanning. 
diff --git a/.github/workflows/go-tests-windows.yml b/.github/workflows/go-tests-windows.yml index 6e623b26e09..e70d6e352f1 100644 --- a/.github/workflows/go-tests-windows.yml +++ b/.github/workflows/go-tests-windows.yml @@ -34,7 +34,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.6" + go-version: "1.22" - name: Build run: | diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index dfdbd8ab0e9..3709c695231 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -126,7 +126,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.6" + go-version: "1.22" - name: Create localstack streams run: | diff --git a/.github/workflows/publish-tarball-release.yml b/.github/workflows/publish-tarball-release.yml index 2f809a29a9b..eeefb801719 100644 --- a/.github/workflows/publish-tarball-release.yml +++ b/.github/workflows/publish-tarball-release.yml @@ -25,7 +25,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.6" + go-version: "1.22" - name: Build the binaries run: | diff --git a/Dockerfile b/Dockerfile index d30004e4683..d4c8978c9ae 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # vim: set ft=dockerfile: -FROM golang:1.22.6-alpine3.20 AS build +FROM golang:1.22-alpine3.20 AS build ARG BUILD_VERSION diff --git a/Dockerfile.debian b/Dockerfile.debian index 4fda8c26599..73a44e8a4b8 100644 --- a/Dockerfile.debian +++ b/Dockerfile.debian @@ -1,5 +1,5 @@ # vim: set ft=dockerfile: -FROM golang:1.22.6-bookworm AS build +FROM golang:1.22-bookworm AS build ARG BUILD_VERSION diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 0ceb9e5cffc..6051ca67393 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -21,7 +21,7 @@ stages: - task: GoTool@0 displayName: "Install Go" inputs: - version: '1.22.6' + version: '1.22' - pwsh: | choco install -y make From e7b54c68c549608c48bceae1d4f29486cbb2ab49 Mon Sep 17 00:00:00 2001 
From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 20 Aug 2024 15:47:28 +0200 Subject: [PATCH 253/318] refact (nestif): reduce complexity of pkg/leakybucket (#3139) * refact (nestif): extract functions in pkg/leakybucket/overflows.go * refact (nestif): extract functions in pkg/leakybucket/manager_load.go * lint --- .golangci.yml | 2 +- pkg/leakybucket/manager_load.go | 183 +++++++++++++++++++------------- pkg/leakybucket/overflows.go | 103 ++++++++++-------- 3 files changed, 171 insertions(+), 117 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index f12d7086b62..6da59142691 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -28,7 +28,7 @@ linters-settings: nestif: # lower this after refactoring - min-complexity: 24 + min-complexity: 20 nlreturn: block-size: 5 diff --git a/pkg/leakybucket/manager_load.go b/pkg/leakybucket/manager_load.go index ca2e4d17d99..1d523759f2b 100644 --- a/pkg/leakybucket/manager_load.go +++ b/pkg/leakybucket/manager_load.go @@ -79,84 +79,125 @@ type BucketFactory struct { // we use one NameGenerator for all the future buckets var seed namegenerator.Generator = namegenerator.NewNameGenerator(time.Now().UTC().UnixNano()) -func ValidateFactory(bucketFactory *BucketFactory) error { - if bucketFactory.Name == "" { - return errors.New("bucket must have name") +func validateLeakyType(bucketFactory *BucketFactory) error { + if bucketFactory.Capacity <= 0 { // capacity must be a positive int + return fmt.Errorf("bad capacity for leaky '%d'", bucketFactory.Capacity) } - if bucketFactory.Description == "" { - return errors.New("description is mandatory") + if bucketFactory.LeakSpeed == "" { + return errors.New("leakspeed can't be empty for leaky") } - if bucketFactory.Type == "leaky" { - if bucketFactory.Capacity <= 0 { // capacity must be a positive int - return fmt.Errorf("bad capacity for leaky '%d'", bucketFactory.Capacity) - } + if bucketFactory.leakspeed == 0 { + return fmt.Errorf("bad leakspeed for leaky '%s'", 
bucketFactory.LeakSpeed) + } - if bucketFactory.LeakSpeed == "" { - return errors.New("leakspeed can't be empty for leaky") - } + return nil +} - if bucketFactory.leakspeed == 0 { - return fmt.Errorf("bad leakspeed for leaky '%s'", bucketFactory.LeakSpeed) - } - } else if bucketFactory.Type == "counter" { - if bucketFactory.Duration == "" { - return errors.New("duration can't be empty for counter") - } +func validateCounterType(bucketFactory *BucketFactory) error { + if bucketFactory.Duration == "" { + return errors.New("duration can't be empty for counter") + } - if bucketFactory.duration == 0 { - return fmt.Errorf("bad duration for counter bucket '%d'", bucketFactory.duration) - } + if bucketFactory.duration == 0 { + return fmt.Errorf("bad duration for counter bucket '%d'", bucketFactory.duration) + } - if bucketFactory.Capacity != -1 { - return errors.New("counter bucket must have -1 capacity") - } - } else if bucketFactory.Type == "trigger" { - if bucketFactory.Capacity != 0 { - return errors.New("trigger bucket must have 0 capacity") - } - } else if bucketFactory.Type == "conditional" { - if bucketFactory.ConditionalOverflow == "" { - return errors.New("conditional bucket must have a condition") - } + if bucketFactory.Capacity != -1 { + return errors.New("counter bucket must have -1 capacity") + } - if bucketFactory.Capacity != -1 { - bucketFactory.logger.Warnf("Using a value different than -1 as capacity for conditional bucket, this may lead to unexpected overflows") - } + return nil +} - if bucketFactory.LeakSpeed == "" { - return errors.New("leakspeed can't be empty for conditional bucket") - } +func validateTriggerType(bucketFactory *BucketFactory) error { + if bucketFactory.Capacity != 0 { + return errors.New("trigger bucket must have 0 capacity") + } - if bucketFactory.leakspeed == 0 { - return fmt.Errorf("bad leakspeed for conditional bucket '%s'", bucketFactory.LeakSpeed) - } - } else if bucketFactory.Type == "bayesian" { - if 
bucketFactory.BayesianConditions == nil { - return errors.New("bayesian bucket must have bayesian conditions") - } + return nil +} - if bucketFactory.BayesianPrior == 0 { - return errors.New("bayesian bucket must have a valid, non-zero prior") - } +func validateConditionalType(bucketFactory *BucketFactory) error { + if bucketFactory.ConditionalOverflow == "" { + return errors.New("conditional bucket must have a condition") + } - if bucketFactory.BayesianThreshold == 0 { - return errors.New("bayesian bucket must have a valid, non-zero threshold") - } + if bucketFactory.Capacity != -1 { + bucketFactory.logger.Warnf("Using a value different than -1 as capacity for conditional bucket, this may lead to unexpected overflows") + } - if bucketFactory.BayesianPrior > 1 { - return errors.New("bayesian bucket must have a valid, non-zero prior") - } + if bucketFactory.LeakSpeed == "" { + return errors.New("leakspeed can't be empty for conditional bucket") + } - if bucketFactory.BayesianThreshold > 1 { - return errors.New("bayesian bucket must have a valid, non-zero threshold") - } + if bucketFactory.leakspeed == 0 { + return fmt.Errorf("bad leakspeed for conditional bucket '%s'", bucketFactory.LeakSpeed) + } - if bucketFactory.Capacity != -1 { - return errors.New("bayesian bucket must have capacity -1") + return nil +} + +func validateBayesianType(bucketFactory *BucketFactory) error { + if bucketFactory.BayesianConditions == nil { + return errors.New("bayesian bucket must have bayesian conditions") + } + + if bucketFactory.BayesianPrior == 0 { + return errors.New("bayesian bucket must have a valid, non-zero prior") + } + + if bucketFactory.BayesianThreshold == 0 { + return errors.New("bayesian bucket must have a valid, non-zero threshold") + } + + if bucketFactory.BayesianPrior > 1 { + return errors.New("bayesian bucket must have a valid, non-zero prior") + } + + if bucketFactory.BayesianThreshold > 1 { + return errors.New("bayesian bucket must have a valid, non-zero 
threshold") + } + + if bucketFactory.Capacity != -1 { + return errors.New("bayesian bucket must have capacity -1") + } + + return nil +} + +func ValidateFactory(bucketFactory *BucketFactory) error { + if bucketFactory.Name == "" { + return errors.New("bucket must have name") + } + + if bucketFactory.Description == "" { + return errors.New("description is mandatory") + } + + switch bucketFactory.Type { + case "leaky": + if err := validateLeakyType(bucketFactory); err != nil { + return err } - } else { + case "counter": + if err := validateCounterType(bucketFactory); err != nil { + return err + } + case "trigger": + if err := validateTriggerType(bucketFactory); err != nil { + return err + } + case "conditional": + if err := validateConditionalType(bucketFactory); err != nil { + return err + } + case "bayesian": + if err := validateBayesianType(bucketFactory); err != nil { + return err + } + default: return fmt.Errorf("unknown bucket type '%s'", bucketFactory.Type) } @@ -230,8 +271,8 @@ func LoadBuckets(cscfg *csconfig.CrowdsecServiceCfg, hub *cwhub.Hub, files []str err = dec.Decode(&bucketFactory) if err != nil { if !errors.Is(err, io.EOF) { - log.Errorf("Bad yaml in %s : %v", f, err) - return nil, nil, fmt.Errorf("bad yaml in %s : %v", f, err) + log.Errorf("Bad yaml in %s: %v", f, err) + return nil, nil, fmt.Errorf("bad yaml in %s: %w", f, err) } log.Tracef("End of yaml file") @@ -282,8 +323,8 @@ func LoadBuckets(cscfg *csconfig.CrowdsecServiceCfg, hub *cwhub.Hub, files []str err = LoadBucket(&bucketFactory, tomb) if err != nil { - log.Errorf("Failed to load bucket %s : %v", bucketFactory.Name, err) - return nil, nil, fmt.Errorf("loading of %s failed : %v", bucketFactory.Name, err) + log.Errorf("Failed to load bucket %s: %v", bucketFactory.Name, err) + return nil, nil, fmt.Errorf("loading of %s failed: %w", bucketFactory.Name, err) } bucketFactory.orderEvent = orderEvent @@ -326,7 +367,7 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error { if 
bucketFactory.LeakSpeed != "" { if bucketFactory.leakspeed, err = time.ParseDuration(bucketFactory.LeakSpeed); err != nil { - return fmt.Errorf("bad leakspeed '%s' in %s : %v", bucketFactory.LeakSpeed, bucketFactory.Filename, err) + return fmt.Errorf("bad leakspeed '%s' in %s: %w", bucketFactory.LeakSpeed, bucketFactory.Filename, err) } } else { bucketFactory.leakspeed = time.Duration(0) @@ -334,7 +375,7 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error { if bucketFactory.Duration != "" { if bucketFactory.duration, err = time.ParseDuration(bucketFactory.Duration); err != nil { - return fmt.Errorf("invalid Duration '%s' in %s : %v", bucketFactory.Duration, bucketFactory.Filename, err) + return fmt.Errorf("invalid Duration '%s' in %s: %w", bucketFactory.Duration, bucketFactory.Filename, err) } } @@ -345,13 +386,13 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error { bucketFactory.RunTimeFilter, err = expr.Compile(bucketFactory.Filter, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...) if err != nil { - return fmt.Errorf("invalid filter '%s' in %s : %v", bucketFactory.Filter, bucketFactory.Filename, err) + return fmt.Errorf("invalid filter '%s' in %s: %w", bucketFactory.Filter, bucketFactory.Filename, err) } if bucketFactory.GroupBy != "" { bucketFactory.RunTimeGroupBy, err = expr.Compile(bucketFactory.GroupBy, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...) 
if err != nil { - return fmt.Errorf("invalid groupby '%s' in %s : %v", bucketFactory.GroupBy, bucketFactory.Filename, err) + return fmt.Errorf("invalid groupby '%s' in %s: %w", bucketFactory.GroupBy, bucketFactory.Filename, err) } } @@ -370,7 +411,7 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error { case "bayesian": bucketFactory.processors = append(bucketFactory.processors, &DumbProcessor{}) default: - return fmt.Errorf("invalid type '%s' in %s : %v", bucketFactory.Type, bucketFactory.Filename, err) + return fmt.Errorf("invalid type '%s' in %s: %w", bucketFactory.Type, bucketFactory.Filename, err) } if bucketFactory.Distinct != "" { @@ -435,7 +476,7 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error { bucketFactory.output = false if err := ValidateFactory(bucketFactory); err != nil { - return fmt.Errorf("invalid bucket from %s : %v", bucketFactory.Filename, err) + return fmt.Errorf("invalid bucket from %s: %w", bucketFactory.Filename, err) } bucketFactory.tomb = tomb diff --git a/pkg/leakybucket/overflows.go b/pkg/leakybucket/overflows.go index 3ee067177ef..e67698e8473 100644 --- a/pkg/leakybucket/overflows.go +++ b/pkg/leakybucket/overflows.go @@ -19,66 +19,77 @@ import ( // SourceFromEvent extracts and formats a valid models.Source object from an Event func SourceFromEvent(evt types.Event, leaky *Leaky) (map[string]models.Source, error) { - srcs := make(map[string]models.Source) /*if it's already an overflow, we have properly formatted sources. 
we can just twitch them to reflect the requested scope*/ if evt.Type == types.OVFLW { - for k, v := range evt.Overflow.Sources { - /*the scopes are already similar, nothing to do*/ - if leaky.scopeType.Scope == *v.Scope { - srcs[k] = v - continue - } + return overflowEventSources(evt, leaky) + } - /*The bucket requires a decision on scope Range */ - if leaky.scopeType.Scope == types.Range { - /*the original bucket was target IPs, check that we do have range*/ - if *v.Scope == types.Ip { - src := models.Source{} - src.AsName = v.AsName - src.AsNumber = v.AsNumber - src.Cn = v.Cn - src.Latitude = v.Latitude - src.Longitude = v.Longitude - src.Range = v.Range - src.Value = new(string) - src.Scope = new(string) - *src.Scope = leaky.scopeType.Scope - *src.Value = "" - - if v.Range != "" { - *src.Value = v.Range - } + return eventSources(evt, leaky) +} - if leaky.scopeType.RunTimeFilter != nil { - retValue, err := exprhelpers.Run(leaky.scopeType.RunTimeFilter, map[string]interface{}{"evt": &evt}, leaky.logger, leaky.BucketConfig.Debug) - if err != nil { - return srcs, fmt.Errorf("while running scope filter: %w", err) - } +func overflowEventSources(evt types.Event, leaky *Leaky) (map[string]models.Source, error) { + srcs := make(map[string]models.Source) - value, ok := retValue.(string) - if !ok { - value = "" - } + for k, v := range evt.Overflow.Sources { + /*the scopes are already similar, nothing to do*/ + if leaky.scopeType.Scope == *v.Scope { + srcs[k] = v + continue + } - src.Value = &value + /*The bucket requires a decision on scope Range */ + if leaky.scopeType.Scope == types.Range { + /*the original bucket was target IPs, check that we do have range*/ + if *v.Scope == types.Ip { + src := models.Source{} + src.AsName = v.AsName + src.AsNumber = v.AsNumber + src.Cn = v.Cn + src.Latitude = v.Latitude + src.Longitude = v.Longitude + src.Range = v.Range + src.Value = new(string) + src.Scope = new(string) + *src.Scope = leaky.scopeType.Scope + *src.Value = "" + + if 
v.Range != "" { + *src.Value = v.Range + } + + if leaky.scopeType.RunTimeFilter != nil { + retValue, err := exprhelpers.Run(leaky.scopeType.RunTimeFilter, map[string]interface{}{"evt": &evt}, leaky.logger, leaky.BucketConfig.Debug) + if err != nil { + return srcs, fmt.Errorf("while running scope filter: %w", err) } - if *src.Value != "" { - srcs[*src.Value] = src - } else { - log.Warningf("bucket %s requires scope Range, but none was provided. It seems that the %s wasn't enriched to include its range.", leaky.Name, *v.Value) + value, ok := retValue.(string) + if !ok { + value = "" } + + src.Value = &value + } + + if *src.Value != "" { + srcs[*src.Value] = src } else { - log.Warningf("bucket %s requires scope Range, but can't extrapolate from %s (%s)", - leaky.Name, *v.Scope, *v.Value) + log.Warningf("bucket %s requires scope Range, but none was provided. It seems that the %s wasn't enriched to include its range.", leaky.Name, *v.Value) } + } else { + log.Warningf("bucket %s requires scope Range, but can't extrapolate from %s (%s)", + leaky.Name, *v.Scope, *v.Value) } } - - return srcs, nil } + return srcs, nil +} + +func eventSources(evt types.Event, leaky *Leaky) (map[string]models.Source, error) { + srcs := make(map[string]models.Source) + src := models.Source{} switch leaky.scopeType.Scope { @@ -236,9 +247,10 @@ func EventsFromQueue(queue *types.Queue) []*models.Event { // alertFormatSource iterates over the queue to collect sources func alertFormatSource(leaky *Leaky, queue *types.Queue) (map[string]models.Source, string, error) { - var sources = make(map[string]models.Source) var source_type string + sources := make(map[string]models.Source) + log.Debugf("Formatting (%s) - scope Info : scope_type:%s / scope_filter:%s", leaky.Name, leaky.scopeType.Scope, leaky.scopeType.Filter) for _, evt := range queue.Queue { @@ -299,6 +311,7 @@ func NewAlert(leaky *Leaky, queue *types.Queue) (types.RuntimeAlert, error) { StopAt: &stopAt, Simulated: &leaky.Simulated, } + if 
leaky.BucketConfig == nil { return runtimeAlert, errors.New("leaky.BucketConfig is nil") } From 08fdfc4fb07be0d1b13d43b47d8a7185870df7e1 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 20 Aug 2024 16:20:40 +0200 Subject: [PATCH 254/318] cscli refact: package 'cliconsole' (#3149) * cscli refact: package 'cliconsole' * dry * lint * lint --- .golangci.yml | 1 - cmd/crowdsec-cli/capi.go | 2 +- cmd/crowdsec-cli/{ => cliconsole}/console.go | 61 ++++++------------- .../{ => cliconsole}/console_table.go | 2 +- cmd/crowdsec-cli/climetrics/statbouncer.go | 2 +- cmd/crowdsec-cli/itemcli.go | 12 ++-- cmd/crowdsec-cli/lapi.go | 2 +- cmd/crowdsec-cli/main.go | 4 +- cmd/crowdsec-cli/messages.go | 23 ------- cmd/crowdsec-cli/reload.go | 6 ++ cmd/crowdsec-cli/reload_freebsd.go | 4 ++ cmd/crowdsec-cli/reload_linux.go | 4 ++ cmd/crowdsec-cli/reload_windows.go | 3 + cmd/crowdsec-cli/simulation.go | 2 +- pkg/acquisition/modules/appsec/utils.go | 49 +++++++++------ pkg/acquisition/modules/file/file.go | 5 +- pkg/alertcontext/alertcontext.go | 10 +-- pkg/apiclient/auth_service_test.go | 2 +- pkg/longpollclient/client.go | 2 +- 19 files changed, 90 insertions(+), 106 deletions(-) rename cmd/crowdsec-cli/{ => cliconsole}/console.go (87%) rename cmd/crowdsec-cli/{ => cliconsole}/console_table.go (98%) delete mode 100644 cmd/crowdsec-cli/messages.go create mode 100644 cmd/crowdsec-cli/reload.go create mode 100644 cmd/crowdsec-cli/reload_freebsd.go create mode 100644 cmd/crowdsec-cli/reload_linux.go create mode 100644 cmd/crowdsec-cli/reload_windows.go diff --git a/.golangci.yml b/.golangci.yml index 6da59142691..2b216259770 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -17,7 +17,6 @@ linters-settings: disable: - reflectvaluecompare - fieldalignment - - printf maintidx: # raise this after refactoring diff --git a/cmd/crowdsec-cli/capi.go b/cmd/crowdsec-cli/capi.go index 1888aa3545a..589b36adade 100644 --- a/cmd/crowdsec-cli/capi.go +++ 
b/cmd/crowdsec-cli/capi.go @@ -119,7 +119,7 @@ func (cli *cliCapi) register(capiUserPrefix string, outputFile string) error { fmt.Println(string(apiConfigDump)) } - log.Warning(ReloadMessage()) + log.Warning(reloadMessage) return nil } diff --git a/cmd/crowdsec-cli/console.go b/cmd/crowdsec-cli/cliconsole/console.go similarity index 87% rename from cmd/crowdsec-cli/console.go rename to cmd/crowdsec-cli/cliconsole/console.go index 979c9f0ea60..666afbba07f 100644 --- a/cmd/crowdsec-cli/console.go +++ b/cmd/crowdsec-cli/cliconsole/console.go @@ -1,4 +1,4 @@ -package main +package cliconsole import ( "context" @@ -28,13 +28,17 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) +type configGetter func() *csconfig.Config + type cliConsole struct { - cfg configGetter + cfg func() *csconfig.Config + reloadMessage string } -func NewCLIConsole(cfg configGetter) *cliConsole { +func New(cfg configGetter, reloadMessage string) *cliConsole { return &cliConsole{ - cfg: cfg, + cfg: cfg, + reloadMessage: reloadMessage, } } @@ -221,7 +225,7 @@ Enable given information push to the central API. 
Allows to empower the console` log.Infof("%v have been enabled", args) } - log.Infof(ReloadMessage()) + log.Info(cli.reloadMessage) return nil }, @@ -255,7 +259,7 @@ Disable given information push to the central API.`, log.Infof("%v have been disabled", args) } - log.Infof(ReloadMessage()) + log.Info(cli.reloadMessage) return nil }, @@ -348,13 +352,8 @@ func (cli *cliConsole) setConsoleOpts(args []string, wanted bool) error { switch arg { case csconfig.CONSOLE_MANAGEMENT: /*for each flag check if it's already set before setting it*/ - if consoleCfg.ConsoleManagement != nil { - if *consoleCfg.ConsoleManagement == wanted { - log.Debugf("%s already set to %t", csconfig.CONSOLE_MANAGEMENT, wanted) - } else { - log.Infof("%s set to %t", csconfig.CONSOLE_MANAGEMENT, wanted) - *consoleCfg.ConsoleManagement = wanted - } + if consoleCfg.ConsoleManagement != nil && *consoleCfg.ConsoleManagement == wanted { + log.Debugf("%s already set to %t", csconfig.CONSOLE_MANAGEMENT, wanted) } else { log.Infof("%s set to %t", csconfig.CONSOLE_MANAGEMENT, wanted) consoleCfg.ConsoleManagement = ptr.Of(wanted) @@ -386,52 +385,32 @@ func (cli *cliConsole) setConsoleOpts(args []string, wanted bool) error { } case csconfig.SEND_CUSTOM_SCENARIOS: /*for each flag check if it's already set before setting it*/ - if consoleCfg.ShareCustomScenarios != nil { - if *consoleCfg.ShareCustomScenarios == wanted { - log.Debugf("%s already set to %t", csconfig.SEND_CUSTOM_SCENARIOS, wanted) - } else { - log.Infof("%s set to %t", csconfig.SEND_CUSTOM_SCENARIOS, wanted) - *consoleCfg.ShareCustomScenarios = wanted - } + if consoleCfg.ShareCustomScenarios != nil && *consoleCfg.ShareCustomScenarios == wanted { + log.Debugf("%s already set to %t", csconfig.SEND_CUSTOM_SCENARIOS, wanted) } else { log.Infof("%s set to %t", csconfig.SEND_CUSTOM_SCENARIOS, wanted) consoleCfg.ShareCustomScenarios = ptr.Of(wanted) } case csconfig.SEND_TAINTED_SCENARIOS: /*for each flag check if it's already set before setting it*/ - if 
consoleCfg.ShareTaintedScenarios != nil { - if *consoleCfg.ShareTaintedScenarios == wanted { - log.Debugf("%s already set to %t", csconfig.SEND_TAINTED_SCENARIOS, wanted) - } else { - log.Infof("%s set to %t", csconfig.SEND_TAINTED_SCENARIOS, wanted) - *consoleCfg.ShareTaintedScenarios = wanted - } + if consoleCfg.ShareTaintedScenarios != nil && *consoleCfg.ShareTaintedScenarios == wanted { + log.Debugf("%s already set to %t", csconfig.SEND_TAINTED_SCENARIOS, wanted) } else { log.Infof("%s set to %t", csconfig.SEND_TAINTED_SCENARIOS, wanted) consoleCfg.ShareTaintedScenarios = ptr.Of(wanted) } case csconfig.SEND_MANUAL_SCENARIOS: /*for each flag check if it's already set before setting it*/ - if consoleCfg.ShareManualDecisions != nil { - if *consoleCfg.ShareManualDecisions == wanted { - log.Debugf("%s already set to %t", csconfig.SEND_MANUAL_SCENARIOS, wanted) - } else { - log.Infof("%s set to %t", csconfig.SEND_MANUAL_SCENARIOS, wanted) - *consoleCfg.ShareManualDecisions = wanted - } + if consoleCfg.ShareManualDecisions != nil && *consoleCfg.ShareManualDecisions == wanted { + log.Debugf("%s already set to %t", csconfig.SEND_MANUAL_SCENARIOS, wanted) } else { log.Infof("%s set to %t", csconfig.SEND_MANUAL_SCENARIOS, wanted) consoleCfg.ShareManualDecisions = ptr.Of(wanted) } case csconfig.SEND_CONTEXT: /*for each flag check if it's already set before setting it*/ - if consoleCfg.ShareContext != nil { - if *consoleCfg.ShareContext == wanted { - log.Debugf("%s already set to %t", csconfig.SEND_CONTEXT, wanted) - } else { - log.Infof("%s set to %t", csconfig.SEND_CONTEXT, wanted) - *consoleCfg.ShareContext = wanted - } + if consoleCfg.ShareContext != nil && *consoleCfg.ShareContext == wanted { + log.Debugf("%s already set to %t", csconfig.SEND_CONTEXT, wanted) } else { log.Infof("%s set to %t", csconfig.SEND_CONTEXT, wanted) consoleCfg.ShareContext = ptr.Of(wanted) diff --git a/cmd/crowdsec-cli/console_table.go b/cmd/crowdsec-cli/cliconsole/console_table.go similarity 
index 98% rename from cmd/crowdsec-cli/console_table.go rename to cmd/crowdsec-cli/cliconsole/console_table.go index 94976618573..8f17b97860a 100644 --- a/cmd/crowdsec-cli/console_table.go +++ b/cmd/crowdsec-cli/cliconsole/console_table.go @@ -1,4 +1,4 @@ -package main +package cliconsole import ( "io" diff --git a/cmd/crowdsec-cli/climetrics/statbouncer.go b/cmd/crowdsec-cli/climetrics/statbouncer.go index 62e68b6bc41..bc0da152d6d 100644 --- a/cmd/crowdsec-cli/climetrics/statbouncer.go +++ b/cmd/crowdsec-cli/climetrics/statbouncer.go @@ -129,7 +129,7 @@ func (*statBouncer) Description() (string, string) { func logWarningOnce(warningsLogged map[string]bool, msg string) { if _, ok := warningsLogged[msg]; !ok { - log.Warningf(msg) + log.Warning(msg) warningsLogged[msg] = true } diff --git a/cmd/crowdsec-cli/itemcli.go b/cmd/crowdsec-cli/itemcli.go index 64c18ae89b1..3f789a14ded 100644 --- a/cmd/crowdsec-cli/itemcli.go +++ b/cmd/crowdsec-cli/itemcli.go @@ -78,7 +78,7 @@ func (cli cliItem) install(ctx context.Context, args []string, downloadOnly bool return errors.New(msg) } - log.Errorf(msg) + log.Error(msg) continue } @@ -92,7 +92,7 @@ func (cli cliItem) install(ctx context.Context, args []string, downloadOnly bool } } - log.Infof(ReloadMessage()) + log.Info(reloadMessage) return nil } @@ -175,7 +175,7 @@ func (cli cliItem) remove(args []string, purge bool, force bool, all bool) error log.Infof("Removed %d %s", removed, cli.name) if removed > 0 { - log.Infof(ReloadMessage()) + log.Info(reloadMessage) } return nil @@ -217,7 +217,7 @@ func (cli cliItem) remove(args []string, purge bool, force bool, all bool) error log.Infof("Removed %d %s", removed, cli.name) if removed > 0 { - log.Infof(ReloadMessage()) + log.Info(reloadMessage) } return nil @@ -283,7 +283,7 @@ func (cli cliItem) upgrade(ctx context.Context, args []string, force bool, all b log.Infof("Updated %d %s", updated, cli.name) if updated > 0 { - log.Infof(ReloadMessage()) + log.Info(reloadMessage) } return 
nil @@ -314,7 +314,7 @@ func (cli cliItem) upgrade(ctx context.Context, args []string, force bool, all b } if updated > 0 { - log.Infof(ReloadMessage()) + log.Info(reloadMessage) } return nil diff --git a/cmd/crowdsec-cli/lapi.go b/cmd/crowdsec-cli/lapi.go index 0b8bc59dad5..df4f0a98188 100644 --- a/cmd/crowdsec-cli/lapi.go +++ b/cmd/crowdsec-cli/lapi.go @@ -161,7 +161,7 @@ func (cli *cliLapi) register(apiURL string, outputFile string, machine string) e fmt.Printf("%s\n", string(apiConfigDump)) } - log.Warning(ReloadMessage()) + log.Warning(reloadMessage) return nil } diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index d4046414030..da955923962 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -14,8 +14,8 @@ import ( "github.com/crowdsecurity/go-cs-lib/trace" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cliconsole" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/climetrics" - "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/fflag" ) @@ -262,7 +262,7 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewCLICapi(cli.cfg).NewCommand()) cmd.AddCommand(NewCLILapi(cli.cfg).NewCommand()) cmd.AddCommand(NewCompletionCmd()) - cmd.AddCommand(NewCLIConsole(cli.cfg).NewCommand()) + cmd.AddCommand(cliconsole.New(cli.cfg, reloadMessage).NewCommand()) cmd.AddCommand(NewCLIExplain(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIHubTest(cli.cfg).NewCommand()) cmd.AddCommand(NewCLINotifications(cli.cfg).NewCommand()) diff --git a/cmd/crowdsec-cli/messages.go b/cmd/crowdsec-cli/messages.go deleted file mode 100644 index 02f051601e4..00000000000 --- a/cmd/crowdsec-cli/messages.go +++ /dev/null @@ -1,23 +0,0 @@ -package main - -import ( - "fmt" - "runtime" -) - -// ReloadMessage returns a description of the task required to reload -// the crowdsec configuration, according to the operating system. 
-func ReloadMessage() string { - var msg string - - switch runtime.GOOS { - case "windows": - msg = "Please restart the crowdsec service" - case "freebsd": - msg = `Run 'sudo service crowdsec reload'` - default: - msg = `Run 'sudo systemctl reload crowdsec'` - } - - return fmt.Sprintf("%s for the new configuration to be effective.", msg) -} diff --git a/cmd/crowdsec-cli/reload.go b/cmd/crowdsec-cli/reload.go new file mode 100644 index 00000000000..8dd59be8d05 --- /dev/null +++ b/cmd/crowdsec-cli/reload.go @@ -0,0 +1,6 @@ +//go:build !windows && !freebsd && !linux + +package main + +// generic message since we don't know the platform +const reloadMessage = "Please reload the crowdsec process for the new configuration to be effective." diff --git a/cmd/crowdsec-cli/reload_freebsd.go b/cmd/crowdsec-cli/reload_freebsd.go new file mode 100644 index 00000000000..991d3ea6080 --- /dev/null +++ b/cmd/crowdsec-cli/reload_freebsd.go @@ -0,0 +1,4 @@ +package main + +// actually sudo is not that popular on freebsd, but this will do +const reloadMessage = "Run 'sudo service crowdsec reload' for the new configuration to be effective." diff --git a/cmd/crowdsec-cli/reload_linux.go b/cmd/crowdsec-cli/reload_linux.go new file mode 100644 index 00000000000..a74adfbcdfd --- /dev/null +++ b/cmd/crowdsec-cli/reload_linux.go @@ -0,0 +1,4 @@ +package main + +// assume systemd, although gentoo and others may differ +const reloadMessage = "Run 'sudo systemctl reload crowdsec' for the new configuration to be effective." diff --git a/cmd/crowdsec-cli/reload_windows.go b/cmd/crowdsec-cli/reload_windows.go new file mode 100644 index 00000000000..ec9a0b10741 --- /dev/null +++ b/cmd/crowdsec-cli/reload_windows.go @@ -0,0 +1,3 @@ +package main + +const reloadMessage = "Please restart the crowdsec service for the new configuration to be effective." 
diff --git a/cmd/crowdsec-cli/simulation.go b/cmd/crowdsec-cli/simulation.go index f8d8a660b8c..12c9980d588 100644 --- a/cmd/crowdsec-cli/simulation.go +++ b/cmd/crowdsec-cli/simulation.go @@ -44,7 +44,7 @@ cscli simulation disable crowdsecurity/ssh-bf`, }, PersistentPostRun: func(cmd *cobra.Command, _ []string) { if cmd.Name() != "status" { - log.Infof(ReloadMessage()) + log.Info(reloadMessage) } }, } diff --git a/pkg/acquisition/modules/appsec/utils.go b/pkg/acquisition/modules/appsec/utils.go index 15de8046716..4fb1a979d14 100644 --- a/pkg/acquisition/modules/appsec/utils.go +++ b/pkg/acquisition/modules/appsec/utils.go @@ -40,14 +40,16 @@ func appendMeta(meta models.Meta, key string, value string) models.Meta { Key: key, Value: value, }) + return meta } func AppsecEventGeneration(inEvt types.Event) (*types.Event, error) { - //if the request didnd't trigger inband rules, we don't want to generate an event to LAPI/CAPI + // if the request didnd't trigger inband rules, we don't want to generate an event to LAPI/CAPI if !inEvt.Appsec.HasInBandMatches { return nil, nil } + evt := types.Event{} evt.Type = types.APPSEC evt.Process = true @@ -105,7 +107,6 @@ func AppsecEventGeneration(inEvt types.Event) (*types.Event, error) { evtRule.Meta = make(models.Meta, 0) for _, key := range appsecMetaKeys { - if tmpAppsecContext[key] == nil { tmpAppsecContext[key] = make([]string, 0) } @@ -113,18 +114,21 @@ func AppsecEventGeneration(inEvt types.Event) (*types.Event, error) { switch value := matched_rule[key].(type) { case string: evtRule.Meta = appendMeta(evtRule.Meta, key, value) + if value != "" && !slices.Contains(tmpAppsecContext[key], value) { tmpAppsecContext[key] = append(tmpAppsecContext[key], value) } case int: val := strconv.Itoa(value) evtRule.Meta = appendMeta(evtRule.Meta, key, val) + if val != "" && !slices.Contains(tmpAppsecContext[key], val) { tmpAppsecContext[key] = append(tmpAppsecContext[key], val) } case []string: for _, v := range value { evtRule.Meta = 
appendMeta(evtRule.Meta, key, v) + if v != "" && !slices.Contains(tmpAppsecContext[key], v) { tmpAppsecContext[key] = append(tmpAppsecContext[key], v) } @@ -133,20 +137,21 @@ func AppsecEventGeneration(inEvt types.Event) (*types.Event, error) { for _, v := range value { val := strconv.Itoa(v) evtRule.Meta = appendMeta(evtRule.Meta, key, val) + if val != "" && !slices.Contains(tmpAppsecContext[key], val) { tmpAppsecContext[key] = append(tmpAppsecContext[key], val) } - } default: val := fmt.Sprintf("%v", value) evtRule.Meta = appendMeta(evtRule.Meta, key, val) + if val != "" && !slices.Contains(tmpAppsecContext[key], val) { tmpAppsecContext[key] = append(tmpAppsecContext[key], val) } - } } + alert.Events = append(alert.Events, &evtRule) } @@ -159,7 +164,7 @@ func AppsecEventGeneration(inEvt types.Event) (*types.Event, error) { valueStr, err := alertcontext.TruncateContext(values, alertcontext.MaxContextValueLen) if err != nil { - log.Warningf(err.Error()) + log.Warning(err.Error()) } meta := models.MetaItems0{ @@ -185,15 +190,16 @@ func AppsecEventGeneration(inEvt types.Event) (*types.Event, error) { alert.StopAt = ptr.Of(time.Now().UTC().Format(time.RFC3339)) evt.Overflow.APIAlerts = []models.Alert{alert} evt.Overflow.Alert = &alert + return &evt, nil } func EventFromRequest(r *appsec.ParsedRequest, labels map[string]string) (types.Event, error) { evt := types.Event{} - //we might want to change this based on in-band vs out-of-band ? + // we might want to change this based on in-band vs out-of-band ? evt.Type = types.LOG evt.ExpectMode = types.LIVE - //def needs fixing + // def needs fixing evt.Stage = "s00-raw" evt.Parsed = map[string]string{ "source_ip": r.ClientIP, @@ -203,19 +209,19 @@ func EventFromRequest(r *appsec.ParsedRequest, labels map[string]string) (types. 
"req_uuid": r.Tx.ID(), "source": "crowdsec-appsec", "remediation_cmpt_ip": r.RemoteAddrNormalized, - //TBD: - //http_status - //user_agent + // TBD: + // http_status + // user_agent } evt.Line = types.Line{ Time: time.Now(), - //should we add some info like listen addr/port/path ? + // should we add some info like listen addr/port/path ? Labels: labels, Process: true, Module: "appsec", Src: "appsec", - Raw: "dummy-appsec-data", //we discard empty Line.Raw items :) + Raw: "dummy-appsec-data", // we discard empty Line.Raw items :) } evt.Appsec = types.AppsecEvent{} @@ -247,29 +253,29 @@ func LogAppsecEvent(evt *types.Event, logger *log.Entry) { "target_uri": req, }).Debugf("%s triggered non-blocking rules on %s (%d rules) [%v]", evt.Parsed["source_ip"], req, len(evt.Appsec.MatchedRules), evt.Appsec.GetRuleIDs()) } - } func (r *AppsecRunner) AccumulateTxToEvent(evt *types.Event, req *appsec.ParsedRequest) error { - if evt == nil { - //an error was already emitted, let's not spam the logs + // an error was already emitted, let's not spam the logs return nil } if !req.Tx.IsInterrupted() { - //if the phase didn't generate an interruption, we don't have anything to add to the event + // if the phase didn't generate an interruption, we don't have anything to add to the event return nil } - //if one interruption was generated, event is good for processing :) + // if one interruption was generated, event is good for processing :) evt.Process = true if evt.Meta == nil { evt.Meta = map[string]string{} } + if evt.Parsed == nil { evt.Parsed = map[string]string{} } + if req.IsInBand { evt.Meta["appsec_interrupted"] = "true" evt.Meta["appsec_action"] = req.Tx.Interruption().Action @@ -290,9 +296,11 @@ func (r *AppsecRunner) AccumulateTxToEvent(evt *types.Event, req *appsec.ParsedR if variable.Key() != "" { key += "." 
+ variable.Key() } + if variable.Value() == "" { continue } + for _, collectionToKeep := range r.AppsecRuntime.CompiledVariablesTracking { match := collectionToKeep.MatchString(key) if match { @@ -303,6 +311,7 @@ func (r *AppsecRunner) AccumulateTxToEvent(evt *types.Event, req *appsec.ParsedR } } } + return true }) @@ -325,11 +334,12 @@ func (r *AppsecRunner) AccumulateTxToEvent(evt *types.Event, req *appsec.ParsedR ruleNameProm := fmt.Sprintf("%d", rule.Rule().ID()) if details, ok := appsec.AppsecRulesDetails[rule.Rule().ID()]; ok { - //Only set them for custom rules, not for rules written in seclang + // Only set them for custom rules, not for rules written in seclang name = details.Name version = details.Version hash = details.Hash ruleNameProm = details.Name + r.logger.Debugf("custom rule for event, setting name: %s, version: %s, hash: %s", name, version, hash) } else { name = fmt.Sprintf("native_rule:%d", rule.Rule().ID()) @@ -338,12 +348,15 @@ func (r *AppsecRunner) AccumulateTxToEvent(evt *types.Event, req *appsec.ParsedR AppsecRuleHits.With(prometheus.Labels{"rule_name": ruleNameProm, "type": kind, "source": req.RemoteAddrNormalized, "appsec_engine": req.AppsecEngine}).Inc() matchedZones := make([]string, 0) + for _, matchData := range rule.MatchedDatas() { zone := matchData.Variable().Name() + varName := matchData.Key() if varName != "" { zone += "." 
+ varName } + matchedZones = append(matchedZones, zone) } diff --git a/pkg/acquisition/modules/file/file.go b/pkg/acquisition/modules/file/file.go index c36672507db..34a7052f46f 100644 --- a/pkg/acquisition/modules/file/file.go +++ b/pkg/acquisition/modules/file/file.go @@ -385,7 +385,6 @@ func (f *FileSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) er } filink, err := os.Lstat(file) - if err != nil { f.logger.Errorf("Could not lstat() new file %s, ignoring it : %s", file, err) continue @@ -578,7 +577,7 @@ func (f *FileSource) tailFile(out chan types.Event, t *tomb.Tomb, tail *tail.Tai errMsg = fmt.Sprintf(errMsg+" : %s", err) } - logger.Warningf(errMsg) + logger.Warning(errMsg) return nil case line := <-tail.Lines: @@ -629,8 +628,8 @@ func (f *FileSource) readFile(filename string, out chan types.Event, t *tomb.Tom var scanner *bufio.Scanner logger := f.logger.WithField("oneshot", filename) - fd, err := os.Open(filename) + fd, err := os.Open(filename) if err != nil { return fmt.Errorf("failed opening %s: %w", filename, err) } diff --git a/pkg/alertcontext/alertcontext.go b/pkg/alertcontext/alertcontext.go index c502def32cd..16ebc6d0ac2 100644 --- a/pkg/alertcontext/alertcontext.go +++ b/pkg/alertcontext/alertcontext.go @@ -32,7 +32,7 @@ func ValidateContextExpr(key string, expressions []string) error { for _, expression := range expressions { _, err := expr.Compile(expression, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...) if err != nil { - return fmt.Errorf("compilation of '%s' failed: %v", expression, err) + return fmt.Errorf("compilation of '%s' failed: %w", expression, err) } } @@ -74,7 +74,7 @@ func NewAlertContext(contextToSend map[string][]string, valueLength int) error { for _, value := range values { valueCompiled, err := expr.Compile(value, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...) 
if err != nil { - return fmt.Errorf("compilation of '%s' context value failed: %v", value, err) + return fmt.Errorf("compilation of '%s' context value failed: %w", value, err) } alertContext.ContextToSendCompiled[key] = append(alertContext.ContextToSendCompiled[key], valueCompiled) @@ -133,7 +133,7 @@ func EventToContext(events []types.Event) (models.Meta, []error) { output, err := expr.Run(value, map[string]interface{}{"evt": evt}) if err != nil { - errors = append(errors, fmt.Errorf("failed to get value for %s : %v", key, err)) + errors = append(errors, fmt.Errorf("failed to get value for %s: %w", key, err)) continue } @@ -143,7 +143,7 @@ func EventToContext(events []types.Event) (models.Meta, []error) { case int: val = strconv.Itoa(out) default: - errors = append(errors, fmt.Errorf("unexpected return type for %s : %T", key, output)) + errors = append(errors, fmt.Errorf("unexpected return type for %s: %T", key, output)) continue } @@ -161,7 +161,7 @@ func EventToContext(events []types.Event) (models.Meta, []error) { valueStr, err := TruncateContext(values, alertContext.ContextValueLen) if err != nil { - log.Warningf(err.Error()) + log.Warning(err.Error()) } meta := models.MetaItems0{ diff --git a/pkg/apiclient/auth_service_test.go b/pkg/apiclient/auth_service_test.go index 3e887149a98..6c9abc0edef 100644 --- a/pkg/apiclient/auth_service_test.go +++ b/pkg/apiclient/auth_service_test.go @@ -161,7 +161,7 @@ func TestWatcherAuth(t *testing.T) { bodyBytes, err := io.ReadAll(resp.Response.Body) require.NoError(t, err) - log.Printf(string(bodyBytes)) + log.Print(string(bodyBytes)) t.Fatalf("The AuthenticateWatcher function should have returned an error for the response code %d", errorCodeToTest) } diff --git a/pkg/longpollclient/client.go b/pkg/longpollclient/client.go index 9fa3b4b3f9a..0603b7a5e80 100644 --- a/pkg/longpollclient/client.go +++ b/pkg/longpollclient/client.go @@ -95,7 +95,7 @@ func (c *LongPollClient) poll() error { logger.Errorf("failed to read response 
body: %s", err) return err } - logger.Errorf(string(bodyContent)) + logger.Error(string(bodyContent)) return errUnauthorized } return fmt.Errorf("unexpected status code: %d", resp.StatusCode) From 3d27e83bf5db10895bc7c87ecff93f13cef5d067 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 20 Aug 2024 17:36:07 +0200 Subject: [PATCH 255/318] pkg/cwhub: improve support for k8s config maps with custom items (#3154) * pkg/cwhub: improve support for k8s config maps as custom items - allow links to links - ignore hidden ..data directories, but allow links to their content * allow any number of subdirectories in /etc/crowdsec/{hubtype} * item name as subdir/file.yaml * improve func test * lint --- cmd/crowdsec-cli/capi.go | 7 +- pkg/cwhub/errors.go | 6 +- pkg/cwhub/relativepath.go | 28 +++++ pkg/cwhub/relativepath_test.go | 72 ++++++++++++ pkg/cwhub/sync.go | 198 ++++++++++++++++++++++----------- pkg/metabase/metabase.go | 4 +- test/bats/20_hub_items.bats | 92 ++++++++++++++- 7 files changed, 322 insertions(+), 85 deletions(-) create mode 100644 pkg/cwhub/relativepath.go create mode 100644 pkg/cwhub/relativepath_test.go diff --git a/cmd/crowdsec-cli/capi.go b/cmd/crowdsec-cli/capi.go index 589b36adade..ac921ea547f 100644 --- a/cmd/crowdsec-cli/capi.go +++ b/cmd/crowdsec-cli/capi.go @@ -21,11 +21,6 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) -const ( - CAPIBaseURL = "https://api.crowdsec.net/" - CAPIURLPrefix = "v3" -) - type cliCapi struct { cfg configGetter } @@ -78,7 +73,7 @@ func (cli *cliCapi) register(capiUserPrefix string, outputFile string) error { Password: password, UserAgent: cwversion.UserAgent(), URL: apiurl, - VersionPrefix: CAPIURLPrefix, + VersionPrefix: "v3", }, nil) if err != nil { return fmt.Errorf("api client register ('%s'): %w", types.CAPIBaseURL, err) diff --git a/pkg/cwhub/errors.go b/pkg/cwhub/errors.go index f1e779b5476..b0be444fcba 100644 --- a/pkg/cwhub/errors.go +++ b/pkg/cwhub/errors.go @@ 
-5,10 +5,8 @@ import ( "fmt" ) -var ( - // ErrNilRemoteHub is returned when trying to download with a local-only configuration. - ErrNilRemoteHub = errors.New("remote hub configuration is not provided. Please report this issue to the developers") -) +// ErrNilRemoteHub is returned when trying to download with a local-only configuration. +var ErrNilRemoteHub = errors.New("remote hub configuration is not provided. Please report this issue to the developers") // IndexNotFoundError is returned when the remote hub index is not found. type IndexNotFoundError struct { diff --git a/pkg/cwhub/relativepath.go b/pkg/cwhub/relativepath.go new file mode 100644 index 00000000000..bcd4c576840 --- /dev/null +++ b/pkg/cwhub/relativepath.go @@ -0,0 +1,28 @@ +package cwhub + +import ( + "path/filepath" + "strings" +) + +// relativePathComponents returns the list of path components after baseDir. +// If path is not inside baseDir, it returns an empty slice. +func relativePathComponents(path string, baseDir string) []string { + absPath, err := filepath.Abs(path) + if err != nil { + return []string{} + } + + absBaseDir, err := filepath.Abs(baseDir) + if err != nil { + return []string{} + } + + // is path inside baseDir? + relPath, err := filepath.Rel(absBaseDir, absPath) + if err != nil || strings.HasPrefix(relPath, "..") || relPath == "." 
{ + return []string{} + } + + return strings.Split(relPath, string(filepath.Separator)) +} diff --git a/pkg/cwhub/relativepath_test.go b/pkg/cwhub/relativepath_test.go new file mode 100644 index 00000000000..11eba566064 --- /dev/null +++ b/pkg/cwhub/relativepath_test.go @@ -0,0 +1,72 @@ +package cwhub + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestRelativePathComponents(t *testing.T) { + tests := []struct { + name string + path string + baseDir string + expected []string + }{ + { + name: "Path within baseDir", + path: "/home/user/project/src/file.go", + baseDir: "/home/user/project", + expected: []string{"src", "file.go"}, + }, + { + name: "Path is baseDir", + path: "/home/user/project", + baseDir: "/home/user/project", + expected: []string{}, + }, + { + name: "Path outside baseDir", + path: "/home/user/otherproject/src/file.go", + baseDir: "/home/user/project", + expected: []string{}, + }, + { + name: "Path is subdirectory of baseDir", + path: "/home/user/project/src/", + baseDir: "/home/user/project", + expected: []string{"src"}, + }, + { + name: "Relative paths", + path: "project/src/file.go", + baseDir: "project", + expected: []string{"src", "file.go"}, + }, + { + name: "BaseDir with trailing slash", + path: "/home/user/project/src/file.go", + baseDir: "/home/user/project/", + expected: []string{"src", "file.go"}, + }, + { + name: "Empty baseDir", + path: "/home/user/project/src/file.go", + baseDir: "", + expected: []string{}, + }, + { + name: "Empty path", + path: "", + baseDir: "/home/user/project", + expected: []string{}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := relativePathComponents(tt.path, tt.baseDir) + assert.Equal(t, tt.expected, result) + }) + } +} diff --git a/pkg/cwhub/sync.go b/pkg/cwhub/sync.go index 38bb376ae3b..81d41d55971 100644 --- a/pkg/cwhub/sync.go +++ b/pkg/cwhub/sync.go @@ -20,22 +20,49 @@ func isYAMLFileName(path string) bool { return 
strings.HasSuffix(path, ".yaml") || strings.HasSuffix(path, ".yml") } -// linkTarget returns the target of a symlink, or empty string if it's dangling. -func linkTarget(path string, logger *logrus.Logger) (string, error) { - hubpath, err := os.Readlink(path) - if err != nil { - return "", fmt.Errorf("unable to read symlink: %s", path) +// resolveSymlink returns the ultimate target path of a symlink +// returns error if the symlink is dangling or too many symlinks are followed +func resolveSymlink(path string) (string, error) { + const maxSymlinks = 10 // Prevent infinite loops + for i := 0; i < maxSymlinks; i++ { + fi, err := os.Lstat(path) + if err != nil { + return "", err // dangling link + } + + if fi.Mode()&os.ModeSymlink == 0 { + // found the target + return path, nil + } + + path, err = os.Readlink(path) + if err != nil { + return "", err + } + + // relative to the link's directory? + if !filepath.IsAbs(path) { + path = filepath.Join(filepath.Dir(path), path) + } } - logger.Tracef("symlink %s -> %s", path, hubpath) + return "", errors.New("too many levels of symbolic links") +} - _, err = os.Lstat(hubpath) - if os.IsNotExist(err) { - logger.Warningf("link target does not exist: %s -> %s", path, hubpath) - return "", nil +// isPathInside checks if a path is inside the given directory +// it can return false negatives if the filesystem is case insensitive +func isPathInside(path, dir string) (bool, error) { + absFilePath, err := filepath.Abs(path) + if err != nil { + return false, err + } + + absDir, err := filepath.Abs(dir) + if err != nil { + return false, err } - return hubpath, nil + return strings.HasPrefix(absFilePath, absDir), nil } // information used to create a new Item, from a file path. 
@@ -53,58 +80,76 @@ func (h *Hub) getItemFileInfo(path string, logger *logrus.Logger) (*itemFileInfo hubDir := h.local.HubDir installDir := h.local.InstallDir - subs := strings.Split(path, string(os.PathSeparator)) + subsHub := relativePathComponents(path, hubDir) + subsInstall := relativePathComponents(path, installDir) - logger.Tracef("path:%s, hubdir:%s, installdir:%s", path, hubDir, installDir) - logger.Tracef("subs:%v", subs) - // we're in hub (~/.hub/hub/) - if strings.HasPrefix(path, hubDir) { + switch { + case len(subsHub) > 0: logger.Tracef("in hub dir") - // .../hub/parsers/s00-raw/crowdsec/skip-pretag.yaml - // .../hub/scenarios/crowdsec/ssh_bf.yaml - // .../hub/profiles/crowdsec/linux.yaml - if len(subs) < 4 { - return nil, fmt.Errorf("path is too short: %s (%d)", path, len(subs)) + // .../hub/parsers/s00-raw/crowdsecurity/skip-pretag.yaml + // .../hub/scenarios/crowdsecurity/ssh_bf.yaml + // .../hub/profiles/crowdsecurity/linux.yaml + if len(subsHub) < 3 { + return nil, fmt.Errorf("path is too short: %s (%d)", path, len(subsHub)) + } + + ftype := subsHub[0] + if !slices.Contains(ItemTypes, ftype) { + // this doesn't really happen anymore, because we only scan the {hubtype} directories + return nil, fmt.Errorf("unknown configuration type '%s'", ftype) + } + + stage := "" + fauthor := subsHub[1] + fname := subsHub[2] + + if ftype == PARSERS || ftype == POSTOVERFLOWS { + stage = subsHub[1] + fauthor = subsHub[2] + fname = subsHub[3] } ret = &itemFileInfo{ inhub: true, - fname: subs[len(subs)-1], - fauthor: subs[len(subs)-2], - stage: subs[len(subs)-3], - ftype: subs[len(subs)-4], + ftype: ftype, + stage: stage, + fauthor: fauthor, + fname: fname, } - } else if strings.HasPrefix(path, installDir) { // we're in install /etc/crowdsec//... 
+ + case len(subsInstall) > 0: logger.Tracef("in install dir") - if len(subs) < 3 { - return nil, fmt.Errorf("path is too short: %s (%d)", path, len(subs)) - } // .../config/parser/stage/file.yaml // .../config/postoverflow/stage/file.yaml // .../config/scenarios/scenar.yaml // .../config/collections/linux.yaml //file is empty - ret = &itemFileInfo{ - inhub: false, - fname: subs[len(subs)-1], - stage: subs[len(subs)-2], - ftype: subs[len(subs)-3], - fauthor: "", + + if len(subsInstall) < 2 { + return nil, fmt.Errorf("path is too short: %s (%d)", path, len(subsInstall)) } - } else { - return nil, fmt.Errorf("file '%s' is not from hub '%s' nor from the configuration directory '%s'", path, hubDir, installDir) - } - logger.Tracef("stage:%s ftype:%s", ret.stage, ret.ftype) + // this can be in any number of subdirs, we join them to compose the item name + + ftype := subsInstall[0] + stage := "" + fname := strings.Join(subsInstall[1:], "/") - if ret.ftype != PARSERS && ret.ftype != POSTOVERFLOWS { - if !slices.Contains(ItemTypes, ret.stage) { - return nil, errors.New("unknown configuration type") + if ftype == PARSERS || ftype == POSTOVERFLOWS { + stage = subsInstall[1] + fname = strings.Join(subsInstall[2:], "/") } - ret.ftype = ret.stage - ret.stage = "" + ret = &itemFileInfo{ + inhub: false, + ftype: ftype, + stage: stage, + fauthor: "", + fname: fname, + } + default: + return nil, fmt.Errorf("file '%s' is not from hub '%s' nor from the configuration directory '%s'", path, hubDir, installDir) } logger.Tracef("CORRECTED [%s] by [%s] in stage [%s] of type [%s]", ret.fname, ret.fauthor, ret.stage, ret.ftype) @@ -176,8 +221,6 @@ func newLocalItem(h *Hub, path string, info *itemFileInfo) (*Item, error) { } func (h *Hub) itemVisit(path string, f os.DirEntry, err error) error { - hubpath := "" - if err != nil { h.logger.Debugf("while syncing hub dir: %s", err) // there is a path error, we ignore the file @@ -190,8 +233,26 @@ func (h *Hub) itemVisit(path string, f os.DirEntry, 
err error) error { return err } + // permission errors, files removed while reading, etc. + if f == nil { + return nil + } + + if f.IsDir() { + // if a directory starts with a dot, we don't traverse it + // - single dot prefix is hidden by unix convention + // - double dot prefix is used by k8s to mount config maps + if strings.HasPrefix(f.Name(), ".") { + h.logger.Tracef("skipping hidden directory %s", path) + return filepath.SkipDir + } + + // keep traversing + return nil + } + // we only care about YAML files - if f == nil || f.IsDir() || !isYAMLFileName(f.Name()) { + if !isYAMLFileName(f.Name()) { return nil } @@ -201,35 +262,38 @@ func (h *Hub) itemVisit(path string, f os.DirEntry, err error) error { return nil } - // non symlinks are local user files or hub files - if f.Type()&os.ModeSymlink == 0 { - h.logger.Tracef("%s is not a symlink", path) - - if !info.inhub { - h.logger.Tracef("%s is a local file, skip", path) + // follow the link to see if it falls in the hub directory + // if it's not a link, target == path + target, err := resolveSymlink(path) + if err != nil { + // target does not exist, the user might have removed the file + // or switched to a hub branch without it; or symlink loop + h.logger.Warningf("Ignoring file %s: %s", path, err) + return nil + } - item, err := newLocalItem(h, path, info) - if err != nil { - return err - } + targetInHub, err := isPathInside(target, h.local.HubDir) + if err != nil { + h.logger.Warningf("Ignoring file %s: %s", path, err) + return nil + } - h.addItem(item) + // local (custom) item if the file or link target is not inside the hub dir + if !targetInHub { + h.logger.Tracef("%s is a local file, skip", path) - return nil - } - } else { - hubpath, err = linkTarget(path, h.logger) + item, err := newLocalItem(h, path, info) if err != nil { return err } - if hubpath == "" { - // target does not exist, the user might have removed the file - // or switched to a hub branch without it - return nil - } + h.addItem(item) + + 
return nil } + hubpath := target + // try to find which configuration item it is h.logger.Tracef("check [%s] of %s", info.fname, info.ftype) diff --git a/pkg/metabase/metabase.go b/pkg/metabase/metabase.go index 837bab796d5..324a05666a1 100644 --- a/pkg/metabase/metabase.go +++ b/pkg/metabase/metabase.go @@ -70,12 +70,12 @@ func (m *Metabase) Init(containerName string, image string) error { switch m.Config.Database.Type { case "mysql": - return fmt.Errorf("'mysql' is not supported yet for cscli dashboard") + return errors.New("'mysql' is not supported yet for cscli dashboard") //DBConnectionURI = fmt.Sprintf("MB_DB_CONNECTION_URI=mysql://%s:%d/%s?user=%s&password=%s&allowPublicKeyRetrieval=true", remoteDBAddr, m.Config.Database.Port, m.Config.Database.DbName, m.Config.Database.User, m.Config.Database.Password) case "sqlite": m.InternalDBURL = metabaseSQLiteDBURL case "postgresql", "postgres", "pgsql": - return fmt.Errorf("'postgresql' is not supported yet by cscli dashboard") + return errors.New("'postgresql' is not supported yet by cscli dashboard") default: return fmt.Errorf("database '%s' not supported", m.Config.Database.Type) } diff --git a/test/bats/20_hub_items.bats b/test/bats/20_hub_items.bats index 214d07d927f..4b390c90ed4 100644 --- a/test/bats/20_hub_items.bats +++ b/test/bats/20_hub_items.bats @@ -176,7 +176,7 @@ teardown() { rune -0 mkdir -p "$CONFIG_DIR/collections" rune -0 ln -s /this/does/not/exist.yaml "$CONFIG_DIR/collections/foobar.yaml" rune -0 cscli hub list - assert_stderr --partial "link target does not exist: $CONFIG_DIR/collections/foobar.yaml -> /this/does/not/exist.yaml" + assert_stderr --partial "Ignoring file $CONFIG_DIR/collections/foobar.yaml: lstat /this/does/not/exist.yaml: no such file or directory" rune -0 cscli hub list -o json rune -0 jq '.collections' <(output) assert_json '[]' @@ -194,9 +194,89 @@ teardown() { assert_output 'false' } -@test "skip files if we can't guess their type" { - rune -0 mkdir -p 
"$CONFIG_DIR/scenarios/foo" - rune -0 touch "$CONFIG_DIR/scenarios/foo/bar.yaml" - rune -0 cscli hub list - assert_stderr --partial "Ignoring file $CONFIG_DIR/scenarios/foo/bar.yaml: unknown configuration type" +@test "don't traverse hidden directories (starting with a dot)" { + rune -0 mkdir -p "$CONFIG_DIR/scenarios/.foo" + rune -0 touch "$CONFIG_DIR/scenarios/.foo/bar.yaml" + rune -0 cscli hub list --trace + assert_stderr --partial "skipping hidden directory $CONFIG_DIR/scenarios/.foo" +} + +@test "allow symlink to target inside a hidden directory" { + # k8s config maps use hidden directories and links when mounted + rune -0 mkdir -p "$CONFIG_DIR/scenarios/.foo" + + # ignored + rune -0 touch "$CONFIG_DIR/scenarios/.foo/hidden.yaml" + rune -0 cscli scenarios list -o json + rune -0 jq '.scenarios | length' <(output) + assert_output 0 + + # real file + rune -0 touch "$CONFIG_DIR/scenarios/myfoo.yaml" + rune -0 cscli scenarios list -o json + rune -0 jq '.scenarios | length' <(output) + assert_output 1 + + rune -0 rm "$CONFIG_DIR/scenarios/myfoo.yaml" + rune -0 cscli scenarios list -o json + rune -0 jq '.scenarios | length' <(output) + assert_output 0 + + # link to ignored is not ignored, and the name comes from the link + rune -0 ln -s "$CONFIG_DIR/scenarios/.foo/hidden.yaml" "$CONFIG_DIR/scenarios/myfoo.yaml" + rune -0 cscli scenarios list -o json + rune -0 jq -c '[.scenarios[].name] | sort' <(output) + assert_json '["myfoo.yaml"]' +} + +@test "item files can be links to links" { + rune -0 mkdir -p "$CONFIG_DIR"/scenarios/{.foo,.bar} + + rune -0 ln -s "$CONFIG_DIR/scenarios/.foo/hidden.yaml" "$CONFIG_DIR/scenarios/.bar/hidden.yaml" + + # link to a danling link + rune -0 ln -s "$CONFIG_DIR/scenarios/.bar/hidden.yaml" "$CONFIG_DIR/scenarios/myfoo.yaml" + rune -0 cscli scenarios list + assert_stderr --partial "Ignoring file $CONFIG_DIR/scenarios/myfoo.yaml: lstat $CONFIG_DIR/scenarios/.foo/hidden.yaml: no such file or directory" + rune -0 cscli scenarios list -o json 
+ rune -0 jq '.scenarios | length' <(output) + assert_output 0 + + # detect link loops + rune -0 ln -s "$CONFIG_DIR/scenarios/.bar/hidden.yaml" "$CONFIG_DIR/scenarios/.foo/hidden.yaml" + rune -0 cscli scenarios list + assert_stderr --partial "Ignoring file $CONFIG_DIR/scenarios/myfoo.yaml: too many levels of symbolic links" + + rune -0 rm "$CONFIG_DIR/scenarios/.foo/hidden.yaml" + rune -0 touch "$CONFIG_DIR/scenarios/.foo/hidden.yaml" + rune -0 cscli scenarios list -o json + rune -0 jq '.scenarios | length' <(output) + assert_output 1 +} + +@test "item files can be in a subdirectory" { + rune -0 mkdir -p "$CONFIG_DIR/scenarios/sub/sub2/sub3" + rune -0 touch "$CONFIG_DIR/scenarios/sub/imlocal.yaml" + # subdir name is now part of the item name + rune -0 cscli scenarios inspect sub/imlocal.yaml -o json + rune -0 jq -e '[.tainted,.local==false,true]' <(output) + rune -0 rm "$CONFIG_DIR/scenarios/sub/imlocal.yaml" + + rune -0 ln -s "$HUB_DIR/scenarios/crowdsecurity/smb-bf.yaml" "$CONFIG_DIR/scenarios/sub/smb-bf.yaml" + rune -0 cscli scenarios inspect crowdsecurity/smb-bf -o json + rune -0 jq -e '[.tainted,.local==false,false]' <(output) + rune -0 rm "$CONFIG_DIR/scenarios/sub/smb-bf.yaml" + + rune -0 ln -s "$HUB_DIR/scenarios/crowdsecurity/smb-bf.yaml" "$CONFIG_DIR/scenarios/sub/sub2/sub3/smb-bf.yaml" + rune -0 cscli scenarios inspect crowdsecurity/smb-bf -o json + rune -0 jq -e '[.tainted,.local==false,false]' <(output) +} + +@test "same file name for local items in different subdirectories" { + rune -0 mkdir -p "$CONFIG_DIR"/scenarios/{foo,bar} + rune -0 touch "$CONFIG_DIR/scenarios/foo/local.yaml" + rune -0 touch "$CONFIG_DIR/scenarios/bar/local.yaml" + rune -0 cscli scenarios list -o json + rune -0 jq -c '[.scenarios[].name] | sort' <(output) + assert_json '["bar/local.yaml","foo/local.yaml"]' } From 429418ffc6f1ef1bbae7c9f73c36abe4848c7151 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 21 Aug 2024 10:24:18 +0200 Subject: 
[PATCH 256/318] cscli refact: package 'cliexplain' (#3151) --- cmd/crowdsec-cli/{ => cliexplain}/explain.go | 19 ++++++---- cmd/crowdsec-cli/main.go | 39 ++++++++++---------- 2 files changed, 32 insertions(+), 26 deletions(-) rename cmd/crowdsec-cli/{ => cliexplain}/explain.go (93%) diff --git a/cmd/crowdsec-cli/explain.go b/cmd/crowdsec-cli/cliexplain/explain.go similarity index 93% rename from cmd/crowdsec-cli/explain.go rename to cmd/crowdsec-cli/cliexplain/explain.go index c322cce47fe..182e34a12a5 100644 --- a/cmd/crowdsec-cli/explain.go +++ b/cmd/crowdsec-cli/cliexplain/explain.go @@ -1,4 +1,4 @@ -package main +package cliexplain import ( "bufio" @@ -12,6 +12,7 @@ import ( log "github.com/sirupsen/logrus" "github.com/spf13/cobra" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/dumps" "github.com/crowdsecurity/crowdsec/pkg/hubtest" ) @@ -40,9 +41,12 @@ func getLineCountForFile(filepath string) (int, error) { return lc, nil } +type configGetter func() *csconfig.Config + type cliExplain struct { - cfg configGetter - flags struct { + cfg configGetter + configFilePath string + flags struct { logFile string dsn string logLine string @@ -56,9 +60,10 @@ type cliExplain struct { } } -func NewCLIExplain(cfg configGetter) *cliExplain { +func New(cfg configGetter, configFilePath string) *cliExplain { return &cliExplain{ - cfg: cfg, + cfg: cfg, + configFilePath: configFilePath, } } @@ -103,7 +108,7 @@ tail -n 5 myfile.log | cscli explain --type nginx -f - flags.StringVar(&cli.flags.crowdsec, "crowdsec", "crowdsec", "Path to crowdsec") flags.BoolVar(&cli.flags.noClean, "no-clean", false, "Don't clean runtime environment after tests") - cmd.MarkFlagRequired("type") + _ = cmd.MarkFlagRequired("type") cmd.MarkFlagsOneRequired("log", "file", "dsn") return cmd @@ -214,7 +219,7 @@ func (cli *cliExplain) run() error { return errors.New("no acquisition (--file or --dsn) provided, can't run cscli test") } - cmdArgs := []string{"-c", 
ConfigFilePath, "-type", logType, "-dsn", dsn, "-dump-data", dir, "-no-api"} + cmdArgs := []string{"-c", cli.configFilePath, "-type", logType, "-dsn", dsn, "-dump-data", dir, "-no-api"} if labels != "" { log.Debugf("adding labels %s", labels) diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index da955923962..49140b160ad 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -15,6 +15,7 @@ import ( "github.com/crowdsecurity/go-cs-lib/trace" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cliconsole" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cliexplain" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/climetrics" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/fflag" @@ -152,14 +153,6 @@ func (cli *cliRoot) initialize() error { return nil } -// list of valid subcommands for the shell completion -var validArgs = []string{ - "alerts", "appsec-configs", "appsec-rules", "bouncers", "capi", "collections", - "completion", "config", "console", "contexts", "dashboard", "decisions", "explain", - "hub", "hubtest", "lapi", "machines", "metrics", "notifications", "parsers", - "postoverflows", "scenarios", "simulation", "support", "version", -} - func (cli *cliRoot) colorize(cmd *cobra.Command) { cc.Init(&cc.Config{ RootCmd: cmd, @@ -191,6 +184,14 @@ func (cli *cliRoot) NewCommand() (*cobra.Command, error) { return nil, fmt.Errorf("failed to set feature flags from env: %w", err) } + // list of valid subcommands for the shell completion + validArgs := []string{ + "alerts", "appsec-configs", "appsec-rules", "bouncers", "capi", "collections", + "completion", "config", "console", "contexts", "dashboard", "decisions", "explain", + "hub", "hubtest", "lapi", "machines", "metrics", "notifications", "parsers", + "postoverflows", "scenarios", "simulation", "support", "version", + } + cmd := &cobra.Command{ Use: "cscli", Short: "cscli allows you to manage crowdsec", @@ -238,16 +239,6 @@ It is meant 
to allow you to manage bans, parsers/scenarios/etc, api and generall return nil, err } - if len(os.Args) > 1 { - cobra.OnInitialize( - func() { - if err := cli.initialize(); err != nil { - log.Fatal(err) - } - }, - ) - } - cmd.AddCommand(NewCLIDoc().NewCommand(cmd)) cmd.AddCommand(NewCLIVersion().NewCommand()) cmd.AddCommand(NewCLIConfig(cli.cfg).NewCommand()) @@ -263,7 +254,7 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewCLILapi(cli.cfg).NewCommand()) cmd.AddCommand(NewCompletionCmd()) cmd.AddCommand(cliconsole.New(cli.cfg, reloadMessage).NewCommand()) - cmd.AddCommand(NewCLIExplain(cli.cfg).NewCommand()) + cmd.AddCommand(cliexplain.New(cli.cfg, ConfigFilePath).NewCommand()) cmd.AddCommand(NewCLIHubTest(cli.cfg).NewCommand()) cmd.AddCommand(NewCLINotifications(cli.cfg).NewCommand()) cmd.AddCommand(NewCLISupport(cli.cfg).NewCommand()) @@ -280,6 +271,16 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewCLISetup(cli.cfg).NewCommand()) } + if len(os.Args) > 1 { + cobra.OnInitialize( + func() { + if err := cli.initialize(); err != nil { + log.Fatal(err) + } + }, + ) + } + return cmd, nil } From 89aec7cf6b3e04afa823bda4aecb465cd357f946 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 22 Aug 2024 12:12:40 +0200 Subject: [PATCH 257/318] pkg/cwhub: simpler accessor methods (#3165) * pkg/cwhub: simpler accessor methods - prefer higher level GetItemsByType, GetInstalledByType over GetItemMap - always send both appsec-rules and scenarios to api - explicit parameter for (case insensitive) sorted list of items - shorter code - assume itemType parameter makes sense, don't error * lint (gofumpt) --- cmd/crowdsec-cli/capi.go | 35 +++------- cmd/crowdsec-cli/cliconsole/console.go | 22 ++----- cmd/crowdsec-cli/hub.go | 7 +- cmd/crowdsec-cli/item_suggest.go | 20 ++---- cmd/crowdsec-cli/itemcli.go | 18 ++--- cmd/crowdsec-cli/items.go | 15 
+++-- cmd/crowdsec-cli/lapi.go | 9 +-- cmd/crowdsec/lapiclient.go | 34 ++-------- cmd/crowdsec/lpmetrics.go | 8 +-- cmd/crowdsec/main.go | 6 +- pkg/alertcontext/config.go | 10 +-- pkg/appsec/appsec.go | 71 +++++++++----------- pkg/appsec/loader.go | 10 +-- pkg/cwhub/cwhub.go | 8 --- pkg/cwhub/doc.go | 2 +- pkg/cwhub/hub.go | 91 ++++++++++++-------------- pkg/hubtest/hubtest_item.go | 39 +++++------ pkg/parser/unix_parser.go | 35 +++++----- test/bats/04_capi.bats | 2 +- 19 files changed, 160 insertions(+), 282 deletions(-) diff --git a/cmd/crowdsec-cli/capi.go b/cmd/crowdsec-cli/capi.go index ac921ea547f..64bca9f7caf 100644 --- a/cmd/crowdsec-cli/capi.go +++ b/cmd/crowdsec-cli/capi.go @@ -145,19 +145,15 @@ func (cli *cliCapi) newRegisterCmd() *cobra.Command { // QueryCAPIStatus checks if the Local API is reachable, and if the credentials are correct. It then checks if the instance is enrolle in the console. func QueryCAPIStatus(hub *cwhub.Hub, credURL string, login string, password string) (bool, bool, error) { - apiURL, err := url.Parse(credURL) if err != nil { return false, false, fmt.Errorf("parsing api url: %w", err) } - scenarios, err := hub.GetInstalledNamesByType(cwhub.SCENARIOS) - if err != nil { - return false, false, fmt.Errorf("failed to get scenarios: %w", err) - } + itemsForAPI := hub.GetInstalledListForAPI() - if len(scenarios) == 0 { - return false, false, errors.New("no scenarios installed, abort") + if len(itemsForAPI) == 0 { + return false, false, errors.New("no scenarios or appsec-rules installed, abort") } passwd := strfmt.Password(password) @@ -165,29 +161,16 @@ func QueryCAPIStatus(hub *cwhub.Hub, credURL string, login string, password stri client, err := apiclient.NewClient(&apiclient.Config{ MachineID: login, Password: passwd, - Scenarios: scenarios, + Scenarios: itemsForAPI, UserAgent: cwversion.UserAgent(), URL: apiURL, - //I don't believe papi is neede to check enrollement - //PapiURL: papiURL, + // I don't believe papi is neede to check 
enrollement + // PapiURL: papiURL, VersionPrefix: "v3", UpdateScenario: func() ([]string, error) { - l_scenarios, err := hub.GetInstalledNamesByType(cwhub.SCENARIOS) - if err != nil { - return nil, err - } - appsecRules, err := hub.GetInstalledNamesByType(cwhub.APPSEC_RULES) - if err != nil { - return nil, err - } - ret := make([]string, 0, len(l_scenarios)+len(appsecRules)) - ret = append(ret, l_scenarios...) - ret = append(ret, appsecRules...) - - return ret, nil + return itemsForAPI, nil }, }) - if err != nil { return false, false, fmt.Errorf("new client api: %w", err) } @@ -197,7 +180,7 @@ func QueryCAPIStatus(hub *cwhub.Hub, credURL string, login string, password stri t := models.WatcherAuthRequest{ MachineID: &login, Password: &pw, - Scenarios: scenarios, + Scenarios: itemsForAPI, } authResp, _, err := client.Auth.AuthenticateWatcher(context.Background(), t) @@ -211,7 +194,6 @@ func QueryCAPIStatus(hub *cwhub.Hub, credURL string, login string, password stri return true, true, nil } return true, false, nil - } func (cli *cliCapi) status() error { @@ -232,7 +214,6 @@ func (cli *cliCapi) status() error { log.Infof("Trying to authenticate with username %s on %s", cred.Login, cred.URL) auth, enrolled, err := QueryCAPIStatus(hub, cred.URL, cred.Login, cred.Password) - if err != nil { return fmt.Errorf("CAPI: failed to authenticate to Central API (CAPI): %s", err) } diff --git a/cmd/crowdsec-cli/cliconsole/console.go b/cmd/crowdsec-cli/cliconsole/console.go index 666afbba07f..158f33c9136 100644 --- a/cmd/crowdsec-cli/cliconsole/console.go +++ b/cmd/crowdsec-cli/cliconsole/console.go @@ -23,7 +23,6 @@ import ( "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/apiclient" "github.com/crowdsecurity/crowdsec/pkg/csconfig" - "github.com/crowdsecurity/crowdsec/pkg/cwhub" "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -78,20 +77,6 @@ func (cli *cliConsole) enroll(key 
string, name string, overwrite bool, tags []st return fmt.Errorf("could not parse CAPI URL: %w", err) } - hub, err := require.Hub(cfg, nil, nil) - if err != nil { - return err - } - - scenarios, err := hub.GetInstalledNamesByType(cwhub.SCENARIOS) - if err != nil { - return fmt.Errorf("failed to get installed scenarios: %w", err) - } - - if len(scenarios) == 0 { - scenarios = make([]string, 0) - } - enableOpts := []string{csconfig.SEND_MANUAL_SCENARIOS, csconfig.SEND_TAINTED_SCENARIOS} if len(opts) != 0 { @@ -129,10 +114,15 @@ func (cli *cliConsole) enroll(key string, name string, overwrite bool, tags []st } } + hub, err := require.Hub(cfg, nil, nil) + if err != nil { + return err + } + c, _ := apiclient.NewClient(&apiclient.Config{ MachineID: cli.cfg().API.Server.OnlineClient.Credentials.Login, Password: password, - Scenarios: scenarios, + Scenarios: hub.GetInstalledListForAPI(), UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v3", diff --git a/cmd/crowdsec-cli/hub.go b/cmd/crowdsec-cli/hub.go index 7e00eb64b33..e6cba08940a 100644 --- a/cmd/crowdsec-cli/hub.go +++ b/cmd/crowdsec-cli/hub.go @@ -148,16 +148,11 @@ func (cli *cliHub) upgrade(ctx context.Context, force bool) error { } for _, itemType := range cwhub.ItemTypes { - items, err := hub.GetInstalledItemsByType(itemType) - if err != nil { - return err - } - updated := 0 log.Infof("Upgrading %s", itemType) - for _, item := range items { + for _, item := range hub.GetInstalledByType(itemType, true) { didUpdate, err := item.Upgrade(ctx, force) if err != nil { return err diff --git a/cmd/crowdsec-cli/item_suggest.go b/cmd/crowdsec-cli/item_suggest.go index 0ea656549ba..7d3e1e728ae 100644 --- a/cmd/crowdsec-cli/item_suggest.go +++ b/cmd/crowdsec-cli/item_suggest.go @@ -19,7 +19,7 @@ func suggestNearestMessage(hub *cwhub.Hub, itemType string, itemName string) str score := 100 nearest := "" - for _, item := range hub.GetItemMap(itemType) { + for _, item := range hub.GetItemsByType(itemType, false) { d 
:= levenshtein.Distance(itemName, item.Name, nil) if d < score { score = d @@ -44,7 +44,7 @@ func compAllItems(itemType string, args []string, toComplete string, cfg configG comp := make([]string, 0) - for _, item := range hub.GetItemMap(itemType) { + for _, item := range hub.GetItemsByType(itemType, false) { if !slices.Contains(args, item.Name) && strings.Contains(item.Name, toComplete) { comp = append(comp, item.Name) } @@ -61,22 +61,14 @@ func compInstalledItems(itemType string, args []string, toComplete string, cfg c return nil, cobra.ShellCompDirectiveDefault } - items, err := hub.GetInstalledNamesByType(itemType) - if err != nil { - cobra.CompDebugln(fmt.Sprintf("list installed %s err: %s", itemType, err), true) - return nil, cobra.ShellCompDirectiveDefault - } + items := hub.GetInstalledByType(itemType, true) comp := make([]string, 0) - if toComplete != "" { - for _, item := range items { - if strings.Contains(item, toComplete) { - comp = append(comp, item) - } + for _, item := range items { + if strings.Contains(item.Name, toComplete) { + comp = append(comp, item.Name) } - } else { - comp = items } cobra.CompDebugln(fmt.Sprintf("%s: %+v", itemType, comp), true) diff --git a/cmd/crowdsec-cli/itemcli.go b/cmd/crowdsec-cli/itemcli.go index 3f789a14ded..85647a5f6e8 100644 --- a/cmd/crowdsec-cli/itemcli.go +++ b/cmd/crowdsec-cli/itemcli.go @@ -147,19 +147,14 @@ func (cli cliItem) remove(args []string, purge bool, force bool, all bool) error } if all { - getter := hub.GetInstalledItemsByType + itemGetter := hub.GetInstalledByType if purge { - getter = hub.GetItemsByType - } - - items, err := getter(cli.name) - if err != nil { - return err + itemGetter = hub.GetItemsByType } removed := 0 - for _, item := range items { + for _, item := range itemGetter(cli.name, true) { didRemove, err := item.Remove(purge, force) if err != nil { return err @@ -262,14 +257,9 @@ func (cli cliItem) upgrade(ctx context.Context, args []string, force bool, all b } if all { - items, err 
:= hub.GetInstalledItemsByType(cli.name) - if err != nil { - return err - } - updated := 0 - for _, item := range items { + for _, item := range hub.GetInstalledByType(cli.name, true) { didUpdate, err := item.Upgrade(ctx, force) if err != nil { return err diff --git a/cmd/crowdsec-cli/items.go b/cmd/crowdsec-cli/items.go index b0c03922166..5a4fee4d582 100644 --- a/cmd/crowdsec-cli/items.go +++ b/cmd/crowdsec-cli/items.go @@ -17,7 +17,12 @@ import ( // selectItems returns a slice of items of a given type, selected by name and sorted by case-insensitive name func selectItems(hub *cwhub.Hub, itemType string, args []string, installedOnly bool) ([]*cwhub.Item, error) { - itemNames := hub.GetNamesByType(itemType) + allItems := hub.GetItemsByType(itemType, true) + + itemNames := make([]string, len(allItems)) + for idx, item := range allItems { + itemNames[idx] = item.Name + } notExist := []string{} @@ -38,7 +43,7 @@ func selectItems(hub *cwhub.Hub, itemType string, args []string, installedOnly b installedOnly = false } - items := make([]*cwhub.Item, 0, len(itemNames)) + wantedItems := make([]*cwhub.Item, 0, len(itemNames)) for _, itemName := range itemNames { item := hub.GetItem(itemType, itemName) @@ -46,12 +51,10 @@ func selectItems(hub *cwhub.Hub, itemType string, args []string, installedOnly b continue } - items = append(items, item) + wantedItems = append(wantedItems, item) } - cwhub.SortItemSlice(items) - - return items, nil + return wantedItems, nil } func listItems(out io.Writer, wantColor string, itemTypes []string, items map[string][]*cwhub.Item, omitIfEmpty bool, output string) error { diff --git a/cmd/crowdsec-cli/lapi.go b/cmd/crowdsec-cli/lapi.go index df4f0a98188..6e13dd94436 100644 --- a/cmd/crowdsec-cli/lapi.go +++ b/cmd/crowdsec-cli/lapi.go @@ -45,11 +45,6 @@ func QueryLAPIStatus(hub *cwhub.Hub, credURL string, login string, password stri return fmt.Errorf("parsing api url: %w", err) } - scenarios, err := hub.GetInstalledNamesByType(cwhub.SCENARIOS) - if 
err != nil { - return fmt.Errorf("failed to get scenarios: %w", err) - } - client, err := apiclient.NewDefaultClient(apiURL, LAPIURLPrefix, cwversion.UserAgent(), @@ -60,10 +55,12 @@ func QueryLAPIStatus(hub *cwhub.Hub, credURL string, login string, password stri pw := strfmt.Password(password) + itemsForAPI := hub.GetInstalledListForAPI() + t := models.WatcherAuthRequest{ MachineID: &login, Password: &pw, - Scenarios: scenarios, + Scenarios: itemsForAPI, } _, _, err = client.Auth.AuthenticateWatcher(context.Background(), t) diff --git a/cmd/crowdsec/lapiclient.go b/cmd/crowdsec/lapiclient.go index 6cc0fba9515..cbafb460042 100644 --- a/cmd/crowdsec/lapiclient.go +++ b/cmd/crowdsec/lapiclient.go @@ -16,20 +16,6 @@ import ( ) func AuthenticatedLAPIClient(credentials csconfig.ApiCredentialsCfg, hub *cwhub.Hub) (*apiclient.ApiClient, error) { - scenarios, err := hub.GetInstalledNamesByType(cwhub.SCENARIOS) - if err != nil { - return nil, fmt.Errorf("loading list of installed hub scenarios: %w", err) - } - - appsecRules, err := hub.GetInstalledNamesByType(cwhub.APPSEC_RULES) - if err != nil { - return nil, fmt.Errorf("loading list of installed hub appsec rules: %w", err) - } - - installedScenariosAndAppsecRules := make([]string, 0, len(scenarios)+len(appsecRules)) - installedScenariosAndAppsecRules = append(installedScenariosAndAppsecRules, scenarios...) - installedScenariosAndAppsecRules = append(installedScenariosAndAppsecRules, appsecRules...) - apiURL, err := url.Parse(credentials.URL) if err != nil { return nil, fmt.Errorf("parsing api url ('%s'): %w", credentials.URL, err) @@ -42,28 +28,18 @@ func AuthenticatedLAPIClient(credentials csconfig.ApiCredentialsCfg, hub *cwhub. 
password := strfmt.Password(credentials.Password) + itemsForAPI := hub.GetInstalledListForAPI() + client, err := apiclient.NewClient(&apiclient.Config{ MachineID: credentials.Login, Password: password, - Scenarios: installedScenariosAndAppsecRules, + Scenarios: itemsForAPI, UserAgent: cwversion.UserAgent(), URL: apiURL, PapiURL: papiURL, VersionPrefix: "v1", UpdateScenario: func() ([]string, error) { - scenarios, err := hub.GetInstalledNamesByType(cwhub.SCENARIOS) - if err != nil { - return nil, err - } - appsecRules, err := hub.GetInstalledNamesByType(cwhub.APPSEC_RULES) - if err != nil { - return nil, err - } - ret := make([]string, 0, len(scenarios)+len(appsecRules)) - ret = append(ret, scenarios...) - ret = append(ret, appsecRules...) - - return ret, nil + return itemsForAPI, nil }, }) if err != nil { @@ -73,7 +49,7 @@ func AuthenticatedLAPIClient(credentials csconfig.ApiCredentialsCfg, hub *cwhub. authResp, _, err := client.Auth.AuthenticateWatcher(context.Background(), models.WatcherAuthRequest{ MachineID: &credentials.Login, Password: &password, - Scenarios: installedScenariosAndAppsecRules, + Scenarios: itemsForAPI, }) if err != nil { return nil, fmt.Errorf("authenticate watcher (%s): %w", credentials.Login, err) diff --git a/cmd/crowdsec/lpmetrics.go b/cmd/crowdsec/lpmetrics.go index 0fd27054071..24842851294 100644 --- a/cmd/crowdsec/lpmetrics.go +++ b/cmd/crowdsec/lpmetrics.go @@ -7,7 +7,6 @@ import ( "time" "github.com/sirupsen/logrus" - "gopkg.in/tomb.v2" "github.com/crowdsecurity/go-cs-lib/ptr" @@ -46,10 +45,8 @@ func getHubState(hub *cwhub.Hub) models.HubItems { for _, itemType := range cwhub.ItemTypes { ret[itemType] = []models.HubItem{} - items, _ := hub.GetInstalledItemsByType(itemType) - cwhub.SortItemSlice(items) - for _, item := range items { + for _, item := range hub.GetInstalledByType(itemType, true) { status := "official" if item.State.IsLocal() { status = "custom" @@ -90,7 +87,8 @@ func newStaticMetrics(consoleOptions []string, datasources 
[]acquisition.DataSou } func NewMetricsProvider(apic *apiclient.ApiClient, interval time.Duration, logger *logrus.Entry, - consoleOptions []string, datasources []acquisition.DataSource, hub *cwhub.Hub) *MetricsProvider { + consoleOptions []string, datasources []acquisition.DataSource, hub *cwhub.Hub, +) *MetricsProvider { return &MetricsProvider{ apic: apic, interval: interval, diff --git a/cmd/crowdsec/main.go b/cmd/crowdsec/main.go index 18416e044e7..6d8ca24c335 100644 --- a/cmd/crowdsec/main.go +++ b/cmd/crowdsec/main.go @@ -91,10 +91,8 @@ func LoadBuckets(cConfig *csconfig.Config, hub *cwhub.Hub) error { files []string ) - for _, hubScenarioItem := range hub.GetItemMap(cwhub.SCENARIOS) { - if hubScenarioItem.State.Installed { - files = append(files, hubScenarioItem.State.LocalPath) - } + for _, hubScenarioItem := range hub.GetInstalledByType(cwhub.SCENARIOS, false) { + files = append(files, hubScenarioItem.State.LocalPath) } buckets = leakybucket.NewBuckets() diff --git a/pkg/alertcontext/config.go b/pkg/alertcontext/config.go index 21d16db3972..da05c937b18 100644 --- a/pkg/alertcontext/config.go +++ b/pkg/alertcontext/config.go @@ -98,20 +98,14 @@ func addContextFromFile(toSend map[string][]string, filePath string) error { return nil } - // LoadConsoleContext loads the context from the hub (if provided) and the file console_context_path. 
func LoadConsoleContext(c *csconfig.Config, hub *cwhub.Hub) error { c.Crowdsec.ContextToSend = make(map[string][]string, 0) if hub != nil { - items, err := hub.GetInstalledItemsByType(cwhub.CONTEXTS) - if err != nil { - return err - } - - for _, item := range items { + for _, item := range hub.GetInstalledByType(cwhub.CONTEXTS, true) { // context in item files goes under the key 'context' - if err = addContextFromItem(c.Crowdsec.ContextToSend, item); err != nil { + if err := addContextFromItem(c.Crowdsec.ContextToSend, item); err != nil { return err } } diff --git a/pkg/appsec/appsec.go b/pkg/appsec/appsec.go index 96f977b4738..30784b23db0 100644 --- a/pkg/appsec/appsec.go +++ b/pkg/appsec/appsec.go @@ -40,7 +40,6 @@ const ( ) func (h *Hook) Build(hookStage int) error { - ctx := map[string]interface{}{} switch hookStage { case hookOnLoad: @@ -54,7 +53,7 @@ func (h *Hook) Build(hookStage int) error { } opts := exprhelpers.GetExprOptions(ctx) if h.Filter != "" { - program, err := expr.Compile(h.Filter, opts...) //FIXME: opts + program, err := expr.Compile(h.Filter, opts...) 
// FIXME: opts if err != nil { return fmt.Errorf("unable to compile filter %s : %w", h.Filter, err) } @@ -73,11 +72,11 @@ func (h *Hook) Build(hookStage int) error { type AppsecTempResponse struct { InBandInterrupt bool OutOfBandInterrupt bool - Action string //allow, deny, captcha, log - UserHTTPResponseCode int //The response code to send to the user - BouncerHTTPResponseCode int //The response code to send to the remediation component - SendEvent bool //do we send an internal event on rule match - SendAlert bool //do we send an alert on rule match + Action string // allow, deny, captcha, log + UserHTTPResponseCode int // The response code to send to the user + BouncerHTTPResponseCode int // The response code to send to the remediation component + SendEvent bool // do we send an internal event on rule match + SendAlert bool // do we send an alert on rule match } type AppsecSubEngineOpts struct { @@ -93,7 +92,7 @@ type AppsecRuntimeConfig struct { InBandRules []AppsecCollection DefaultRemediation string - RemediationByTag map[string]string //Also used for ByName, as the name (for modsec rules) is a tag crowdsec-NAME + RemediationByTag map[string]string // Also used for ByName, as the name (for modsec rules) is a tag crowdsec-NAME RemediationById map[int]string CompiledOnLoad []Hook CompiledPreEval []Hook @@ -101,22 +100,22 @@ type AppsecRuntimeConfig struct { CompiledOnMatch []Hook CompiledVariablesTracking []*regexp.Regexp Config *AppsecConfig - //CorazaLogger debuglog.Logger + // CorazaLogger debuglog.Logger - //those are ephemeral, created/destroyed with every req - OutOfBandTx ExtendedTransaction //is it a good idea ? - InBandTx ExtendedTransaction //is it a good idea ? + // those are ephemeral, created/destroyed with every req + OutOfBandTx ExtendedTransaction // is it a good idea ? + InBandTx ExtendedTransaction // is it a good idea ? Response AppsecTempResponse - //should we store matched rules here ? + // should we store matched rules here ? 
Logger *log.Entry - //Set by on_load to ignore some rules on loading + // Set by on_load to ignore some rules on loading DisabledInBandRuleIds []int - DisabledInBandRulesTags []string //Also used for ByName, as the name (for modsec rules) is a tag crowdsec-NAME + DisabledInBandRulesTags []string // Also used for ByName, as the name (for modsec rules) is a tag crowdsec-NAME DisabledOutOfBandRuleIds []int - DisabledOutOfBandRulesTags []string //Also used for ByName, as the name (for modsec rules) is a tag crowdsec-NAME + DisabledOutOfBandRulesTags []string // Also used for ByName, as the name (for modsec rules) is a tag crowdsec-NAME } type AppsecConfig struct { @@ -125,10 +124,10 @@ type AppsecConfig struct { InBandRules []string `yaml:"inband_rules"` DefaultRemediation string `yaml:"default_remediation"` DefaultPassAction string `yaml:"default_pass_action"` - BouncerBlockedHTTPCode int `yaml:"blocked_http_code"` //returned to the bouncer - BouncerPassedHTTPCode int `yaml:"passed_http_code"` //returned to the bouncer - UserBlockedHTTPCode int `yaml:"user_blocked_http_code"` //returned to the user - UserPassedHTTPCode int `yaml:"user_passed_http_code"` //returned to the user + BouncerBlockedHTTPCode int `yaml:"blocked_http_code"` // returned to the bouncer + BouncerPassedHTTPCode int `yaml:"passed_http_code"` // returned to the bouncer + UserBlockedHTTPCode int `yaml:"user_blocked_http_code"` // returned to the user + UserPassedHTTPCode int `yaml:"user_passed_http_code"` // returned to the user OnLoad []Hook `yaml:"on_load"` PreEval []Hook `yaml:"pre_eval"` @@ -152,7 +151,6 @@ func (w *AppsecRuntimeConfig) ClearResponse() { } func (wc *AppsecConfig) LoadByPath(file string) error { - wc.Logger.Debugf("loading config %s", file) yamlFile, err := os.ReadFile(file) @@ -177,19 +175,13 @@ func (wc *AppsecConfig) LoadByPath(file string) error { } func (wc *AppsecConfig) Load(configName string) error { - appsecConfigs := hub.GetItemMap(cwhub.APPSEC_CONFIGS) + item := 
hub.GetItem(cwhub.APPSEC_CONFIGS, configName) - for _, hubAppsecConfigItem := range appsecConfigs { - if !hubAppsecConfigItem.State.Installed { - continue - } - if hubAppsecConfigItem.Name != configName { - continue - } - wc.Logger.Infof("loading %s", hubAppsecConfigItem.State.LocalPath) - err := wc.LoadByPath(hubAppsecConfigItem.State.LocalPath) + if item != nil && item.State.Installed { + wc.Logger.Infof("loading %s", item.State.LocalPath) + err := wc.LoadByPath(item.State.LocalPath) if err != nil { - return fmt.Errorf("unable to load appsec-config %s : %s", hubAppsecConfigItem.State.LocalPath, err) + return fmt.Errorf("unable to load appsec-config %s : %s", item.State.LocalPath, err) } return nil } @@ -224,10 +216,10 @@ func (wc *AppsecConfig) Build() (*AppsecRuntimeConfig, error) { wc.DefaultRemediation = BanRemediation } - //set the defaults + // set the defaults switch wc.DefaultRemediation { case BanRemediation, CaptchaRemediation, AllowRemediation: - //those are the officially supported remediation(s) + // those are the officially supported remediation(s) default: wc.Logger.Warningf("default '%s' remediation of %s is none of [%s,%s,%s] ensure bouncer compatbility!", wc.DefaultRemediation, wc.Name, BanRemediation, CaptchaRemediation, AllowRemediation) } @@ -237,7 +229,7 @@ func (wc *AppsecConfig) Build() (*AppsecRuntimeConfig, error) { ret.DefaultRemediation = wc.DefaultRemediation wc.Logger.Tracef("Loading config %+v", wc) - //load rules + // load rules for _, rule := range wc.OutOfBandRules { wc.Logger.Infof("loading outofband rule %s", rule) collections, err := LoadCollection(rule, wc.Logger.WithField("component", "appsec_collection_loader")) @@ -259,7 +251,7 @@ func (wc *AppsecConfig) Build() (*AppsecRuntimeConfig, error) { wc.Logger.Infof("Loaded %d inband rules", len(ret.InBandRules)) - //load hooks + // load hooks for _, hook := range wc.OnLoad { if hook.OnSuccess != "" && hook.OnSuccess != "continue" && hook.OnSuccess != "break" { return nil, 
fmt.Errorf("invalid 'on_success' for on_load hook : %s", hook.OnSuccess) @@ -304,7 +296,7 @@ func (wc *AppsecConfig) Build() (*AppsecRuntimeConfig, error) { ret.CompiledOnMatch = append(ret.CompiledOnMatch, hook) } - //variable tracking + // variable tracking for _, variable := range wc.VariablesTracking { compiledVariableRule, err := regexp.Compile(variable) if err != nil { @@ -460,7 +452,6 @@ func (w *AppsecRuntimeConfig) ProcessPostEvalRules(request *ParsedRequest) error // here means there is no filter or the filter matched for _, applyExpr := range rule.ApplyExpr { o, err := exprhelpers.Run(applyExpr, GetPostEvalEnv(w, request), w.Logger, w.Logger.Level >= log.DebugLevel) - if err != nil { w.Logger.Errorf("unable to apply appsec post_eval expr: %s", err) continue @@ -604,7 +595,7 @@ func (w *AppsecRuntimeConfig) SetActionByName(name string, action string) error } func (w *AppsecRuntimeConfig) SetAction(action string) error { - //log.Infof("setting to %s", action) + // log.Infof("setting to %s", action) w.Logger.Debugf("setting action to %s", action) w.Response.Action = action return nil @@ -628,7 +619,7 @@ func (w *AppsecRuntimeConfig) GenerateResponse(response AppsecTempResponse, logg if response.Action == AllowRemediation { resp.HTTPStatus = w.Config.UserPassedHTTPCode bouncerStatusCode = w.Config.BouncerPassedHTTPCode - } else { //ban, captcha and anything else + } else { // ban, captcha and anything else resp.HTTPStatus = response.UserHTTPResponseCode if resp.HTTPStatus == 0 { resp.HTTPStatus = w.Config.UserBlockedHTTPCode diff --git a/pkg/appsec/loader.go b/pkg/appsec/loader.go index 56ec23e3671..9a3bfb6b668 100644 --- a/pkg/appsec/loader.go +++ b/pkg/appsec/loader.go @@ -9,19 +9,15 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -var appsecRules = make(map[string]AppsecCollectionConfig) //FIXME: would probably be better to have a struct for this +var appsecRules = make(map[string]AppsecCollectionConfig) // FIXME: would probably be better to 
have a struct for this -var hub *cwhub.Hub //FIXME: this is a temporary hack to make the hub available in the package +var hub *cwhub.Hub // FIXME: this is a temporary hack to make the hub available in the package func LoadAppsecRules(hubInstance *cwhub.Hub) error { hub = hubInstance appsecRules = make(map[string]AppsecCollectionConfig) - for _, hubAppsecRuleItem := range hub.GetItemMap(cwhub.APPSEC_RULES) { - if !hubAppsecRuleItem.State.Installed { - continue - } - + for _, hubAppsecRuleItem := range hub.GetInstalledByType(cwhub.APPSEC_RULES, false) { content, err := os.ReadFile(hubAppsecRuleItem.State.LocalPath) if err != nil { log.Warnf("unable to read file %s : %s", hubAppsecRuleItem.State.LocalPath, err) diff --git a/pkg/cwhub/cwhub.go b/pkg/cwhub/cwhub.go index 0a9cc443ce0..d8607e7e562 100644 --- a/pkg/cwhub/cwhub.go +++ b/pkg/cwhub/cwhub.go @@ -4,7 +4,6 @@ import ( "fmt" "net/http" "path/filepath" - "sort" "strings" "time" @@ -45,10 +44,3 @@ func safePath(dir, filePath string) (string, error) { return absFilePath, nil } - -// SortItemSlice sorts a slice of items by name, case insensitive. 
-func SortItemSlice(items []*Item) { - sort.Slice(items, func(i, j int) bool { - return strings.ToLower(items[i].Name) < strings.ToLower(items[j].Name) - }) -} diff --git a/pkg/cwhub/doc.go b/pkg/cwhub/doc.go index 89d8de0fa8b..f86b95c6454 100644 --- a/pkg/cwhub/doc.go +++ b/pkg/cwhub/doc.go @@ -74,7 +74,7 @@ // Now you can use the hub object to access the existing items: // // // list all the parsers -// for _, parser := range hub.GetItemMap(cwhub.PARSERS) { +// for _, parser := range hub.GetItemsByType(cwhub.PARSERS, false) { // fmt.Printf("parser: %s\n", parser.Name) // } // diff --git a/pkg/cwhub/hub.go b/pkg/cwhub/hub.go index 20a628a493f..a4e81e2c3e2 100644 --- a/pkg/cwhub/hub.go +++ b/pkg/cwhub/hub.go @@ -8,11 +8,12 @@ import ( "io" "os" "path" - "slices" "strings" "github.com/sirupsen/logrus" + "github.com/crowdsecurity/go-cs-lib/maptools" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" ) @@ -117,13 +118,14 @@ func (h *Hub) ItemStats() []string { tainted := 0 for _, itemType := range ItemTypes { - if len(h.GetItemMap(itemType)) == 0 { + items := h.GetItemsByType(itemType, false) + if len(items) == 0 { continue } - loaded += fmt.Sprintf("%d %s, ", len(h.GetItemMap(itemType)), itemType) + loaded += fmt.Sprintf("%d %s, ", len(items), itemType) - for _, item := range h.GetItemMap(itemType) { + for _, item := range items { if item.State.IsLocal() { local++ } @@ -218,73 +220,62 @@ func (h *Hub) GetItemFQ(itemFQName string) (*Item, error) { return i, nil } -// GetNamesByType returns a slice of (full) item names for a given type -// (eg. for collections: crowdsecurity/apache2 crowdsecurity/nginx). -func (h *Hub) GetNamesByType(itemType string) []string { - m := h.GetItemMap(itemType) - if m == nil { - return nil - } +// GetItemsByType returns a slice of all the items of a given type, installed or not, optionally sorted by case-insensitive name. +// A non-existent type will silently return an empty slice. 
+func (h *Hub) GetItemsByType(itemType string, sorted bool) []*Item { + items := h.items[itemType] - names := make([]string, 0, len(m)) - for k := range m { - names = append(names, k) - } + ret := make([]*Item, len(items)) - return names -} + if sorted { + for idx, name := range maptools.SortedKeysNoCase(items) { + ret[idx] = items[name] + } -// GetItemsByType returns a slice of all the items of a given type, installed or not. -func (h *Hub) GetItemsByType(itemType string) ([]*Item, error) { - if !slices.Contains(ItemTypes, itemType) { - return nil, fmt.Errorf("invalid item type %s", itemType) + return ret } - items := h.items[itemType] - - ret := make([]*Item, len(items)) - idx := 0 - for _, item := range items { ret[idx] = item - idx++ + idx += 1 } - return ret, nil + return ret } -// GetInstalledItemsByType returns a slice of the installed items of a given type. -func (h *Hub) GetInstalledItemsByType(itemType string) ([]*Item, error) { - if !slices.Contains(ItemTypes, itemType) { - return nil, fmt.Errorf("invalid item type %s", itemType) - } - - items := h.items[itemType] +// GetInstalledByType returns a slice of all the installed items of a given type, optionally sorted by case-insensitive name. +// A non-existent type will silently return an empty slice. +func (h *Hub) GetInstalledByType(itemType string, sorted bool) []*Item { + ret := make([]*Item, 0) - retItems := make([]*Item, 0) - - for _, item := range items { + for _, item := range h.GetItemsByType(itemType, sorted) { if item.State.Installed { - retItems = append(retItems, item) + ret = append(ret, item) } } - return retItems, nil + return ret } -// GetInstalledNamesByType returns the names of the installed items of a given type. -func (h *Hub) GetInstalledNamesByType(itemType string) ([]string, error) { - items, err := h.GetInstalledItemsByType(itemType) - if err != nil { - return nil, err - } +// GetInstalledListForAPI returns a slice of names of all the installed scenarios and appsec-rules. 
+// The returned list is sorted by type (scenarios first) and case-insensitive name. +func (h *Hub) GetInstalledListForAPI() []string { + scenarios := h.GetInstalledByType(SCENARIOS, true) + appsecRules := h.GetInstalledByType(APPSEC_RULES, true) + + ret := make([]string, len(scenarios)+len(appsecRules)) - retStr := make([]string, len(items)) + idx := 0 + for _, item := range scenarios { + ret[idx] = item.Name + idx += 1 + } - for idx, it := range items { - retStr[idx] = it.Name + for _, item := range appsecRules { + ret[idx] = item.Name + idx += 1 } - return retStr, nil + return ret } diff --git a/pkg/hubtest/hubtest_item.go b/pkg/hubtest/hubtest_item.go index da4969ee8dd..42792413b5d 100644 --- a/pkg/hubtest/hubtest_item.go +++ b/pkg/hubtest/hubtest_item.go @@ -223,39 +223,30 @@ func (t *HubTestItem) InstallHub() error { ctx := context.Background() // install data for parsers if needed - ret := hub.GetItemMap(cwhub.PARSERS) - for parserName, item := range ret { - if item.State.Installed { - if err := item.DownloadDataIfNeeded(ctx, true); err != nil { - return fmt.Errorf("unable to download data for parser '%s': %+v", parserName, err) - } - - log.Debugf("parser '%s' installed successfully in runtime environment", parserName) + for _, item := range hub.GetInstalledByType(cwhub.PARSERS, true) { + if err := item.DownloadDataIfNeeded(ctx, true); err != nil { + return fmt.Errorf("unable to download data for parser '%s': %+v", item.Name, err) } + + log.Debugf("parser '%s' installed successfully in runtime environment", item.Name) } // install data for scenarios if needed - ret = hub.GetItemMap(cwhub.SCENARIOS) - for scenarioName, item := range ret { - if item.State.Installed { - if err := item.DownloadDataIfNeeded(ctx, true); err != nil { - return fmt.Errorf("unable to download data for parser '%s': %+v", scenarioName, err) - } - - log.Debugf("scenario '%s' installed successfully in runtime environment", scenarioName) + for _, item := range 
hub.GetInstalledByType(cwhub.SCENARIOS, true) { + if err := item.DownloadDataIfNeeded(ctx, true); err != nil { + return fmt.Errorf("unable to download data for parser '%s': %+v", item.Name, err) } + + log.Debugf("scenario '%s' installed successfully in runtime environment", item.Name) } // install data for postoverflows if needed - ret = hub.GetItemMap(cwhub.POSTOVERFLOWS) - for postoverflowName, item := range ret { - if item.State.Installed { - if err := item.DownloadDataIfNeeded(ctx, true); err != nil { - return fmt.Errorf("unable to download data for parser '%s': %+v", postoverflowName, err) - } - - log.Debugf("postoverflow '%s' installed successfully in runtime environment", postoverflowName) + for _, item := range hub.GetInstalledByType(cwhub.POSTOVERFLOWS, true) { + if err := item.DownloadDataIfNeeded(ctx, true); err != nil { + return fmt.Errorf("unable to download data for parser '%s': %+v", item.Name, err) } + + log.Debugf("postoverflow '%s' installed successfully in runtime environment", item.Name) } return nil diff --git a/pkg/parser/unix_parser.go b/pkg/parser/unix_parser.go index 280d122ecc1..351de8ade56 100644 --- a/pkg/parser/unix_parser.go +++ b/pkg/parser/unix_parser.go @@ -66,21 +66,20 @@ func NewParsers(hub *cwhub.Hub) *Parsers { } for _, itemType := range []string{cwhub.PARSERS, cwhub.POSTOVERFLOWS} { - for _, hubParserItem := range hub.GetItemMap(itemType) { - if hubParserItem.State.Installed { - stagefile := Stagefile{ - Filename: hubParserItem.State.LocalPath, - Stage: hubParserItem.Stage, - } - if itemType == cwhub.PARSERS { - parsers.StageFiles = append(parsers.StageFiles, stagefile) - } - if itemType == cwhub.POSTOVERFLOWS { - parsers.PovfwStageFiles = append(parsers.PovfwStageFiles, stagefile) - } + for _, hubParserItem := range hub.GetInstalledByType(itemType, false) { + stagefile := Stagefile{ + Filename: hubParserItem.State.LocalPath, + Stage: hubParserItem.Stage, + } + if itemType == cwhub.PARSERS { + parsers.StageFiles = 
append(parsers.StageFiles, stagefile) + } + if itemType == cwhub.POSTOVERFLOWS { + parsers.PovfwStageFiles = append(parsers.PovfwStageFiles, stagefile) } } } + if parsers.StageFiles != nil { sort.Slice(parsers.StageFiles, func(i, j int) bool { return parsers.StageFiles[i].Filename < parsers.StageFiles[j].Filename @@ -101,13 +100,17 @@ func LoadParsers(cConfig *csconfig.Config, parsers *Parsers) (*Parsers, error) { patternsDir := cConfig.ConfigPaths.PatternDir log.Infof("Loading grok library %s", patternsDir) /* load base regexps for two grok parsers */ - parsers.Ctx, err = Init(map[string]interface{}{"patterns": patternsDir, - "data": cConfig.ConfigPaths.DataDir}) + parsers.Ctx, err = Init(map[string]interface{}{ + "patterns": patternsDir, + "data": cConfig.ConfigPaths.DataDir, + }) if err != nil { return parsers, fmt.Errorf("failed to load parser patterns : %v", err) } - parsers.Povfwctx, err = Init(map[string]interface{}{"patterns": patternsDir, - "data": cConfig.ConfigPaths.DataDir}) + parsers.Povfwctx, err = Init(map[string]interface{}{ + "patterns": patternsDir, + "data": cConfig.ConfigPaths.DataDir, + }) if err != nil { return parsers, fmt.Errorf("failed to load postovflw parser patterns : %v", err) } diff --git a/test/bats/04_capi.bats b/test/bats/04_capi.bats index d5154c1a0d7..830d0668cbb 100644 --- a/test/bats/04_capi.bats +++ b/test/bats/04_capi.bats @@ -51,7 +51,7 @@ setup() { config_enable_capi rune -0 cscli capi register --schmilblick githubciXXXXXXXXXXXXXXXXXXXXXXXX rune -1 cscli capi status - assert_stderr --partial "no scenarios installed, abort" + assert_stderr --partial "no scenarios or appsec-rules installed, abort" rune -0 cscli scenarios install crowdsecurity/ssh-bf rune -0 cscli capi status From da495e812c90a266f03dad3255ee09742cf86b41 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 23 Aug 2024 17:05:55 +0200 Subject: [PATCH 258/318] pkg/cwhub: cache control / send etag header from file contents, 
check with HEAD req (#3187) --- go.mod | 2 +- go.sum | 4 ++-- pkg/cwhub/itemupgrade.go | 1 + pkg/cwhub/remote.go | 1 + 4 files changed, 5 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index af9d7550b94..9377e116519 100644 --- a/go.mod +++ b/go.mod @@ -25,7 +25,7 @@ require ( github.com/corazawaf/libinjection-go v0.1.2 github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607 github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 - github.com/crowdsecurity/go-cs-lib v0.0.13 + github.com/crowdsecurity/go-cs-lib v0.0.15 github.com/crowdsecurity/grokky v0.2.1 github.com/crowdsecurity/machineid v1.0.2 github.com/davecgh/go-spew v1.1.1 diff --git a/go.sum b/go.sum index 282f10d6367..b76c7fccd1a 100644 --- a/go.sum +++ b/go.sum @@ -103,8 +103,8 @@ github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607 h1:hyrYw3h github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607/go.mod h1:br36fEqurGYZQGit+iDYsIzW0FF6VufMbDzyyLxEuPA= github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 h1:r97WNVC30Uen+7WnLs4xDScS/Ex988+id2k6mDf8psU= github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26/go.mod h1:zpv7r+7KXwgVUZnUNjyP22zc/D7LKjyoY02weH2RBbk= -github.com/crowdsecurity/go-cs-lib v0.0.13 h1:asmtjIEPOibUK8eaYQCIR7XIBU/EX5vyAp1EbKFQJtY= -github.com/crowdsecurity/go-cs-lib v0.0.13/go.mod h1:ePyQyJBxp1W/1bq4YpVAilnLSz7HkzmtI7TRhX187EU= +github.com/crowdsecurity/go-cs-lib v0.0.15 h1:zNWqOPVLHgKUstlr6clom9d66S0eIIW66jQG3Y7FEvo= +github.com/crowdsecurity/go-cs-lib v0.0.15/go.mod h1:ePyQyJBxp1W/1bq4YpVAilnLSz7HkzmtI7TRhX187EU= github.com/crowdsecurity/grokky v0.2.1 h1:t4VYnDlAd0RjDM2SlILalbwfCrQxtJSMGdQOR0zwkE4= github.com/crowdsecurity/grokky v0.2.1/go.mod h1:33usDIYzGDsgX1kHAThCbseso6JuWNJXOzRQDGXHtWM= github.com/crowdsecurity/machineid v1.0.2 h1:wpkpsUghJF8Khtmn/tg6GxgdhLA1Xflerh5lirI+bdc= diff --git a/pkg/cwhub/itemupgrade.go b/pkg/cwhub/itemupgrade.go index 441948c9168..105e5ebec31 100644 --- 
a/pkg/cwhub/itemupgrade.go +++ b/pkg/cwhub/itemupgrade.go @@ -162,6 +162,7 @@ func (i *Item) FetchContentTo(ctx context.Context, destPath string) (bool, strin New(). WithHTTPClient(hubClient). ToFile(destPath). + WithETagFn(downloader.SHA256). WithMakeDirs(true). WithLogger(logrus.WithField("url", url)). CompareContent(). diff --git a/pkg/cwhub/remote.go b/pkg/cwhub/remote.go index 0678a7488f8..04e4fab972f 100644 --- a/pkg/cwhub/remote.go +++ b/pkg/cwhub/remote.go @@ -45,6 +45,7 @@ func (r *RemoteHubCfg) fetchIndex(ctx context.Context, destPath string) (bool, e New(). WithHTTPClient(hubClient). ToFile(destPath). + WithETagFn(downloader.SHA256). CompareContent(). WithLogger(logrus.WithField("url", url)). Download(ctx, url) From 9c0422f75b0b67209cc47680163cccac598fe832 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 26 Aug 2024 09:39:36 +0200 Subject: [PATCH 259/318] cscli refact: package 'clicapi', 'clilapi' (#3185) * extract functions to own files * package clilapi * package clicapi * package crowdsec-cli/reload --- .golangci.yml | 2 +- cmd/crowdsec-cli/{ => clicapi}/capi.go | 14 ++-- cmd/crowdsec-cli/cliconsole/console.go | 11 ++-- cmd/crowdsec-cli/{ => clilapi}/lapi.go | 16 +++-- cmd/crowdsec-cli/{ => clilapi}/lapi_test.go | 2 +- cmd/crowdsec-cli/clilapi/utils.go | 24 +++++++ cmd/crowdsec-cli/dashboard.go | 3 +- cmd/crowdsec-cli/idgen/machineid.go | 48 ++++++++++++++ cmd/crowdsec-cli/idgen/password.go | 32 ++++++++++ cmd/crowdsec-cli/itemcli.go | 11 ++-- cmd/crowdsec-cli/machines.go | 71 +-------------------- cmd/crowdsec-cli/main.go | 8 ++- cmd/crowdsec-cli/reload.go | 6 -- cmd/crowdsec-cli/reload/reload.go | 6 ++ cmd/crowdsec-cli/reload/reload_freebsd.go | 4 ++ cmd/crowdsec-cli/reload/reload_linux.go | 4 ++ cmd/crowdsec-cli/reload/reload_windows.go | 3 + cmd/crowdsec-cli/reload_freebsd.go | 4 -- cmd/crowdsec-cli/reload_linux.go | 4 -- cmd/crowdsec-cli/reload_windows.go | 3 - cmd/crowdsec-cli/simulation.go | 3 +- 
cmd/crowdsec-cli/support.go | 6 +- cmd/crowdsec-cli/utils.go | 23 ------- 23 files changed, 169 insertions(+), 139 deletions(-) rename cmd/crowdsec-cli/{ => clicapi}/capi.go (93%) rename cmd/crowdsec-cli/{ => clilapi}/lapi.go (97%) rename cmd/crowdsec-cli/{ => clilapi}/lapi_test.go (98%) create mode 100644 cmd/crowdsec-cli/clilapi/utils.go create mode 100644 cmd/crowdsec-cli/idgen/machineid.go create mode 100644 cmd/crowdsec-cli/idgen/password.go delete mode 100644 cmd/crowdsec-cli/reload.go create mode 100644 cmd/crowdsec-cli/reload/reload.go create mode 100644 cmd/crowdsec-cli/reload/reload_freebsd.go create mode 100644 cmd/crowdsec-cli/reload/reload_linux.go create mode 100644 cmd/crowdsec-cli/reload/reload_windows.go delete mode 100644 cmd/crowdsec-cli/reload_freebsd.go delete mode 100644 cmd/crowdsec-cli/reload_linux.go delete mode 100644 cmd/crowdsec-cli/reload_windows.go diff --git a/.golangci.yml b/.golangci.yml index 2b216259770..86771f17f60 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -487,7 +487,7 @@ issues: - linters: - revive - path: "cmd/crowdsec-cli/machines.go" + path: "cmd/crowdsec-cli/idgen/password.go" text: "deep-exit: .*" - linters: diff --git a/cmd/crowdsec-cli/capi.go b/cmd/crowdsec-cli/clicapi/capi.go similarity index 93% rename from cmd/crowdsec-cli/capi.go rename to cmd/crowdsec-cli/clicapi/capi.go index 64bca9f7caf..bf45613c776 100644 --- a/cmd/crowdsec-cli/capi.go +++ b/cmd/crowdsec-cli/clicapi/capi.go @@ -1,4 +1,4 @@ -package main +package clicapi import ( "context" @@ -12,6 +12,8 @@ import ( "github.com/spf13/cobra" "gopkg.in/yaml.v3" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/idgen" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/reload" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/apiclient" "github.com/crowdsecurity/crowdsec/pkg/csconfig" @@ -21,11 +23,13 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) +type configGetter func() *csconfig.Config 
+ type cliCapi struct { cfg configGetter } -func NewCLICapi(cfg configGetter) *cliCapi { +func New(cfg configGetter) *cliCapi { return &cliCapi{ cfg: cfg, } @@ -56,12 +60,12 @@ func (cli *cliCapi) NewCommand() *cobra.Command { func (cli *cliCapi) register(capiUserPrefix string, outputFile string) error { cfg := cli.cfg() - capiUser, err := generateID(capiUserPrefix) + capiUser, err := idgen.GenerateMachineID(capiUserPrefix) if err != nil { return fmt.Errorf("unable to generate machine id: %w", err) } - password := strfmt.Password(generatePassword(passwordLength)) + password := strfmt.Password(idgen.GeneratePassword(idgen.PasswordLength)) apiurl, err := url.Parse(types.CAPIBaseURL) if err != nil { @@ -114,7 +118,7 @@ func (cli *cliCapi) register(capiUserPrefix string, outputFile string) error { fmt.Println(string(apiConfigDump)) } - log.Warning(reloadMessage) + log.Warning(reload.Message) return nil } diff --git a/cmd/crowdsec-cli/cliconsole/console.go b/cmd/crowdsec-cli/cliconsole/console.go index 158f33c9136..995a082c514 100644 --- a/cmd/crowdsec-cli/cliconsole/console.go +++ b/cmd/crowdsec-cli/cliconsole/console.go @@ -20,6 +20,7 @@ import ( "github.com/crowdsecurity/go-cs-lib/ptr" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/reload" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/apiclient" "github.com/crowdsecurity/crowdsec/pkg/csconfig" @@ -30,14 +31,12 @@ import ( type configGetter func() *csconfig.Config type cliConsole struct { - cfg func() *csconfig.Config - reloadMessage string + cfg configGetter } -func New(cfg configGetter, reloadMessage string) *cliConsole { +func New(cfg configGetter) *cliConsole { return &cliConsole{ cfg: cfg, - reloadMessage: reloadMessage, } } @@ -215,7 +214,7 @@ Enable given information push to the central API. 
Allows to empower the console` log.Infof("%v have been enabled", args) } - log.Info(cli.reloadMessage) + log.Info(reload.Message) return nil }, @@ -249,7 +248,7 @@ Disable given information push to the central API.`, log.Infof("%v have been disabled", args) } - log.Info(cli.reloadMessage) + log.Info(reload.Message) return nil }, diff --git a/cmd/crowdsec-cli/lapi.go b/cmd/crowdsec-cli/clilapi/lapi.go similarity index 97% rename from cmd/crowdsec-cli/lapi.go rename to cmd/crowdsec-cli/clilapi/lapi.go index 6e13dd94436..ec66daf16a4 100644 --- a/cmd/crowdsec-cli/lapi.go +++ b/cmd/crowdsec-cli/clilapi/lapi.go @@ -1,4 +1,4 @@ -package main +package clilapi import ( "context" @@ -15,6 +15,8 @@ import ( "github.com/spf13/cobra" "gopkg.in/yaml.v3" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/idgen" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/reload" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/alertcontext" "github.com/crowdsecurity/crowdsec/pkg/apiclient" @@ -28,11 +30,13 @@ import ( const LAPIURLPrefix = "v1" +type configGetter func() *csconfig.Config + type cliLapi struct { - cfg configGetter + cfg configGetter } -func NewCLILapi(cfg configGetter) *cliLapi { +func New(cfg configGetter) *cliLapi { return &cliLapi{ cfg: cfg, } @@ -100,13 +104,13 @@ func (cli *cliLapi) register(apiURL string, outputFile string, machine string) e cfg := cli.cfg() if lapiUser == "" { - lapiUser, err = generateID("") + lapiUser, err = idgen.GenerateMachineID("") if err != nil { return fmt.Errorf("unable to generate machine id: %w", err) } } - password := strfmt.Password(generatePassword(passwordLength)) + password := strfmt.Password(idgen.GeneratePassword(idgen.PasswordLength)) apiurl, err := prepareAPIURL(cfg.API.Client, apiURL) if err != nil { @@ -158,7 +162,7 @@ func (cli *cliLapi) register(apiURL string, outputFile string, machine string) e fmt.Printf("%s\n", string(apiConfigDump)) } - log.Warning(reloadMessage) + 
log.Warning(reload.Message) return nil } diff --git a/cmd/crowdsec-cli/lapi_test.go b/cmd/crowdsec-cli/clilapi/lapi_test.go similarity index 98% rename from cmd/crowdsec-cli/lapi_test.go rename to cmd/crowdsec-cli/clilapi/lapi_test.go index 018ecad8118..caf986d847a 100644 --- a/cmd/crowdsec-cli/lapi_test.go +++ b/cmd/crowdsec-cli/clilapi/lapi_test.go @@ -1,4 +1,4 @@ -package main +package clilapi import ( "testing" diff --git a/cmd/crowdsec-cli/clilapi/utils.go b/cmd/crowdsec-cli/clilapi/utils.go new file mode 100644 index 00000000000..e3ec65f2145 --- /dev/null +++ b/cmd/crowdsec-cli/clilapi/utils.go @@ -0,0 +1,24 @@ +package clilapi + +func removeFromSlice(val string, slice []string) []string { + var i int + var value string + + valueFound := false + + // get the index + for i, value = range slice { + if value == val { + valueFound = true + break + } + } + + if valueFound { + slice[i] = slice[len(slice)-1] + slice[len(slice)-1] = "" + slice = slice[:len(slice)-1] + } + + return slice +} diff --git a/cmd/crowdsec-cli/dashboard.go b/cmd/crowdsec-cli/dashboard.go index 96a2fa38cb7..eb4a2a5956a 100644 --- a/cmd/crowdsec-cli/dashboard.go +++ b/cmd/crowdsec-cli/dashboard.go @@ -20,6 +20,7 @@ import ( log "github.com/sirupsen/logrus" "github.com/spf13/cobra" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/idgen" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/metabase" ) @@ -137,7 +138,7 @@ cscli dashboard setup -l 0.0.0.0 -p 443 --password if metabasePassword == "" { isValid := passwordIsValid(metabasePassword) for !isValid { - metabasePassword = generatePassword(16) + metabasePassword = idgen.GeneratePassword(16) isValid = passwordIsValid(metabasePassword) } } diff --git a/cmd/crowdsec-cli/idgen/machineid.go b/cmd/crowdsec-cli/idgen/machineid.go new file mode 100644 index 00000000000..4bd356b3abc --- /dev/null +++ b/cmd/crowdsec-cli/idgen/machineid.go @@ -0,0 +1,48 @@ +package idgen + +import ( + "fmt" + 
"strings" + + "github.com/google/uuid" + log "github.com/sirupsen/logrus" + + "github.com/crowdsecurity/machineid" +) + +// Returns a unique identifier for each crowdsec installation, using an +// identifier of the OS installation where available, otherwise a random +// string. +func generateMachineIDPrefix() (string, error) { + prefix, err := machineid.ID() + if err == nil { + return prefix, nil + } + + log.Debugf("failed to get machine-id with usual files: %s", err) + + bID, err := uuid.NewRandom() + if err == nil { + return bID.String(), nil + } + + return "", fmt.Errorf("generating machine id: %w", err) +} + +// Generate a unique identifier, composed by a prefix and a random suffix. +// The prefix can be provided by a parameter to use in test environments. +func GenerateMachineID(prefix string) (string, error) { + var err error + if prefix == "" { + prefix, err = generateMachineIDPrefix() + } + + if err != nil { + return "", err + } + + prefix = strings.ReplaceAll(prefix, "-", "")[:32] + suffix := GeneratePassword(16) + + return prefix + suffix, nil +} diff --git a/cmd/crowdsec-cli/idgen/password.go b/cmd/crowdsec-cli/idgen/password.go new file mode 100644 index 00000000000..cd798fdcc48 --- /dev/null +++ b/cmd/crowdsec-cli/idgen/password.go @@ -0,0 +1,32 @@ +package idgen + +import ( + "math/big" + saferand "crypto/rand" + + log "github.com/sirupsen/logrus" +) + +const PasswordLength = 64 + +func GeneratePassword(length int) string { + upper := "ABCDEFGHIJKLMNOPQRSTUVWXY" + lower := "abcdefghijklmnopqrstuvwxyz" + digits := "0123456789" + + charset := upper + lower + digits + charsetLength := len(charset) + + buf := make([]byte, length) + + for i := range length { + rInt, err := saferand.Int(saferand.Reader, big.NewInt(int64(charsetLength))) + if err != nil { + log.Fatalf("failed getting data from prng for password generation : %s", err) + } + + buf[i] = charset[rInt.Int64()] + } + + return string(buf) +} diff --git a/cmd/crowdsec-cli/itemcli.go 
b/cmd/crowdsec-cli/itemcli.go index 85647a5f6e8..11ac1232eae 100644 --- a/cmd/crowdsec-cli/itemcli.go +++ b/cmd/crowdsec-cli/itemcli.go @@ -15,6 +15,7 @@ import ( log "github.com/sirupsen/logrus" "github.com/spf13/cobra" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/reload" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) @@ -92,7 +93,7 @@ func (cli cliItem) install(ctx context.Context, args []string, downloadOnly bool } } - log.Info(reloadMessage) + log.Info(reload.Message) return nil } @@ -170,7 +171,7 @@ func (cli cliItem) remove(args []string, purge bool, force bool, all bool) error log.Infof("Removed %d %s", removed, cli.name) if removed > 0 { - log.Info(reloadMessage) + log.Info(reload.Message) } return nil @@ -212,7 +213,7 @@ func (cli cliItem) remove(args []string, purge bool, force bool, all bool) error log.Infof("Removed %d %s", removed, cli.name) if removed > 0 { - log.Info(reloadMessage) + log.Info(reload.Message) } return nil @@ -273,7 +274,7 @@ func (cli cliItem) upgrade(ctx context.Context, args []string, force bool, all b log.Infof("Updated %d %s", updated, cli.name) if updated > 0 { - log.Info(reloadMessage) + log.Info(reload.Message) } return nil @@ -304,7 +305,7 @@ func (cli cliItem) upgrade(ctx context.Context, args []string, force bool, all b } if updated > 0 { - log.Info(reloadMessage) + log.Info(reload.Message) } return nil diff --git a/cmd/crowdsec-cli/machines.go b/cmd/crowdsec-cli/machines.go index dcdb1963b49..34d0b1b9208 100644 --- a/cmd/crowdsec-cli/machines.go +++ b/cmd/crowdsec-cli/machines.go @@ -1,13 +1,11 @@ package main import ( - saferand "crypto/rand" "encoding/csv" "encoding/json" "errors" "fmt" "io" - "math/big" "os" "slices" "strings" @@ -16,15 +14,13 @@ import ( "github.com/AlecAivazis/survey/v2" "github.com/fatih/color" "github.com/go-openapi/strfmt" - "github.com/google/uuid" "github.com/jedib0t/go-pretty/v6/table" log "github.com/sirupsen/logrus" 
"github.com/spf13/cobra" "gopkg.in/yaml.v3" - "github.com/crowdsecurity/machineid" - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/idgen" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" @@ -34,67 +30,6 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) -const passwordLength = 64 - -func generatePassword(length int) string { - upper := "ABCDEFGHIJKLMNOPQRSTUVWXY" - lower := "abcdefghijklmnopqrstuvwxyz" - digits := "0123456789" - - charset := upper + lower + digits - charsetLength := len(charset) - - buf := make([]byte, length) - - for i := range length { - rInt, err := saferand.Int(saferand.Reader, big.NewInt(int64(charsetLength))) - if err != nil { - log.Fatalf("failed getting data from prng for password generation : %s", err) - } - - buf[i] = charset[rInt.Int64()] - } - - return string(buf) -} - -// Returns a unique identifier for each crowdsec installation, using an -// identifier of the OS installation where available, otherwise a random -// string. -func generateIDPrefix() (string, error) { - prefix, err := machineid.ID() - if err == nil { - return prefix, nil - } - - log.Debugf("failed to get machine-id with usual files: %s", err) - - bID, err := uuid.NewRandom() - if err == nil { - return bID.String(), nil - } - - return "", fmt.Errorf("generating machine id: %w", err) -} - -// Generate a unique identifier, composed by a prefix and a random suffix. -// The prefix can be provided by a parameter to use in test environments. 
-func generateID(prefix string) (string, error) { - var err error - if prefix == "" { - prefix, err = generateIDPrefix() - } - - if err != nil { - return "", err - } - - prefix = strings.ReplaceAll(prefix, "-", "")[:32] - suffix := generatePassword(16) - - return prefix + suffix, nil -} - // getLastHeartbeat returns the last heartbeat timestamp of a machine // and a boolean indicating if the machine is considered active or not. func getLastHeartbeat(m *ent.Machine) (string, bool) { @@ -364,7 +299,7 @@ func (cli *cliMachines) add(args []string, machinePassword string, dumpFile stri return errors.New("please specify a machine name to add, or use --auto") } - machineID, err = generateID("") + machineID, err = idgen.GenerateMachineID("") if err != nil { return fmt.Errorf("unable to generate machine id: %w", err) } @@ -401,7 +336,7 @@ func (cli *cliMachines) add(args []string, machinePassword string, dumpFile stri return errors.New("please specify a password with --password or use --auto") } - machinePassword = generatePassword(passwordLength) + machinePassword = idgen.GeneratePassword(idgen.PasswordLength) } else if machinePassword == "" && interactive { qs := &survey.Password{ Message: "Please provide a password for the machine:", diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index 49140b160ad..9124dde0607 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -14,8 +14,10 @@ import ( "github.com/crowdsecurity/go-cs-lib/trace" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clicapi" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cliconsole" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cliexplain" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clilapi" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/climetrics" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/fflag" @@ -250,10 +252,10 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall 
cmd.AddCommand(NewCLISimulation(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIBouncers(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIMachines(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLICapi(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLILapi(cli.cfg).NewCommand()) + cmd.AddCommand(clicapi.New(cli.cfg).NewCommand()) + cmd.AddCommand(clilapi.New(cli.cfg).NewCommand()) cmd.AddCommand(NewCompletionCmd()) - cmd.AddCommand(cliconsole.New(cli.cfg, reloadMessage).NewCommand()) + cmd.AddCommand(cliconsole.New(cli.cfg).NewCommand()) cmd.AddCommand(cliexplain.New(cli.cfg, ConfigFilePath).NewCommand()) cmd.AddCommand(NewCLIHubTest(cli.cfg).NewCommand()) cmd.AddCommand(NewCLINotifications(cli.cfg).NewCommand()) diff --git a/cmd/crowdsec-cli/reload.go b/cmd/crowdsec-cli/reload.go deleted file mode 100644 index 8dd59be8d05..00000000000 --- a/cmd/crowdsec-cli/reload.go +++ /dev/null @@ -1,6 +0,0 @@ -//go:build !windows && !freebsd && !linux - -package main - -// generic message since we don't know the platform -const reloadMessage = "Please reload the crowdsec process for the new configuration to be effective." diff --git a/cmd/crowdsec-cli/reload/reload.go b/cmd/crowdsec-cli/reload/reload.go new file mode 100644 index 00000000000..fe03af1ea79 --- /dev/null +++ b/cmd/crowdsec-cli/reload/reload.go @@ -0,0 +1,6 @@ +//go:build !windows && !freebsd && !linux + +package reload + +// generic message since we don't know the platform +const Message = "Please reload the crowdsec process for the new configuration to be effective." diff --git a/cmd/crowdsec-cli/reload/reload_freebsd.go b/cmd/crowdsec-cli/reload/reload_freebsd.go new file mode 100644 index 00000000000..0dac99f2315 --- /dev/null +++ b/cmd/crowdsec-cli/reload/reload_freebsd.go @@ -0,0 +1,4 @@ +package reload + +// actually sudo is not that popular on freebsd, but this will do +const Message = "Run 'sudo service crowdsec reload' for the new configuration to be effective." 
diff --git a/cmd/crowdsec-cli/reload/reload_linux.go b/cmd/crowdsec-cli/reload/reload_linux.go new file mode 100644 index 00000000000..fbe16e5f168 --- /dev/null +++ b/cmd/crowdsec-cli/reload/reload_linux.go @@ -0,0 +1,4 @@ +package reload + +// assume systemd, although gentoo and others may differ +const Message = "Run 'sudo systemctl reload crowdsec' for the new configuration to be effective." diff --git a/cmd/crowdsec-cli/reload/reload_windows.go b/cmd/crowdsec-cli/reload/reload_windows.go new file mode 100644 index 00000000000..88642425ae2 --- /dev/null +++ b/cmd/crowdsec-cli/reload/reload_windows.go @@ -0,0 +1,3 @@ +package reload + +const Message = "Please restart the crowdsec service for the new configuration to be effective." diff --git a/cmd/crowdsec-cli/reload_freebsd.go b/cmd/crowdsec-cli/reload_freebsd.go deleted file mode 100644 index 991d3ea6080..00000000000 --- a/cmd/crowdsec-cli/reload_freebsd.go +++ /dev/null @@ -1,4 +0,0 @@ -package main - -// actually sudo is not that popular on freebsd, but this will do -const reloadMessage = "Run 'sudo service crowdsec reload' for the new configuration to be effective." diff --git a/cmd/crowdsec-cli/reload_linux.go b/cmd/crowdsec-cli/reload_linux.go deleted file mode 100644 index a74adfbcdfd..00000000000 --- a/cmd/crowdsec-cli/reload_linux.go +++ /dev/null @@ -1,4 +0,0 @@ -package main - -// assume systemd, although gentoo and others may differ -const reloadMessage = "Run 'sudo systemctl reload crowdsec' for the new configuration to be effective." diff --git a/cmd/crowdsec-cli/reload_windows.go b/cmd/crowdsec-cli/reload_windows.go deleted file mode 100644 index ec9a0b10741..00000000000 --- a/cmd/crowdsec-cli/reload_windows.go +++ /dev/null @@ -1,3 +0,0 @@ -package main - -const reloadMessage = "Please restart the crowdsec service for the new configuration to be effective." 
diff --git a/cmd/crowdsec-cli/simulation.go b/cmd/crowdsec-cli/simulation.go index 12c9980d588..afcc0a2af37 100644 --- a/cmd/crowdsec-cli/simulation.go +++ b/cmd/crowdsec-cli/simulation.go @@ -10,6 +10,7 @@ import ( "github.com/spf13/cobra" "gopkg.in/yaml.v3" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/reload" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) @@ -44,7 +45,7 @@ cscli simulation disable crowdsecurity/ssh-bf`, }, PersistentPostRun: func(cmd *cobra.Command, _ []string) { if cmd.Name() != "status" { - log.Info(reloadMessage) + log.Info(reload.Message) } }, } diff --git a/cmd/crowdsec-cli/support.go b/cmd/crowdsec-cli/support.go index ef14f90df17..3ab1415f2ec 100644 --- a/cmd/crowdsec-cli/support.go +++ b/cmd/crowdsec-cli/support.go @@ -22,6 +22,8 @@ import ( "github.com/crowdsecurity/go-cs-lib/trace" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clicapi" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clilapi" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/climetrics" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/csconfig" @@ -237,7 +239,7 @@ func (cli *cliSupport) dumpLAPIStatus(zw *zip.Writer, hub *cwhub.Hub) error { fmt.Fprintf(out, "LAPI URL: %s\n", cred.URL) fmt.Fprintf(out, "LAPI username: %s\n", cred.Login) - if err := QueryLAPIStatus(hub, cred.URL, cred.Login, cred.Password); err != nil { + if err := clilapi.QueryLAPIStatus(hub, cred.URL, cred.Login, cred.Password); err != nil { return fmt.Errorf("could not authenticate to Local API (LAPI): %w", err) } @@ -260,7 +262,7 @@ func (cli *cliSupport) dumpCAPIStatus(zw *zip.Writer, hub *cwhub.Hub) error { fmt.Fprintf(out, "CAPI URL: %s\n", cred.URL) fmt.Fprintf(out, "CAPI username: %s\n", cred.Login) - auth, enrolled, err := QueryCAPIStatus(hub, cred.URL, cred.Login, cred.Password) + auth, enrolled, err := clicapi.QueryCAPIStatus(hub, cred.URL, cred.Login, 
cred.Password) if err != nil { return fmt.Errorf("could not authenticate to Central API (CAPI): %w", err) } diff --git a/cmd/crowdsec-cli/utils.go b/cmd/crowdsec-cli/utils.go index f6c32094958..c51140836b8 100644 --- a/cmd/crowdsec-cli/utils.go +++ b/cmd/crowdsec-cli/utils.go @@ -38,26 +38,3 @@ func manageCliDecisionAlerts(ip *string, ipRange *string, scope *string, value * return nil } - -func removeFromSlice(val string, slice []string) []string { - var i int - var value string - - valueFound := false - - // get the index - for i, value = range slice { - if value == val { - valueFound = true - break - } - } - - if valueFound { - slice[i] = slice[len(slice)-1] - slice[len(slice)-1] = "" - slice = slice[:len(slice)-1] - } - - return slice -} From c4431b63853af40e48ccffb71756ba023c24a327 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 26 Aug 2024 16:33:35 +0200 Subject: [PATCH 260/318] cscli refact: notifications, simulation, papi, setup (#3190) * package 'clinotifications' * package 'clisimulation' * package 'clipapi' * package 'cslisetup' --- .../{ => clinotifications}/notifications.go | 6 ++++-- .../{ => clinotifications}/notifications_table.go | 2 +- cmd/crowdsec-cli/{ => clipapi}/papi.go | 7 +++++-- cmd/crowdsec-cli/{ => clisetup}/setup.go | 6 ++++-- cmd/crowdsec-cli/{ => clisimulation}/simulation.go | 7 +++++-- cmd/crowdsec-cli/main.go | 12 ++++++++---- 6 files changed, 27 insertions(+), 13 deletions(-) rename cmd/crowdsec-cli/{ => clinotifications}/notifications.go (99%) rename cmd/crowdsec-cli/{ => clinotifications}/notifications_table.go (97%) rename cmd/crowdsec-cli/{ => clipapi}/papi.go (96%) rename cmd/crowdsec-cli/{ => clisetup}/setup.go (98%) rename cmd/crowdsec-cli/{ => clisimulation}/simulation.go (98%) diff --git a/cmd/crowdsec-cli/notifications.go b/cmd/crowdsec-cli/clinotifications/notifications.go similarity index 99% rename from cmd/crowdsec-cli/notifications.go rename to 
cmd/crowdsec-cli/clinotifications/notifications.go index 8c6b6631b33..04be09354c2 100644 --- a/cmd/crowdsec-cli/notifications.go +++ b/cmd/crowdsec-cli/clinotifications/notifications.go @@ -1,4 +1,4 @@ -package main +package clinotifications import ( "context" @@ -40,11 +40,13 @@ type NotificationsCfg struct { ids []uint } +type configGetter func() *csconfig.Config + type cliNotifications struct { cfg configGetter } -func NewCLINotifications(cfg configGetter) *cliNotifications { +func New(cfg configGetter) *cliNotifications { return &cliNotifications{ cfg: cfg, } diff --git a/cmd/crowdsec-cli/notifications_table.go b/cmd/crowdsec-cli/clinotifications/notifications_table.go similarity index 97% rename from cmd/crowdsec-cli/notifications_table.go rename to cmd/crowdsec-cli/clinotifications/notifications_table.go index 2976797bd8a..0b6a3f58efc 100644 --- a/cmd/crowdsec-cli/notifications_table.go +++ b/cmd/crowdsec-cli/clinotifications/notifications_table.go @@ -1,4 +1,4 @@ -package main +package clinotifications import ( "io" diff --git a/cmd/crowdsec-cli/papi.go b/cmd/crowdsec-cli/clipapi/papi.go similarity index 96% rename from cmd/crowdsec-cli/papi.go rename to cmd/crowdsec-cli/clipapi/papi.go index a2fa0a90871..9a9e4fcaa8b 100644 --- a/cmd/crowdsec-cli/papi.go +++ b/cmd/crowdsec-cli/clipapi/papi.go @@ -1,4 +1,4 @@ -package main +package clipapi import ( "fmt" @@ -12,13 +12,16 @@ import ( "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/apiserver" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" ) +type configGetter func() *csconfig.Config + type cliPapi struct { cfg configGetter } -func NewCLIPapi(cfg configGetter) *cliPapi { +func New(cfg configGetter) *cliPapi { return &cliPapi{ cfg: cfg, } diff --git a/cmd/crowdsec-cli/setup.go b/cmd/crowdsec-cli/clisetup/setup.go similarity index 98% rename from cmd/crowdsec-cli/setup.go rename to cmd/crowdsec-cli/clisetup/setup.go index d747af9225f..2a1a2bd0560 100644 --- 
a/cmd/crowdsec-cli/setup.go +++ b/cmd/crowdsec-cli/clisetup/setup.go @@ -1,4 +1,4 @@ -package main +package clisetup import ( "bytes" @@ -18,11 +18,13 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/setup" ) +type configGetter func() *csconfig.Config + type cliSetup struct { cfg configGetter } -func NewCLISetup(cfg configGetter) *cliSetup { +func New(cfg configGetter) *cliSetup { return &cliSetup{ cfg: cfg, } diff --git a/cmd/crowdsec-cli/simulation.go b/cmd/crowdsec-cli/clisimulation/simulation.go similarity index 98% rename from cmd/crowdsec-cli/simulation.go rename to cmd/crowdsec-cli/clisimulation/simulation.go index afcc0a2af37..bf986f82c06 100644 --- a/cmd/crowdsec-cli/simulation.go +++ b/cmd/crowdsec-cli/clisimulation/simulation.go @@ -1,4 +1,4 @@ -package main +package clisimulation import ( "errors" @@ -12,14 +12,17 @@ import ( "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/reload" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) +type configGetter func() *csconfig.Config + type cliSimulation struct { cfg configGetter } -func NewCLISimulation(cfg configGetter) *cliSimulation { +func New(cfg configGetter) *cliSimulation { return &cliSimulation{ cfg: cfg, } diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index 9124dde0607..ab8b4f82518 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -19,6 +19,10 @@ import ( "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cliexplain" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clilapi" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/climetrics" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clinotifications" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clipapi" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clisetup" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clisimulation" 
"github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/fflag" ) @@ -249,7 +253,7 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewCLIDashboard(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIDecisions(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIAlerts(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLISimulation(cli.cfg).NewCommand()) + cmd.AddCommand(clisimulation.New(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIBouncers(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIMachines(cli.cfg).NewCommand()) cmd.AddCommand(clicapi.New(cli.cfg).NewCommand()) @@ -258,9 +262,9 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(cliconsole.New(cli.cfg).NewCommand()) cmd.AddCommand(cliexplain.New(cli.cfg, ConfigFilePath).NewCommand()) cmd.AddCommand(NewCLIHubTest(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLINotifications(cli.cfg).NewCommand()) + cmd.AddCommand(clinotifications.New(cli.cfg).NewCommand()) cmd.AddCommand(NewCLISupport(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLIPapi(cli.cfg).NewCommand()) + cmd.AddCommand(clipapi.New(cli.cfg).NewCommand()) cmd.AddCommand(NewCLICollection(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIParser(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIScenario(cli.cfg).NewCommand()) @@ -270,7 +274,7 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewCLIAppsecRule(cli.cfg).NewCommand()) if fflag.CscliSetup.IsEnabled() { - cmd.AddCommand(NewCLISetup(cli.cfg).NewCommand()) + cmd.AddCommand(clisetup.New(cli.cfg).NewCommand()) } if len(os.Args) > 1 { From 27559d6636f794c5bc50aa62485d455cddaaee7a Mon Sep 17 00:00:00 2001 From: Robert Nagy Date: Tue, 27 Aug 2024 12:41:53 +0200 Subject: [PATCH 261/318] implement GetFSType on openbsd with the correct statfs struct member (#3191) --- pkg/types/getfstype.go | 2 +- pkg/types/getfstype_openbsd.go | 25 +++++++++++++++++++++++++ 2 files changed, 
26 insertions(+), 1 deletion(-) create mode 100644 pkg/types/getfstype_openbsd.go diff --git a/pkg/types/getfstype.go b/pkg/types/getfstype.go index c16eea5cf98..728e986bed0 100644 --- a/pkg/types/getfstype.go +++ b/pkg/types/getfstype.go @@ -1,4 +1,4 @@ -//go:build !windows && !freebsd +//go:build !windows && !freebsd && !openbsd package types diff --git a/pkg/types/getfstype_openbsd.go b/pkg/types/getfstype_openbsd.go new file mode 100644 index 00000000000..9ec254b7bec --- /dev/null +++ b/pkg/types/getfstype_openbsd.go @@ -0,0 +1,25 @@ +//go:build openbsd + +package types + +import ( + "fmt" + "syscall" +) + +func GetFSType(path string) (string, error) { + var fsStat syscall.Statfs_t + + if err := syscall.Statfs(path, &fsStat); err != nil { + return "", fmt.Errorf("failed to get filesystem type: %w", err) + } + + bs := fsStat.F_fstypename + + b := make([]byte, len(bs)) + for i, v := range bs { + b[i] = byte(v) + } + + return string(b), nil +} From ec415ed069d36091203e99a59ac231995ad9e49f Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 27 Aug 2024 13:07:05 +0200 Subject: [PATCH 262/318] cscli hub update: option --with-content to keep embedded items in index; use it in docker (#3192) --- Dockerfile | 3 +-- Dockerfile.debian | 3 +-- cmd/crowdsec-cli/hub.go | 10 ++++++++-- docker/docker_start.sh | 2 +- pkg/cwhub/remote.go | 33 ++++++++++++++++++++++++++++++--- test/lib/config/config-local | 2 +- 6 files changed, 42 insertions(+), 11 deletions(-) diff --git a/Dockerfile b/Dockerfile index d4c8978c9ae..93ba1010fc2 100644 --- a/Dockerfile +++ b/Dockerfile @@ -24,8 +24,7 @@ RUN make clean release DOCKER_BUILD=1 BUILD_STATIC=1 CGO_CFLAGS="-D_LARGEFILE64_ cd crowdsec-v* && \ ./wizard.sh --docker-mode && \ cd - >/dev/null && \ - cscli hub update && \ - ./docker/preload-hub-items && \ + cscli hub update --with-content && \ cscli collections install crowdsecurity/linux && \ cscli parsers install crowdsecurity/whitelists diff --git 
a/Dockerfile.debian b/Dockerfile.debian index 73a44e8a4b8..dd55d2c3e48 100644 --- a/Dockerfile.debian +++ b/Dockerfile.debian @@ -29,8 +29,7 @@ RUN make clean release DOCKER_BUILD=1 BUILD_STATIC=1 && \ cd crowdsec-v* && \ ./wizard.sh --docker-mode && \ cd - >/dev/null && \ - cscli hub update && \ - ./docker/preload-hub-items && \ + cscli hub update --with-content && \ cscli collections install crowdsecurity/linux && \ cscli parsers install crowdsecurity/whitelists diff --git a/cmd/crowdsec-cli/hub.go b/cmd/crowdsec-cli/hub.go index e6cba08940a..34bd61b1277 100644 --- a/cmd/crowdsec-cli/hub.go +++ b/cmd/crowdsec-cli/hub.go @@ -99,9 +99,10 @@ func (cli *cliHub) newListCmd() *cobra.Command { return cmd } -func (cli *cliHub) update(ctx context.Context) error { +func (cli *cliHub) update(ctx context.Context, withContent bool) error { local := cli.cfg().Hub remote := require.RemoteHub(ctx, cli.cfg()) + remote.EmbedItemContent = withContent // don't use require.Hub because if there is no index file, it would fail hub, err := cwhub.NewHub(local, remote, log.StandardLogger()) @@ -125,6 +126,8 @@ func (cli *cliHub) update(ctx context.Context) error { } func (cli *cliHub) newUpdateCmd() *cobra.Command { + withContent := false + cmd := &cobra.Command{ Use: "update", Short: "Download the latest index (catalog of available configurations)", @@ -134,10 +137,13 @@ Fetches the .index.json file from the hub, containing the list of available conf Args: cobra.ExactArgs(0), DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { - return cli.update(cmd.Context()) + return cli.update(cmd.Context(), withContent) }, } + flags := cmd.Flags() + flags.BoolVar(&withContent, "with-content", false, "Download index with embedded item content") + return cmd } diff --git a/docker/docker_start.sh b/docker/docker_start.sh index 0ae8841e029..fb87c1eff9b 100755 --- a/docker/docker_start.sh +++ b/docker/docker_start.sh @@ -57,7 +57,7 @@ run_hub_update() { 
index_modification_time=$(stat -c %Y /etc/crowdsec/hub/.index.json 2>/dev/null) # Run cscli hub update if no date or if the index file is older than 24h if [ -z "$index_modification_time" ] || [ $(( $(date +%s) - index_modification_time )) -gt 86400 ]; then - cscli hub update + cscli hub update --with-content else echo "Skipping hub update, index file is recent" fi diff --git a/pkg/cwhub/remote.go b/pkg/cwhub/remote.go index 04e4fab972f..8d2dc2dbb94 100644 --- a/pkg/cwhub/remote.go +++ b/pkg/cwhub/remote.go @@ -3,6 +3,7 @@ package cwhub import ( "context" "fmt" + "net/url" "github.com/sirupsen/logrus" @@ -11,9 +12,10 @@ import ( // RemoteHubCfg is used to retrieve index and items from the remote hub. type RemoteHubCfg struct { - Branch string - URLTemplate string - IndexPath string + Branch string + URLTemplate string + IndexPath string + EmbedItemContent bool } // urlTo builds the URL to download a file from the remote hub. @@ -30,6 +32,24 @@ func (r *RemoteHubCfg) urlTo(remotePath string) (string, error) { return fmt.Sprintf(r.URLTemplate, r.Branch, remotePath), nil } +// addURLParam adds the "with_content=true" parameter to the URL if it's not already present. +func addURLParam(rawURL string, param string, value string) (string, error) { + parsedURL, err := url.Parse(rawURL) + if err != nil { + return "", fmt.Errorf("failed to parse URL: %w", err) + } + + query := parsedURL.Query() + + if _, exists := query[param]; !exists { + query.Add(param, value) + } + + parsedURL.RawQuery = query.Encode() + + return parsedURL.String(), nil +} + // fetchIndex downloads the index from the hub and returns the content. 
func (r *RemoteHubCfg) fetchIndex(ctx context.Context, destPath string) (bool, error) { if r == nil { @@ -41,6 +61,13 @@ func (r *RemoteHubCfg) fetchIndex(ctx context.Context, destPath string) (bool, e return false, fmt.Errorf("failed to build hub index request: %w", err) } + if r.EmbedItemContent { + url, err = addURLParam(url, "with_content", "true") + if err != nil { + return false, fmt.Errorf("failed to add 'with_content' parameter to URL: %w", err) + } + } + downloaded, err := downloader. New(). WithHTTPClient(hubClient). diff --git a/test/lib/config/config-local b/test/lib/config/config-local index f80c26bc652..3e3c806b616 100755 --- a/test/lib/config/config-local +++ b/test/lib/config/config-local @@ -114,7 +114,7 @@ make_init_data() { ./instance-db config-yaml ./instance-db setup - "$CSCLI" --warning hub update + "$CSCLI" --warning hub update --with-content # preload some content and data files "$CSCLI" collections install crowdsecurity/linux --download-only From 2e970b3eb9869c11bef2414758c62419e18fedc7 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 27 Aug 2024 15:42:24 +0200 Subject: [PATCH 263/318] cscli refact: package 'clihubtest' (#3174) * cscli refact: package 'clihubtest' * split hubtest.go subcommands in files * extract function getCoverage() * common function hubTestCoverageTable() * update cyclomatic lint * lint --- .golangci.yml | 2 +- cmd/crowdsec-cli/clihubtest/clean.go | 31 + cmd/crowdsec-cli/clihubtest/coverage.go | 166 ++++ cmd/crowdsec-cli/clihubtest/create.go | 158 ++++ cmd/crowdsec-cli/clihubtest/eval.go | 44 ++ cmd/crowdsec-cli/clihubtest/explain.go | 53 ++ cmd/crowdsec-cli/clihubtest/hubtest.go | 81 ++ cmd/crowdsec-cli/clihubtest/info.go | 44 ++ cmd/crowdsec-cli/clihubtest/list.go | 42 + cmd/crowdsec-cli/clihubtest/run.go | 195 +++++ .../{hubtest_table.go => clihubtest/table.go} | 48 +- cmd/crowdsec-cli/hubtest.go | 746 ------------------ cmd/crowdsec-cli/main.go | 3 +- 13 files changed, 820 
insertions(+), 793 deletions(-) create mode 100644 cmd/crowdsec-cli/clihubtest/clean.go create mode 100644 cmd/crowdsec-cli/clihubtest/coverage.go create mode 100644 cmd/crowdsec-cli/clihubtest/create.go create mode 100644 cmd/crowdsec-cli/clihubtest/eval.go create mode 100644 cmd/crowdsec-cli/clihubtest/explain.go create mode 100644 cmd/crowdsec-cli/clihubtest/hubtest.go create mode 100644 cmd/crowdsec-cli/clihubtest/info.go create mode 100644 cmd/crowdsec-cli/clihubtest/list.go create mode 100644 cmd/crowdsec-cli/clihubtest/run.go rename cmd/crowdsec-cli/{hubtest_table.go => clihubtest/table.go} (50%) delete mode 100644 cmd/crowdsec-cli/hubtest.go diff --git a/.golangci.yml b/.golangci.yml index 86771f17f60..62147b54101 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -103,7 +103,7 @@ linters-settings: disabled: true - name: cyclomatic # lower this after refactoring - arguments: [42] + arguments: [41] - name: defer disabled: true - name: empty-block diff --git a/cmd/crowdsec-cli/clihubtest/clean.go b/cmd/crowdsec-cli/clihubtest/clean.go new file mode 100644 index 00000000000..075d7961d84 --- /dev/null +++ b/cmd/crowdsec-cli/clihubtest/clean.go @@ -0,0 +1,31 @@ +package clihubtest + +import ( + "fmt" + + "github.com/spf13/cobra" +) + +func (cli *cliHubTest) NewCleanCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "clean", + Short: "clean [test_name]", + Args: cobra.MinimumNArgs(1), + DisableAutoGenTag: true, + RunE: func(_ *cobra.Command, args []string) error { + for _, testName := range args { + test, err := hubPtr.LoadTestItem(testName) + if err != nil { + return fmt.Errorf("unable to load test '%s': %w", testName, err) + } + if err := test.Clean(); err != nil { + return fmt.Errorf("unable to clean test '%s' env: %w", test.Name, err) + } + } + + return nil + }, + } + + return cmd +} diff --git a/cmd/crowdsec-cli/clihubtest/coverage.go b/cmd/crowdsec-cli/clihubtest/coverage.go new file mode 100644 index 00000000000..b6e5b1e9c01 --- /dev/null +++ 
b/cmd/crowdsec-cli/clihubtest/coverage.go @@ -0,0 +1,166 @@ +package clihubtest + +import ( + "encoding/json" + "errors" + "fmt" + "math" + + "github.com/fatih/color" + "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/pkg/hubtest" +) + +// getCoverage returns the coverage and the percentage of tests that passed +func getCoverage(show bool, getCoverageFunc func() ([]hubtest.Coverage, error)) ([]hubtest.Coverage, int, error) { + if !show { + return nil, 0, nil + } + + coverage, err := getCoverageFunc() + if err != nil { + return nil, 0, fmt.Errorf("while getting coverage: %w", err) + } + + tested := 0 + + for _, test := range coverage { + if test.TestsCount > 0 { + tested++ + } + } + + // keep coverage 0 if there's no tests? + percent := 0 + if len(coverage) > 0 { + percent = int(math.Round((float64(tested) / float64(len(coverage)) * 100))) + } + + return coverage, percent, nil +} + +func (cli *cliHubTest) coverage(showScenarioCov bool, showParserCov bool, showAppsecCov bool, showOnlyPercent bool) error { + cfg := cli.cfg() + + // for this one we explicitly don't do for appsec + if err := HubTest.LoadAllTests(); err != nil { + return fmt.Errorf("unable to load all tests: %+v", err) + } + + var err error + + // if all are false (flag by default), show them + if !showParserCov && !showScenarioCov && !showAppsecCov { + showParserCov = true + showScenarioCov = true + showAppsecCov = true + } + + parserCoverage, parserCoveragePercent, err := getCoverage(showParserCov, HubTest.GetParsersCoverage) + if err != nil { + return err + } + + scenarioCoverage, scenarioCoveragePercent, err := getCoverage(showScenarioCov, HubTest.GetScenariosCoverage) + if err != nil { + return err + } + + appsecRuleCoverage, appsecRuleCoveragePercent, err := getCoverage(showAppsecCov, HubTest.GetAppsecCoverage) + if err != nil { + return err + } + + if showOnlyPercent { + switch { + case showParserCov: + fmt.Printf("parsers=%d%%", parserCoveragePercent) + case showScenarioCov: + 
fmt.Printf("scenarios=%d%%", scenarioCoveragePercent) + case showAppsecCov: + fmt.Printf("appsec_rules=%d%%", appsecRuleCoveragePercent) + } + + return nil + } + + switch cfg.Cscli.Output { + case "human": + if showParserCov { + hubTestCoverageTable(color.Output, cfg.Cscli.Color, []string{"Parser", "Status", "Number of tests"}, parserCoverage) + } + + if showScenarioCov { + hubTestCoverageTable(color.Output, cfg.Cscli.Color, []string{"Scenario", "Status", "Number of tests"}, parserCoverage) + } + + if showAppsecCov { + hubTestCoverageTable(color.Output, cfg.Cscli.Color, []string{"Appsec Rule", "Status", "Number of tests"}, parserCoverage) + } + + fmt.Println() + + if showParserCov { + fmt.Printf("PARSERS : %d%% of coverage\n", parserCoveragePercent) + } + + if showScenarioCov { + fmt.Printf("SCENARIOS : %d%% of coverage\n", scenarioCoveragePercent) + } + + if showAppsecCov { + fmt.Printf("APPSEC RULES : %d%% of coverage\n", appsecRuleCoveragePercent) + } + case "json": + dump, err := json.MarshalIndent(parserCoverage, "", " ") + if err != nil { + return err + } + + fmt.Printf("%s", dump) + + dump, err = json.MarshalIndent(scenarioCoverage, "", " ") + if err != nil { + return err + } + + fmt.Printf("%s", dump) + + dump, err = json.MarshalIndent(appsecRuleCoverage, "", " ") + if err != nil { + return err + } + + fmt.Printf("%s", dump) + default: + return errors.New("only human/json output modes are supported") + } + + return nil +} + +func (cli *cliHubTest) NewCoverageCmd() *cobra.Command { + var ( + showParserCov bool + showScenarioCov bool + showOnlyPercent bool + showAppsecCov bool + ) + + cmd := &cobra.Command{ + Use: "coverage", + Short: "coverage", + DisableAutoGenTag: true, + RunE: func(_ *cobra.Command, _ []string) error { + return cli.coverage(showScenarioCov, showParserCov, showAppsecCov, showOnlyPercent) + }, + } + + cmd.PersistentFlags().BoolVar(&showOnlyPercent, "percent", false, "Show only percentages of coverage") + 
cmd.PersistentFlags().BoolVar(&showParserCov, "parsers", false, "Show only parsers coverage") + cmd.PersistentFlags().BoolVar(&showScenarioCov, "scenarios", false, "Show only scenarios coverage") + cmd.PersistentFlags().BoolVar(&showAppsecCov, "appsec", false, "Show only appsec coverage") + + return cmd +} diff --git a/cmd/crowdsec-cli/clihubtest/create.go b/cmd/crowdsec-cli/clihubtest/create.go new file mode 100644 index 00000000000..523c9740cf7 --- /dev/null +++ b/cmd/crowdsec-cli/clihubtest/create.go @@ -0,0 +1,158 @@ +package clihubtest + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "text/template" + + "github.com/spf13/cobra" + "gopkg.in/yaml.v3" + + "github.com/crowdsecurity/crowdsec/pkg/hubtest" +) + +func (cli *cliHubTest) NewCreateCmd() *cobra.Command { + var ( + ignoreParsers bool + labels map[string]string + logType string + ) + + parsers := []string{} + postoverflows := []string{} + scenarios := []string{} + + cmd := &cobra.Command{ + Use: "create", + Short: "create [test_name]", + Example: `cscli hubtest create my-awesome-test --type syslog +cscli hubtest create my-nginx-custom-test --type nginx +cscli hubtest create my-scenario-test --parsers crowdsecurity/nginx --scenarios crowdsecurity/http-probing`, + Args: cobra.ExactArgs(1), + DisableAutoGenTag: true, + RunE: func(_ *cobra.Command, args []string) error { + testName := args[0] + testPath := filepath.Join(hubPtr.HubTestPath, testName) + if _, err := os.Stat(testPath); os.IsExist(err) { + return fmt.Errorf("test '%s' already exists in '%s', exiting", testName, testPath) + } + + if isAppsecTest { + logType = "appsec" + } + + if logType == "" { + return errors.New("please provide a type (--type) for the test") + } + + if err := os.MkdirAll(testPath, os.ModePerm); err != nil { + return fmt.Errorf("unable to create folder '%s': %+v", testPath, err) + } + + configFilePath := filepath.Join(testPath, "config.yaml") + + configFileData := &hubtest.HubTestItemConfig{} + if logType == "appsec" { + 
// create empty nuclei template file + nucleiFileName := testName + ".yaml" + nucleiFilePath := filepath.Join(testPath, nucleiFileName) + + nucleiFile, err := os.OpenFile(nucleiFilePath, os.O_RDWR|os.O_CREATE, 0o755) + if err != nil { + return err + } + + ntpl := template.Must(template.New("nuclei").Parse(hubtest.TemplateNucleiFile)) + if ntpl == nil { + return errors.New("unable to parse nuclei template") + } + ntpl.ExecuteTemplate(nucleiFile, "nuclei", struct{ TestName string }{TestName: testName}) + nucleiFile.Close() + configFileData.AppsecRules = []string{"./appsec-rules//your_rule_here.yaml"} + configFileData.NucleiTemplate = nucleiFileName + fmt.Println() + fmt.Printf(" Test name : %s\n", testName) + fmt.Printf(" Test path : %s\n", testPath) + fmt.Printf(" Config File : %s\n", configFilePath) + fmt.Printf(" Nuclei Template : %s\n", nucleiFilePath) + } else { + // create empty log file + logFileName := testName + ".log" + logFilePath := filepath.Join(testPath, logFileName) + logFile, err := os.Create(logFilePath) + if err != nil { + return err + } + logFile.Close() + + // create empty parser assertion file + parserAssertFilePath := filepath.Join(testPath, hubtest.ParserAssertFileName) + parserAssertFile, err := os.Create(parserAssertFilePath) + if err != nil { + return err + } + parserAssertFile.Close() + // create empty scenario assertion file + scenarioAssertFilePath := filepath.Join(testPath, hubtest.ScenarioAssertFileName) + scenarioAssertFile, err := os.Create(scenarioAssertFilePath) + if err != nil { + return err + } + scenarioAssertFile.Close() + + parsers = append(parsers, "crowdsecurity/syslog-logs") + parsers = append(parsers, "crowdsecurity/dateparse-enrich") + + if len(scenarios) == 0 { + scenarios = append(scenarios, "") + } + + if len(postoverflows) == 0 { + postoverflows = append(postoverflows, "") + } + configFileData.Parsers = parsers + configFileData.Scenarios = scenarios + configFileData.PostOverflows = postoverflows + 
configFileData.LogFile = logFileName + configFileData.LogType = logType + configFileData.IgnoreParsers = ignoreParsers + configFileData.Labels = labels + fmt.Println() + fmt.Printf(" Test name : %s\n", testName) + fmt.Printf(" Test path : %s\n", testPath) + fmt.Printf(" Log file : %s (please fill it with logs)\n", logFilePath) + fmt.Printf(" Parser assertion file : %s (please fill it with assertion)\n", parserAssertFilePath) + fmt.Printf(" Scenario assertion file : %s (please fill it with assertion)\n", scenarioAssertFilePath) + fmt.Printf(" Configuration File : %s (please fill it with parsers, scenarios...)\n", configFilePath) + } + + fd, err := os.Create(configFilePath) + if err != nil { + return fmt.Errorf("open: %w", err) + } + data, err := yaml.Marshal(configFileData) + if err != nil { + return fmt.Errorf("marshal: %w", err) + } + _, err = fd.Write(data) + if err != nil { + return fmt.Errorf("write: %w", err) + } + if err := fd.Close(); err != nil { + return fmt.Errorf("close: %w", err) + } + + return nil + }, + } + + cmd.PersistentFlags().StringVarP(&logType, "type", "t", "", "Log type of the test") + cmd.Flags().StringSliceVarP(&parsers, "parsers", "p", parsers, "Parsers to add to test") + cmd.Flags().StringSliceVar(&postoverflows, "postoverflows", postoverflows, "Postoverflows to add to test") + cmd.Flags().StringSliceVarP(&scenarios, "scenarios", "s", scenarios, "Scenarios to add to test") + cmd.PersistentFlags().BoolVar(&ignoreParsers, "ignore-parsers", false, "Don't run test on parsers") + + return cmd +} diff --git a/cmd/crowdsec-cli/clihubtest/eval.go b/cmd/crowdsec-cli/clihubtest/eval.go new file mode 100644 index 00000000000..204a0bc420e --- /dev/null +++ b/cmd/crowdsec-cli/clihubtest/eval.go @@ -0,0 +1,44 @@ +package clihubtest + +import ( + "fmt" + + "github.com/spf13/cobra" +) + +func (cli *cliHubTest) NewEvalCmd() *cobra.Command { + var evalExpression string + + cmd := &cobra.Command{ + Use: "eval", + Short: "eval [test_name]", + Args: 
cobra.ExactArgs(1), + DisableAutoGenTag: true, + RunE: func(_ *cobra.Command, args []string) error { + for _, testName := range args { + test, err := hubPtr.LoadTestItem(testName) + if err != nil { + return fmt.Errorf("can't load test: %+v", err) + } + + err = test.ParserAssert.LoadTest(test.ParserResultFile) + if err != nil { + return fmt.Errorf("can't load test results from '%s': %+v", test.ParserResultFile, err) + } + + output, err := test.ParserAssert.EvalExpression(evalExpression) + if err != nil { + return err + } + + fmt.Print(output) + } + + return nil + }, + } + + cmd.PersistentFlags().StringVarP(&evalExpression, "expr", "e", "", "Expression to eval") + + return cmd +} diff --git a/cmd/crowdsec-cli/clihubtest/explain.go b/cmd/crowdsec-cli/clihubtest/explain.go new file mode 100644 index 00000000000..ecaf520211e --- /dev/null +++ b/cmd/crowdsec-cli/clihubtest/explain.go @@ -0,0 +1,53 @@ +package clihubtest + +import ( + "fmt" + + "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/pkg/dumps" +) + +func (cli *cliHubTest) NewExplainCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "explain", + Short: "explain [test_name]", + Args: cobra.ExactArgs(1), + DisableAutoGenTag: true, + RunE: func(_ *cobra.Command, args []string) error { + for _, testName := range args { + test, err := HubTest.LoadTestItem(testName) + if err != nil { + return fmt.Errorf("can't load test: %+v", err) + } + err = test.ParserAssert.LoadTest(test.ParserResultFile) + if err != nil { + if err = test.Run(); err != nil { + return fmt.Errorf("running test '%s' failed: %+v", test.Name, err) + } + + if err = test.ParserAssert.LoadTest(test.ParserResultFile); err != nil { + return fmt.Errorf("unable to load parser result after run: %w", err) + } + } + + err = test.ScenarioAssert.LoadTest(test.ScenarioResultFile, test.BucketPourResultFile) + if err != nil { + if err = test.Run(); err != nil { + return fmt.Errorf("running test '%s' failed: %+v", test.Name, err) + } + + if err = 
test.ScenarioAssert.LoadTest(test.ScenarioResultFile, test.BucketPourResultFile); err != nil { + return fmt.Errorf("unable to load scenario result after run: %w", err) + } + } + opts := dumps.DumpOpts{} + dumps.DumpTree(*test.ParserAssert.TestData, *test.ScenarioAssert.PourData, opts) + } + + return nil + }, + } + + return cmd +} diff --git a/cmd/crowdsec-cli/clihubtest/hubtest.go b/cmd/crowdsec-cli/clihubtest/hubtest.go new file mode 100644 index 00000000000..22607336177 --- /dev/null +++ b/cmd/crowdsec-cli/clihubtest/hubtest.go @@ -0,0 +1,81 @@ +package clihubtest + +import ( + "fmt" + + "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/hubtest" +) + +type configGetter func() *csconfig.Config + +var ( + HubTest hubtest.HubTest + HubAppsecTests hubtest.HubTest + hubPtr *hubtest.HubTest + isAppsecTest bool +) + +type cliHubTest struct { + cfg configGetter +} + +func New(cfg configGetter) *cliHubTest { + return &cliHubTest{ + cfg: cfg, + } +} + +func (cli *cliHubTest) NewCommand() *cobra.Command { + var ( + hubPath string + crowdsecPath string + cscliPath string + ) + + cmd := &cobra.Command{ + Use: "hubtest", + Short: "Run functional tests on hub configurations", + Long: "Run functional tests on hub configurations (parsers, scenarios, collections...)", + Args: cobra.ExactArgs(0), + DisableAutoGenTag: true, + PersistentPreRunE: func(_ *cobra.Command, _ []string) error { + var err error + HubTest, err = hubtest.NewHubTest(hubPath, crowdsecPath, cscliPath, false) + if err != nil { + return fmt.Errorf("unable to load hubtest: %+v", err) + } + + HubAppsecTests, err = hubtest.NewHubTest(hubPath, crowdsecPath, cscliPath, true) + if err != nil { + return fmt.Errorf("unable to load appsec specific hubtest: %+v", err) + } + + // commands will use the hubPtr, will point to the default hubTest object, or the one dedicated to appsec tests + hubPtr = &HubTest + if isAppsecTest { + hubPtr = &HubAppsecTests + } + 
+ return nil + }, + } + + cmd.PersistentFlags().StringVar(&hubPath, "hub", ".", "Path to hub folder") + cmd.PersistentFlags().StringVar(&crowdsecPath, "crowdsec", "crowdsec", "Path to crowdsec") + cmd.PersistentFlags().StringVar(&cscliPath, "cscli", "cscli", "Path to cscli") + cmd.PersistentFlags().BoolVar(&isAppsecTest, "appsec", false, "Command relates to appsec tests") + + cmd.AddCommand(cli.NewCreateCmd()) + cmd.AddCommand(cli.NewRunCmd()) + cmd.AddCommand(cli.NewCleanCmd()) + cmd.AddCommand(cli.NewInfoCmd()) + cmd.AddCommand(cli.NewListCmd()) + cmd.AddCommand(cli.NewCoverageCmd()) + cmd.AddCommand(cli.NewEvalCmd()) + cmd.AddCommand(cli.NewExplainCmd()) + + return cmd +} diff --git a/cmd/crowdsec-cli/clihubtest/info.go b/cmd/crowdsec-cli/clihubtest/info.go new file mode 100644 index 00000000000..2e3fd132340 --- /dev/null +++ b/cmd/crowdsec-cli/clihubtest/info.go @@ -0,0 +1,44 @@ +package clihubtest + +import ( + "fmt" + "path/filepath" + "strings" + + "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/pkg/hubtest" +) + +func (cli *cliHubTest) NewInfoCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "info", + Short: "info [test_name]", + Args: cobra.MinimumNArgs(1), + DisableAutoGenTag: true, + RunE: func(_ *cobra.Command, args []string) error { + for _, testName := range args { + test, err := hubPtr.LoadTestItem(testName) + if err != nil { + return fmt.Errorf("unable to load test '%s': %w", testName, err) + } + fmt.Println() + fmt.Printf(" Test name : %s\n", test.Name) + fmt.Printf(" Test path : %s\n", test.Path) + if isAppsecTest { + fmt.Printf(" Nuclei Template : %s\n", test.Config.NucleiTemplate) + fmt.Printf(" Appsec Rules : %s\n", strings.Join(test.Config.AppsecRules, ", ")) + } else { + fmt.Printf(" Log file : %s\n", filepath.Join(test.Path, test.Config.LogFile)) + fmt.Printf(" Parser assertion file : %s\n", filepath.Join(test.Path, hubtest.ParserAssertFileName)) + fmt.Printf(" Scenario assertion file : %s\n", filepath.Join(test.Path, 
hubtest.ScenarioAssertFileName)) + } + fmt.Printf(" Configuration File : %s\n", filepath.Join(test.Path, "config.yaml")) + } + + return nil + }, + } + + return cmd +} diff --git a/cmd/crowdsec-cli/clihubtest/list.go b/cmd/crowdsec-cli/clihubtest/list.go new file mode 100644 index 00000000000..76c51927897 --- /dev/null +++ b/cmd/crowdsec-cli/clihubtest/list.go @@ -0,0 +1,42 @@ +package clihubtest + +import ( + "encoding/json" + "errors" + "fmt" + + "github.com/fatih/color" + "github.com/spf13/cobra" +) + +func (cli *cliHubTest) NewListCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "list", + Short: "list", + DisableAutoGenTag: true, + RunE: func(_ *cobra.Command, _ []string) error { + cfg := cli.cfg() + + if err := hubPtr.LoadAllTests(); err != nil { + return fmt.Errorf("unable to load all tests: %w", err) + } + + switch cfg.Cscli.Output { + case "human": + hubTestListTable(color.Output, cfg.Cscli.Color, hubPtr.Tests) + case "json": + j, err := json.MarshalIndent(hubPtr.Tests, " ", " ") + if err != nil { + return err + } + fmt.Println(string(j)) + default: + return errors.New("only human/json output modes are supported") + } + + return nil + }, + } + + return cmd +} diff --git a/cmd/crowdsec-cli/clihubtest/run.go b/cmd/crowdsec-cli/clihubtest/run.go new file mode 100644 index 00000000000..552ee87c16e --- /dev/null +++ b/cmd/crowdsec-cli/clihubtest/run.go @@ -0,0 +1,195 @@ +package clihubtest + +import ( + "encoding/json" + "errors" + "fmt" + "os" + "strings" + + "github.com/AlecAivazis/survey/v2" + "github.com/fatih/color" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/pkg/emoji" + "github.com/crowdsecurity/crowdsec/pkg/hubtest" +) + +func (cli *cliHubTest) run(runAll bool, NucleiTargetHost string, AppSecHost string, args []string) error { + cfg := cli.cfg() + + if !runAll && len(args) == 0 { + return errors.New("please provide test to run or --all flag") + } + + hubPtr.NucleiTargetHost = NucleiTargetHost 
+ hubPtr.AppSecHost = AppSecHost + + if runAll { + if err := hubPtr.LoadAllTests(); err != nil { + return fmt.Errorf("unable to load all tests: %+v", err) + } + } else { + for _, testName := range args { + _, err := hubPtr.LoadTestItem(testName) + if err != nil { + return fmt.Errorf("unable to load test '%s': %w", testName, err) + } + } + } + + // set timezone to avoid DST issues + os.Setenv("TZ", "UTC") + + for _, test := range hubPtr.Tests { + if cfg.Cscli.Output == "human" { + log.Infof("Running test '%s'", test.Name) + } + + err := test.Run() + if err != nil { + log.Errorf("running test '%s' failed: %+v", test.Name, err) + } + } + + return nil +} + +func (cli *cliHubTest) NewRunCmd() *cobra.Command { + var ( + noClean bool + runAll bool + forceClean bool + NucleiTargetHost string + AppSecHost string + ) + + cmd := &cobra.Command{ + Use: "run", + Short: "run [test_name]", + DisableAutoGenTag: true, + RunE: func(_ *cobra.Command, args []string) error { + return cli.run(runAll, NucleiTargetHost, AppSecHost, args) + }, + PersistentPostRunE: func(_ *cobra.Command, _ []string) error { + cfg := cli.cfg() + + success := true + testResult := make(map[string]bool) + for _, test := range hubPtr.Tests { + if test.AutoGen && !isAppsecTest { + if test.ParserAssert.AutoGenAssert { + log.Warningf("Assert file '%s' is empty, generating assertion:", test.ParserAssert.File) + fmt.Println() + fmt.Println(test.ParserAssert.AutoGenAssertData) + } + if test.ScenarioAssert.AutoGenAssert { + log.Warningf("Assert file '%s' is empty, generating assertion:", test.ScenarioAssert.File) + fmt.Println() + fmt.Println(test.ScenarioAssert.AutoGenAssertData) + } + if !noClean { + if err := test.Clean(); err != nil { + return fmt.Errorf("unable to clean test '%s' env: %w", test.Name, err) + } + } + + return fmt.Errorf("please fill your assert file(s) for test '%s', exiting", test.Name) + } + testResult[test.Name] = test.Success + if test.Success { + if cfg.Cscli.Output == "human" { + 
log.Infof("Test '%s' passed successfully (%d assertions)\n", test.Name, test.ParserAssert.NbAssert+test.ScenarioAssert.NbAssert) + } + if !noClean { + if err := test.Clean(); err != nil { + return fmt.Errorf("unable to clean test '%s' env: %w", test.Name, err) + } + } + } else { + success = false + cleanTestEnv := false + if cfg.Cscli.Output == "human" { + if len(test.ParserAssert.Fails) > 0 { + fmt.Println() + log.Errorf("Parser test '%s' failed (%d errors)\n", test.Name, len(test.ParserAssert.Fails)) + for _, fail := range test.ParserAssert.Fails { + fmt.Printf("(L.%d) %s => %s\n", fail.Line, emoji.RedCircle, fail.Expression) + fmt.Printf(" Actual expression values:\n") + for key, value := range fail.Debug { + fmt.Printf(" %s = '%s'\n", key, strings.TrimSuffix(value, "\n")) + } + fmt.Println() + } + } + if len(test.ScenarioAssert.Fails) > 0 { + fmt.Println() + log.Errorf("Scenario test '%s' failed (%d errors)\n", test.Name, len(test.ScenarioAssert.Fails)) + for _, fail := range test.ScenarioAssert.Fails { + fmt.Printf("(L.%d) %s => %s\n", fail.Line, emoji.RedCircle, fail.Expression) + fmt.Printf(" Actual expression values:\n") + for key, value := range fail.Debug { + fmt.Printf(" %s = '%s'\n", key, strings.TrimSuffix(value, "\n")) + } + fmt.Println() + } + } + if !forceClean && !noClean { + prompt := &survey.Confirm{ + Message: fmt.Sprintf("\nDo you want to remove runtime folder for test '%s'? 
(default: Yes)", test.Name), + Default: true, + } + if err := survey.AskOne(prompt, &cleanTestEnv); err != nil { + return fmt.Errorf("unable to ask to remove runtime folder: %w", err) + } + } + } + + if cleanTestEnv || forceClean { + if err := test.Clean(); err != nil { + return fmt.Errorf("unable to clean test '%s' env: %w", test.Name, err) + } + } + } + } + + switch cfg.Cscli.Output { + case "human": + hubTestResultTable(color.Output, cfg.Cscli.Color, testResult) + case "json": + jsonResult := make(map[string][]string, 0) + jsonResult["success"] = make([]string, 0) + jsonResult["fail"] = make([]string, 0) + for testName, success := range testResult { + if success { + jsonResult["success"] = append(jsonResult["success"], testName) + } else { + jsonResult["fail"] = append(jsonResult["fail"], testName) + } + } + jsonStr, err := json.Marshal(jsonResult) + if err != nil { + return fmt.Errorf("unable to json test result: %w", err) + } + fmt.Println(string(jsonStr)) + default: + return errors.New("only human/json output modes are supported") + } + + if !success { + return errors.New("some tests failed") + } + + return nil + }, + } + + cmd.Flags().BoolVar(&noClean, "no-clean", false, "Don't clean runtime environment if test succeed") + cmd.Flags().BoolVar(&forceClean, "clean", false, "Clean runtime environment if test fail") + cmd.Flags().StringVar(&NucleiTargetHost, "target", hubtest.DefaultNucleiTarget, "Target for AppSec Test") + cmd.Flags().StringVar(&AppSecHost, "host", hubtest.DefaultAppsecHost, "Address to expose AppSec for hubtest") + cmd.Flags().BoolVar(&runAll, "all", false, "Run all tests") + + return cmd +} diff --git a/cmd/crowdsec-cli/hubtest_table.go b/cmd/crowdsec-cli/clihubtest/table.go similarity index 50% rename from cmd/crowdsec-cli/hubtest_table.go rename to cmd/crowdsec-cli/clihubtest/table.go index 1fa0f990be2..2a105a1f5c1 100644 --- a/cmd/crowdsec-cli/hubtest_table.go +++ b/cmd/crowdsec-cli/clihubtest/table.go @@ -1,4 +1,4 @@ -package main 
+package clihubtest import ( "fmt" @@ -42,51 +42,9 @@ func hubTestListTable(out io.Writer, wantColor string, tests []*hubtest.HubTestI t.Render() } -func hubTestParserCoverageTable(out io.Writer, wantColor string, coverage []hubtest.Coverage) { +func hubTestCoverageTable(out io.Writer, wantColor string, headers []string, coverage []hubtest.Coverage) { t := cstable.NewLight(out, wantColor) - t.SetHeaders("Parser", "Status", "Number of tests") - t.SetHeaderAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft) - t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft) - - parserTested := 0 - - for _, test := range coverage { - status := emoji.RedCircle - if test.TestsCount > 0 { - status = emoji.GreenCircle - parserTested++ - } - - t.AddRow(test.Name, status, fmt.Sprintf("%d times (across %d tests)", test.TestsCount, len(test.PresentIn))) - } - - t.Render() -} - -func hubTestAppsecRuleCoverageTable(out io.Writer, wantColor string, coverage []hubtest.Coverage) { - t := cstable.NewLight(out, wantColor) - t.SetHeaders("Appsec Rule", "Status", "Number of tests") - t.SetHeaderAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft) - t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft) - - parserTested := 0 - - for _, test := range coverage { - status := emoji.RedCircle - if test.TestsCount > 0 { - status = emoji.GreenCircle - parserTested++ - } - - t.AddRow(test.Name, status, fmt.Sprintf("%d times (across %d tests)", test.TestsCount, len(test.PresentIn))) - } - - t.Render() -} - -func hubTestScenarioCoverageTable(out io.Writer, wantColor string, coverage []hubtest.Coverage) { - t := cstable.NewLight(out, wantColor) - t.SetHeaders("Scenario", "Status", "Number of tests") + t.SetHeaders(headers...) 
t.SetHeaderAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft) t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft) diff --git a/cmd/crowdsec-cli/hubtest.go b/cmd/crowdsec-cli/hubtest.go deleted file mode 100644 index 2a4635d39f1..00000000000 --- a/cmd/crowdsec-cli/hubtest.go +++ /dev/null @@ -1,746 +0,0 @@ -package main - -import ( - "encoding/json" - "errors" - "fmt" - "math" - "os" - "path/filepath" - "strings" - "text/template" - - "github.com/AlecAivazis/survey/v2" - "github.com/fatih/color" - log "github.com/sirupsen/logrus" - "github.com/spf13/cobra" - "gopkg.in/yaml.v3" - - "github.com/crowdsecurity/crowdsec/pkg/dumps" - "github.com/crowdsecurity/crowdsec/pkg/emoji" - "github.com/crowdsecurity/crowdsec/pkg/hubtest" -) - -var ( - HubTest hubtest.HubTest - HubAppsecTests hubtest.HubTest - hubPtr *hubtest.HubTest - isAppsecTest bool -) - -type cliHubTest struct { - cfg configGetter -} - -func NewCLIHubTest(cfg configGetter) *cliHubTest { - return &cliHubTest{ - cfg: cfg, - } -} - -func (cli *cliHubTest) NewCommand() *cobra.Command { - var ( - hubPath string - crowdsecPath string - cscliPath string - ) - - cmd := &cobra.Command{ - Use: "hubtest", - Short: "Run functional tests on hub configurations", - Long: "Run functional tests on hub configurations (parsers, scenarios, collections...)", - Args: cobra.ExactArgs(0), - DisableAutoGenTag: true, - PersistentPreRunE: func(_ *cobra.Command, _ []string) error { - var err error - HubTest, err = hubtest.NewHubTest(hubPath, crowdsecPath, cscliPath, false) - if err != nil { - return fmt.Errorf("unable to load hubtest: %+v", err) - } - - HubAppsecTests, err = hubtest.NewHubTest(hubPath, crowdsecPath, cscliPath, true) - if err != nil { - return fmt.Errorf("unable to load appsec specific hubtest: %+v", err) - } - - // commands will use the hubPtr, will point to the default hubTest object, or the one dedicated to appsec tests - hubPtr = &HubTest - if isAppsecTest { - hubPtr = &HubAppsecTests - } - - return 
nil - }, - } - - cmd.PersistentFlags().StringVar(&hubPath, "hub", ".", "Path to hub folder") - cmd.PersistentFlags().StringVar(&crowdsecPath, "crowdsec", "crowdsec", "Path to crowdsec") - cmd.PersistentFlags().StringVar(&cscliPath, "cscli", "cscli", "Path to cscli") - cmd.PersistentFlags().BoolVar(&isAppsecTest, "appsec", false, "Command relates to appsec tests") - - cmd.AddCommand(cli.NewCreateCmd()) - cmd.AddCommand(cli.NewRunCmd()) - cmd.AddCommand(cli.NewCleanCmd()) - cmd.AddCommand(cli.NewInfoCmd()) - cmd.AddCommand(cli.NewListCmd()) - cmd.AddCommand(cli.NewCoverageCmd()) - cmd.AddCommand(cli.NewEvalCmd()) - cmd.AddCommand(cli.NewExplainCmd()) - - return cmd -} - -func (cli *cliHubTest) NewCreateCmd() *cobra.Command { - var ( - ignoreParsers bool - labels map[string]string - logType string - ) - - parsers := []string{} - postoverflows := []string{} - scenarios := []string{} - - cmd := &cobra.Command{ - Use: "create", - Short: "create [test_name]", - Example: `cscli hubtest create my-awesome-test --type syslog -cscli hubtest create my-nginx-custom-test --type nginx -cscli hubtest create my-scenario-test --parsers crowdsecurity/nginx --scenarios crowdsecurity/http-probing`, - Args: cobra.ExactArgs(1), - DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, args []string) error { - testName := args[0] - testPath := filepath.Join(hubPtr.HubTestPath, testName) - if _, err := os.Stat(testPath); os.IsExist(err) { - return fmt.Errorf("test '%s' already exists in '%s', exiting", testName, testPath) - } - - if isAppsecTest { - logType = "appsec" - } - - if logType == "" { - return errors.New("please provide a type (--type) for the test") - } - - if err := os.MkdirAll(testPath, os.ModePerm); err != nil { - return fmt.Errorf("unable to create folder '%s': %+v", testPath, err) - } - - configFilePath := filepath.Join(testPath, "config.yaml") - - configFileData := &hubtest.HubTestItemConfig{} - if logType == "appsec" { - // create empty nuclei template file - 
nucleiFileName := fmt.Sprintf("%s.yaml", testName) - nucleiFilePath := filepath.Join(testPath, nucleiFileName) - - nucleiFile, err := os.OpenFile(nucleiFilePath, os.O_RDWR|os.O_CREATE, 0o755) - if err != nil { - return err - } - - ntpl := template.Must(template.New("nuclei").Parse(hubtest.TemplateNucleiFile)) - if ntpl == nil { - return errors.New("unable to parse nuclei template") - } - ntpl.ExecuteTemplate(nucleiFile, "nuclei", struct{ TestName string }{TestName: testName}) - nucleiFile.Close() - configFileData.AppsecRules = []string{"./appsec-rules//your_rule_here.yaml"} - configFileData.NucleiTemplate = nucleiFileName - fmt.Println() - fmt.Printf(" Test name : %s\n", testName) - fmt.Printf(" Test path : %s\n", testPath) - fmt.Printf(" Config File : %s\n", configFilePath) - fmt.Printf(" Nuclei Template : %s\n", nucleiFilePath) - } else { - // create empty log file - logFileName := fmt.Sprintf("%s.log", testName) - logFilePath := filepath.Join(testPath, logFileName) - logFile, err := os.Create(logFilePath) - if err != nil { - return err - } - logFile.Close() - - // create empty parser assertion file - parserAssertFilePath := filepath.Join(testPath, hubtest.ParserAssertFileName) - parserAssertFile, err := os.Create(parserAssertFilePath) - if err != nil { - return err - } - parserAssertFile.Close() - // create empty scenario assertion file - scenarioAssertFilePath := filepath.Join(testPath, hubtest.ScenarioAssertFileName) - scenarioAssertFile, err := os.Create(scenarioAssertFilePath) - if err != nil { - return err - } - scenarioAssertFile.Close() - - parsers = append(parsers, "crowdsecurity/syslog-logs") - parsers = append(parsers, "crowdsecurity/dateparse-enrich") - - if len(scenarios) == 0 { - scenarios = append(scenarios, "") - } - - if len(postoverflows) == 0 { - postoverflows = append(postoverflows, "") - } - configFileData.Parsers = parsers - configFileData.Scenarios = scenarios - configFileData.PostOverflows = postoverflows - configFileData.LogFile = 
logFileName - configFileData.LogType = logType - configFileData.IgnoreParsers = ignoreParsers - configFileData.Labels = labels - fmt.Println() - fmt.Printf(" Test name : %s\n", testName) - fmt.Printf(" Test path : %s\n", testPath) - fmt.Printf(" Log file : %s (please fill it with logs)\n", logFilePath) - fmt.Printf(" Parser assertion file : %s (please fill it with assertion)\n", parserAssertFilePath) - fmt.Printf(" Scenario assertion file : %s (please fill it with assertion)\n", scenarioAssertFilePath) - fmt.Printf(" Configuration File : %s (please fill it with parsers, scenarios...)\n", configFilePath) - } - - fd, err := os.Create(configFilePath) - if err != nil { - return fmt.Errorf("open: %w", err) - } - data, err := yaml.Marshal(configFileData) - if err != nil { - return fmt.Errorf("marshal: %w", err) - } - _, err = fd.Write(data) - if err != nil { - return fmt.Errorf("write: %w", err) - } - if err := fd.Close(); err != nil { - return fmt.Errorf("close: %w", err) - } - - return nil - }, - } - - cmd.PersistentFlags().StringVarP(&logType, "type", "t", "", "Log type of the test") - cmd.Flags().StringSliceVarP(&parsers, "parsers", "p", parsers, "Parsers to add to test") - cmd.Flags().StringSliceVar(&postoverflows, "postoverflows", postoverflows, "Postoverflows to add to test") - cmd.Flags().StringSliceVarP(&scenarios, "scenarios", "s", scenarios, "Scenarios to add to test") - cmd.PersistentFlags().BoolVar(&ignoreParsers, "ignore-parsers", false, "Don't run test on parsers") - - return cmd -} - - -func (cli *cliHubTest) run(runAll bool, NucleiTargetHost string, AppSecHost string, args []string) error { - cfg := cli.cfg() - - if !runAll && len(args) == 0 { - return errors.New("please provide test to run or --all flag") - } - hubPtr.NucleiTargetHost = NucleiTargetHost - hubPtr.AppSecHost = AppSecHost - if runAll { - if err := hubPtr.LoadAllTests(); err != nil { - return fmt.Errorf("unable to load all tests: %+v", err) - } - } else { - for _, testName := range args { - 
_, err := hubPtr.LoadTestItem(testName) - if err != nil { - return fmt.Errorf("unable to load test '%s': %w", testName, err) - } - } - } - - // set timezone to avoid DST issues - os.Setenv("TZ", "UTC") - for _, test := range hubPtr.Tests { - if cfg.Cscli.Output == "human" { - log.Infof("Running test '%s'", test.Name) - } - err := test.Run() - if err != nil { - log.Errorf("running test '%s' failed: %+v", test.Name, err) - } - } - - return nil -} - - -func (cli *cliHubTest) NewRunCmd() *cobra.Command { - var ( - noClean bool - runAll bool - forceClean bool - NucleiTargetHost string - AppSecHost string - ) - - cmd := &cobra.Command{ - Use: "run", - Short: "run [test_name]", - DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, args []string) error { - return cli.run(runAll, NucleiTargetHost, AppSecHost, args) - }, - PersistentPostRunE: func(_ *cobra.Command, _ []string) error { - cfg := cli.cfg() - - success := true - testResult := make(map[string]bool) - for _, test := range hubPtr.Tests { - if test.AutoGen && !isAppsecTest { - if test.ParserAssert.AutoGenAssert { - log.Warningf("Assert file '%s' is empty, generating assertion:", test.ParserAssert.File) - fmt.Println() - fmt.Println(test.ParserAssert.AutoGenAssertData) - } - if test.ScenarioAssert.AutoGenAssert { - log.Warningf("Assert file '%s' is empty, generating assertion:", test.ScenarioAssert.File) - fmt.Println() - fmt.Println(test.ScenarioAssert.AutoGenAssertData) - } - if !noClean { - if err := test.Clean(); err != nil { - return fmt.Errorf("unable to clean test '%s' env: %w", test.Name, err) - } - } - return fmt.Errorf("please fill your assert file(s) for test '%s', exiting", test.Name) - } - testResult[test.Name] = test.Success - if test.Success { - if cfg.Cscli.Output == "human" { - log.Infof("Test '%s' passed successfully (%d assertions)\n", test.Name, test.ParserAssert.NbAssert+test.ScenarioAssert.NbAssert) - } - if !noClean { - if err := test.Clean(); err != nil { - return fmt.Errorf("unable to 
clean test '%s' env: %w", test.Name, err) - } - } - } else { - success = false - cleanTestEnv := false - if cfg.Cscli.Output == "human" { - if len(test.ParserAssert.Fails) > 0 { - fmt.Println() - log.Errorf("Parser test '%s' failed (%d errors)\n", test.Name, len(test.ParserAssert.Fails)) - for _, fail := range test.ParserAssert.Fails { - fmt.Printf("(L.%d) %s => %s\n", fail.Line, emoji.RedCircle, fail.Expression) - fmt.Printf(" Actual expression values:\n") - for key, value := range fail.Debug { - fmt.Printf(" %s = '%s'\n", key, strings.TrimSuffix(value, "\n")) - } - fmt.Println() - } - } - if len(test.ScenarioAssert.Fails) > 0 { - fmt.Println() - log.Errorf("Scenario test '%s' failed (%d errors)\n", test.Name, len(test.ScenarioAssert.Fails)) - for _, fail := range test.ScenarioAssert.Fails { - fmt.Printf("(L.%d) %s => %s\n", fail.Line, emoji.RedCircle, fail.Expression) - fmt.Printf(" Actual expression values:\n") - for key, value := range fail.Debug { - fmt.Printf(" %s = '%s'\n", key, strings.TrimSuffix(value, "\n")) - } - fmt.Println() - } - } - if !forceClean && !noClean { - prompt := &survey.Confirm{ - Message: fmt.Sprintf("\nDo you want to remove runtime folder for test '%s'? 
(default: Yes)", test.Name), - Default: true, - } - if err := survey.AskOne(prompt, &cleanTestEnv); err != nil { - return fmt.Errorf("unable to ask to remove runtime folder: %w", err) - } - } - } - - if cleanTestEnv || forceClean { - if err := test.Clean(); err != nil { - return fmt.Errorf("unable to clean test '%s' env: %w", test.Name, err) - } - } - } - } - - switch cfg.Cscli.Output { - case "human": - hubTestResultTable(color.Output, cfg.Cscli.Color, testResult) - case "json": - jsonResult := make(map[string][]string, 0) - jsonResult["success"] = make([]string, 0) - jsonResult["fail"] = make([]string, 0) - for testName, success := range testResult { - if success { - jsonResult["success"] = append(jsonResult["success"], testName) - } else { - jsonResult["fail"] = append(jsonResult["fail"], testName) - } - } - jsonStr, err := json.Marshal(jsonResult) - if err != nil { - return fmt.Errorf("unable to json test result: %w", err) - } - fmt.Println(string(jsonStr)) - default: - return errors.New("only human/json output modes are supported") - } - - if !success { - return errors.New("some tests failed") - } - - return nil - }, - } - - cmd.Flags().BoolVar(&noClean, "no-clean", false, "Don't clean runtime environment if test succeed") - cmd.Flags().BoolVar(&forceClean, "clean", false, "Clean runtime environment if test fail") - cmd.Flags().StringVar(&NucleiTargetHost, "target", hubtest.DefaultNucleiTarget, "Target for AppSec Test") - cmd.Flags().StringVar(&AppSecHost, "host", hubtest.DefaultAppsecHost, "Address to expose AppSec for hubtest") - cmd.Flags().BoolVar(&runAll, "all", false, "Run all tests") - - return cmd -} - -func (cli *cliHubTest) NewCleanCmd() *cobra.Command { - cmd := &cobra.Command{ - Use: "clean", - Short: "clean [test_name]", - Args: cobra.MinimumNArgs(1), - DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, args []string) error { - for _, testName := range args { - test, err := hubPtr.LoadTestItem(testName) - if err != nil { - return 
fmt.Errorf("unable to load test '%s': %w", testName, err) - } - if err := test.Clean(); err != nil { - return fmt.Errorf("unable to clean test '%s' env: %w", test.Name, err) - } - } - - return nil - }, - } - - return cmd -} - -func (cli *cliHubTest) NewInfoCmd() *cobra.Command { - cmd := &cobra.Command{ - Use: "info", - Short: "info [test_name]", - Args: cobra.MinimumNArgs(1), - DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, args []string) error { - for _, testName := range args { - test, err := hubPtr.LoadTestItem(testName) - if err != nil { - return fmt.Errorf("unable to load test '%s': %w", testName, err) - } - fmt.Println() - fmt.Printf(" Test name : %s\n", test.Name) - fmt.Printf(" Test path : %s\n", test.Path) - if isAppsecTest { - fmt.Printf(" Nuclei Template : %s\n", test.Config.NucleiTemplate) - fmt.Printf(" Appsec Rules : %s\n", strings.Join(test.Config.AppsecRules, ", ")) - } else { - fmt.Printf(" Log file : %s\n", filepath.Join(test.Path, test.Config.LogFile)) - fmt.Printf(" Parser assertion file : %s\n", filepath.Join(test.Path, hubtest.ParserAssertFileName)) - fmt.Printf(" Scenario assertion file : %s\n", filepath.Join(test.Path, hubtest.ScenarioAssertFileName)) - } - fmt.Printf(" Configuration File : %s\n", filepath.Join(test.Path, "config.yaml")) - } - - return nil - }, - } - - return cmd -} - -func (cli *cliHubTest) NewListCmd() *cobra.Command { - cmd := &cobra.Command{ - Use: "list", - Short: "list", - DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, _ []string) error { - cfg := cli.cfg() - - if err := hubPtr.LoadAllTests(); err != nil { - return fmt.Errorf("unable to load all tests: %w", err) - } - - switch cfg.Cscli.Output { - case "human": - hubTestListTable(color.Output, cfg.Cscli.Color, hubPtr.Tests) - case "json": - j, err := json.MarshalIndent(hubPtr.Tests, " ", " ") - if err != nil { - return err - } - fmt.Println(string(j)) - default: - return errors.New("only human/json output modes are supported") - } - - return nil - }, 
- } - - return cmd -} - -func (cli *cliHubTest) coverage(showScenarioCov bool, showParserCov bool, showAppsecCov bool, showOnlyPercent bool) error { - cfg := cli.cfg() - - // for this one we explicitly don't do for appsec - if err := HubTest.LoadAllTests(); err != nil { - return fmt.Errorf("unable to load all tests: %+v", err) - } - - var err error - - scenarioCoverage := []hubtest.Coverage{} - parserCoverage := []hubtest.Coverage{} - appsecRuleCoverage := []hubtest.Coverage{} - scenarioCoveragePercent := 0 - parserCoveragePercent := 0 - appsecRuleCoveragePercent := 0 - - // if both are false (flag by default), show both - showAll := !showScenarioCov && !showParserCov && !showAppsecCov - - if showParserCov || showAll { - parserCoverage, err = HubTest.GetParsersCoverage() - if err != nil { - return fmt.Errorf("while getting parser coverage: %w", err) - } - - parserTested := 0 - - for _, test := range parserCoverage { - if test.TestsCount > 0 { - parserTested++ - } - } - - parserCoveragePercent = int(math.Round((float64(parserTested) / float64(len(parserCoverage)) * 100))) - } - - if showScenarioCov || showAll { - scenarioCoverage, err = HubTest.GetScenariosCoverage() - if err != nil { - return fmt.Errorf("while getting scenario coverage: %w", err) - } - - scenarioTested := 0 - - for _, test := range scenarioCoverage { - if test.TestsCount > 0 { - scenarioTested++ - } - } - - scenarioCoveragePercent = int(math.Round((float64(scenarioTested) / float64(len(scenarioCoverage)) * 100))) - } - - if showAppsecCov || showAll { - appsecRuleCoverage, err = HubTest.GetAppsecCoverage() - if err != nil { - return fmt.Errorf("while getting scenario coverage: %w", err) - } - - appsecRuleTested := 0 - - for _, test := range appsecRuleCoverage { - if test.TestsCount > 0 { - appsecRuleTested++ - } - } - - appsecRuleCoveragePercent = int(math.Round((float64(appsecRuleTested) / float64(len(appsecRuleCoverage)) * 100))) - } - - if showOnlyPercent { - switch { - case showAll: - 
fmt.Printf("parsers=%d%%\nscenarios=%d%%\nappsec_rules=%d%%", parserCoveragePercent, scenarioCoveragePercent, appsecRuleCoveragePercent) - case showParserCov: - fmt.Printf("parsers=%d%%", parserCoveragePercent) - case showScenarioCov: - fmt.Printf("scenarios=%d%%", scenarioCoveragePercent) - case showAppsecCov: - fmt.Printf("appsec_rules=%d%%", appsecRuleCoveragePercent) - } - - return nil - } - - switch cfg.Cscli.Output { - case "human": - if showParserCov || showAll { - hubTestParserCoverageTable(color.Output, cfg.Cscli.Color, parserCoverage) - } - - if showScenarioCov || showAll { - hubTestScenarioCoverageTable(color.Output, cfg.Cscli.Color, scenarioCoverage) - } - - if showAppsecCov || showAll { - hubTestAppsecRuleCoverageTable(color.Output, cfg.Cscli.Color, appsecRuleCoverage) - } - - fmt.Println() - - if showParserCov || showAll { - fmt.Printf("PARSERS : %d%% of coverage\n", parserCoveragePercent) - } - - if showScenarioCov || showAll { - fmt.Printf("SCENARIOS : %d%% of coverage\n", scenarioCoveragePercent) - } - - if showAppsecCov || showAll { - fmt.Printf("APPSEC RULES : %d%% of coverage\n", appsecRuleCoveragePercent) - } - case "json": - dump, err := json.MarshalIndent(parserCoverage, "", " ") - if err != nil { - return err - } - - fmt.Printf("%s", dump) - - dump, err = json.MarshalIndent(scenarioCoverage, "", " ") - if err != nil { - return err - } - - fmt.Printf("%s", dump) - - dump, err = json.MarshalIndent(appsecRuleCoverage, "", " ") - if err != nil { - return err - } - - fmt.Printf("%s", dump) - default: - return errors.New("only human/json output modes are supported") - } - - return nil -} - -func (cli *cliHubTest) NewCoverageCmd() *cobra.Command { - var ( - showParserCov bool - showScenarioCov bool - showOnlyPercent bool - showAppsecCov bool - ) - - cmd := &cobra.Command{ - Use: "coverage", - Short: "coverage", - DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, _ []string) error { - return cli.coverage(showScenarioCov, showParserCov, 
showAppsecCov, showOnlyPercent) - }, - } - - cmd.PersistentFlags().BoolVar(&showOnlyPercent, "percent", false, "Show only percentages of coverage") - cmd.PersistentFlags().BoolVar(&showParserCov, "parsers", false, "Show only parsers coverage") - cmd.PersistentFlags().BoolVar(&showScenarioCov, "scenarios", false, "Show only scenarios coverage") - cmd.PersistentFlags().BoolVar(&showAppsecCov, "appsec", false, "Show only appsec coverage") - - return cmd -} - -func (cli *cliHubTest) NewEvalCmd() *cobra.Command { - var evalExpression string - - cmd := &cobra.Command{ - Use: "eval", - Short: "eval [test_name]", - Args: cobra.ExactArgs(1), - DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, args []string) error { - for _, testName := range args { - test, err := hubPtr.LoadTestItem(testName) - if err != nil { - return fmt.Errorf("can't load test: %+v", err) - } - - err = test.ParserAssert.LoadTest(test.ParserResultFile) - if err != nil { - return fmt.Errorf("can't load test results from '%s': %+v", test.ParserResultFile, err) - } - - output, err := test.ParserAssert.EvalExpression(evalExpression) - if err != nil { - return err - } - - fmt.Print(output) - } - - return nil - }, - } - - cmd.PersistentFlags().StringVarP(&evalExpression, "expr", "e", "", "Expression to eval") - - return cmd -} - -func (cli *cliHubTest) NewExplainCmd() *cobra.Command { - cmd := &cobra.Command{ - Use: "explain", - Short: "explain [test_name]", - Args: cobra.ExactArgs(1), - DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, args []string) error { - for _, testName := range args { - test, err := HubTest.LoadTestItem(testName) - if err != nil { - return fmt.Errorf("can't load test: %+v", err) - } - err = test.ParserAssert.LoadTest(test.ParserResultFile) - if err != nil { - if err = test.Run(); err != nil { - return fmt.Errorf("running test '%s' failed: %+v", test.Name, err) - } - - if err = test.ParserAssert.LoadTest(test.ParserResultFile); err != nil { - return fmt.Errorf("unable to load 
parser result after run: %w", err) - } - } - - err = test.ScenarioAssert.LoadTest(test.ScenarioResultFile, test.BucketPourResultFile) - if err != nil { - if err = test.Run(); err != nil { - return fmt.Errorf("running test '%s' failed: %+v", test.Name, err) - } - - if err = test.ScenarioAssert.LoadTest(test.ScenarioResultFile, test.BucketPourResultFile); err != nil { - return fmt.Errorf("unable to load scenario result after run: %w", err) - } - } - opts := dumps.DumpOpts{} - dumps.DumpTree(*test.ParserAssert.TestData, *test.ScenarioAssert.PourData, opts) - } - - return nil - }, - } - - return cmd -} diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index ab8b4f82518..12128f6091a 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -17,6 +17,7 @@ import ( "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clicapi" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cliconsole" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cliexplain" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clihubtest" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clilapi" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/climetrics" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clinotifications" @@ -261,7 +262,7 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewCompletionCmd()) cmd.AddCommand(cliconsole.New(cli.cfg).NewCommand()) cmd.AddCommand(cliexplain.New(cli.cfg, ConfigFilePath).NewCommand()) - cmd.AddCommand(NewCLIHubTest(cli.cfg).NewCommand()) + cmd.AddCommand(clihubtest.New(cli.cfg).NewCommand()) cmd.AddCommand(clinotifications.New(cli.cfg).NewCommand()) cmd.AddCommand(NewCLISupport(cli.cfg).NewCommand()) cmd.AddCommand(clipapi.New(cli.cfg).NewCommand()) From eec32ad64b4b5fbdb039f36f2dd681f94c3597a2 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 28 Aug 2024 10:52:49 +0200 Subject: [PATCH 264/318] cscli refact: extract packages ask, 
clientinfo (#3197) * cscli: extrack package 'crowdsec-cli/ask' * cscli: extract package 'crowdsec-cli/clientinfo' --- cmd/crowdsec-cli/ask/ask.go | 20 +++++++ cmd/crowdsec-cli/bouncers.go | 64 +++-------------------- cmd/crowdsec-cli/clientinfo/clientinfo.go | 39 ++++++++++++++ cmd/crowdsec-cli/machines.go | 16 +++--- 4 files changed, 76 insertions(+), 63 deletions(-) create mode 100644 cmd/crowdsec-cli/ask/ask.go create mode 100644 cmd/crowdsec-cli/clientinfo/clientinfo.go diff --git a/cmd/crowdsec-cli/ask/ask.go b/cmd/crowdsec-cli/ask/ask.go new file mode 100644 index 00000000000..484ccb30c8a --- /dev/null +++ b/cmd/crowdsec-cli/ask/ask.go @@ -0,0 +1,20 @@ +package ask + +import ( + "github.com/AlecAivazis/survey/v2" +) + +func YesNo(message string, defaultAnswer bool) (bool, error) { + var answer bool + + prompt := &survey.Confirm{ + Message: message, + Default: defaultAnswer, + } + + if err := survey.AskOne(prompt, &answer); err != nil { + return defaultAnswer, err + } + + return answer, nil +} diff --git a/cmd/crowdsec-cli/bouncers.go b/cmd/crowdsec-cli/bouncers.go index d3edcea0db9..68ce1a2fa05 100644 --- a/cmd/crowdsec-cli/bouncers.go +++ b/cmd/crowdsec-cli/bouncers.go @@ -11,12 +11,13 @@ import ( "strings" "time" - "github.com/AlecAivazis/survey/v2" "github.com/fatih/color" "github.com/jedib0t/go-pretty/v6/table" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/ask" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clientinfo" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" middlewares "github.com/crowdsecurity/crowdsec/pkg/apiserver/middlewares/v1" @@ -27,55 +28,6 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) -type featureflagProvider interface { - GetFeatureflags() string -} - -type osProvider interface { - GetOsname() string - GetOsversion() string -} - -func getOSNameAndVersion(o osProvider) string { 
- ret := o.GetOsname() - if o.GetOsversion() != "" { - if ret != "" { - ret += "/" - } - - ret += o.GetOsversion() - } - - if ret == "" { - return "?" - } - - return ret -} - -func getFeatureFlagList(o featureflagProvider) []string { - if o.GetFeatureflags() == "" { - return nil - } - - return strings.Split(o.GetFeatureflags(), ",") -} - -func askYesNo(message string, defaultAnswer bool) (bool, error) { - var answer bool - - prompt := &survey.Confirm{ - Message: message, - Default: defaultAnswer, - } - - if err := survey.AskOne(prompt, &answer); err != nil { - return defaultAnswer, err - } - - return answer, nil -} - type cliBouncers struct { db *database.Client cfg configGetter @@ -171,8 +123,8 @@ func newBouncerInfo(b *ent.Bouncer) bouncerInfo { Version: b.Version, LastPull: b.LastPull, AuthType: b.AuthType, - OS: getOSNameAndVersion(b), - Featureflags: getFeatureFlagList(b), + OS: clientinfo.GetOSNameAndVersion(b), + Featureflags: clientinfo.GetFeatureFlagList(b), } } @@ -385,7 +337,7 @@ func (cli *cliBouncers) newDeleteCmd() *cobra.Command { func (cli *cliBouncers) prune(duration time.Duration, force bool) error { if duration < 2*time.Minute { - if yes, err := askYesNo( + if yes, err := ask.YesNo( "The duration you provided is less than 2 minutes. "+ "This may remove active bouncers. Continue?", false); err != nil { return err @@ -408,7 +360,7 @@ func (cli *cliBouncers) prune(duration time.Duration, force bool) error { cli.listHuman(color.Output, bouncers) if !force { - if yes, err := askYesNo( + if yes, err := ask.YesNo( "You are about to PERMANENTLY remove the above bouncers from the database. "+ "These will NOT be recoverable. 
Continue?", false); err != nil { return err @@ -478,10 +430,10 @@ func (cli *cliBouncers) inspectHuman(out io.Writer, bouncer *ent.Bouncer) { {"Version", bouncer.Version}, {"Last Pull", lastPull}, {"Auth type", bouncer.AuthType}, - {"OS", getOSNameAndVersion(bouncer)}, + {"OS", clientinfo.GetOSNameAndVersion(bouncer)}, }) - for _, ff := range getFeatureFlagList(bouncer) { + for _, ff := range clientinfo.GetFeatureFlagList(bouncer) { t.AppendRow(table.Row{"Feature Flags", ff}) } diff --git a/cmd/crowdsec-cli/clientinfo/clientinfo.go b/cmd/crowdsec-cli/clientinfo/clientinfo.go new file mode 100644 index 00000000000..0bf1d98804f --- /dev/null +++ b/cmd/crowdsec-cli/clientinfo/clientinfo.go @@ -0,0 +1,39 @@ +package clientinfo + +import ( + "strings" +) + +type featureflagProvider interface { + GetFeatureflags() string +} + +type osProvider interface { + GetOsname() string + GetOsversion() string +} + +func GetOSNameAndVersion(o osProvider) string { + ret := o.GetOsname() + if o.GetOsversion() != "" { + if ret != "" { + ret += "/" + } + + ret += o.GetOsversion() + } + + if ret == "" { + return "?" 
+ } + + return ret +} + +func GetFeatureFlagList(o featureflagProvider) []string { + if o.GetFeatureflags() == "" { + return nil + } + + return strings.Split(o.GetFeatureflags(), ",") +} diff --git a/cmd/crowdsec-cli/machines.go b/cmd/crowdsec-cli/machines.go index 34d0b1b9208..8b35245405f 100644 --- a/cmd/crowdsec-cli/machines.go +++ b/cmd/crowdsec-cli/machines.go @@ -19,6 +19,8 @@ import ( "github.com/spf13/cobra" "gopkg.in/yaml.v3" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/ask" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clientinfo" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/idgen" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" @@ -138,7 +140,7 @@ func (cli *cliMachines) listHuman(out io.Writer, machines ent.Machines) { hb = emoji.Warning + " " + hb } - t.AppendRow(table.Row{m.MachineId, m.IpAddress, m.UpdatedAt.Format(time.RFC3339), validated, m.Version, getOSNameAndVersion(m), m.AuthType, hb}) + t.AppendRow(table.Row{m.MachineId, m.IpAddress, m.UpdatedAt.Format(time.RFC3339), validated, m.Version, clientinfo.GetOSNameAndVersion(m), m.AuthType, hb}) } io.WriteString(out, t.Render() + "\n") @@ -171,8 +173,8 @@ func newMachineInfo(m *ent.Machine) machineInfo { Version: m.Version, IsValidated: m.IsValidated, AuthType: m.AuthType, - OS: getOSNameAndVersion(m), - Featureflags: getFeatureFlagList(m), + OS: clientinfo.GetOSNameAndVersion(m), + Featureflags: clientinfo.GetFeatureFlagList(m), Datasources: m.Datasources, } } @@ -466,7 +468,7 @@ func (cli *cliMachines) newDeleteCmd() *cobra.Command { func (cli *cliMachines) prune(duration time.Duration, notValidOnly bool, force bool) error { if duration < 2*time.Minute && !notValidOnly { - if yes, err := askYesNo( + if yes, err := ask.YesNo( "The duration you provided is less than 2 minutes. "+ "This can break installations if the machines are only temporarily disconnected. 
Continue?", false); err != nil { return err @@ -495,7 +497,7 @@ func (cli *cliMachines) prune(duration time.Duration, notValidOnly bool, force b cli.listHuman(color.Output, machines) if !force { - if yes, err := askYesNo( + if yes, err := ask.YesNo( "You are about to PERMANENTLY remove the above machines from the database. "+ "These will NOT be recoverable. Continue?", false); err != nil { return err @@ -588,7 +590,7 @@ func (cli *cliMachines) inspectHuman(out io.Writer, machine *ent.Machine) { {"Last Heartbeat", machine.LastHeartbeat}, {"Validated?", machine.IsValidated}, {"CrowdSec version", machine.Version}, - {"OS", getOSNameAndVersion(machine)}, + {"OS", clientinfo.GetOSNameAndVersion(machine)}, {"Auth type", machine.AuthType}, }) @@ -596,7 +598,7 @@ func (cli *cliMachines) inspectHuman(out io.Writer, machine *ent.Machine) { t.AppendRow(table.Row{"Datasources", fmt.Sprintf("%s: %d", dsName, dsCount)}) } - for _, ff := range getFeatureFlagList(machine) { + for _, ff := range clientinfo.GetFeatureFlagList(machine) { t.AppendRow(table.Row{"Feature Flags", ff}) } From b880df9a683d249c2152408fb48d99b11dd1217f Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 28 Aug 2024 11:52:25 +0200 Subject: [PATCH 265/318] cscli refact: package 'clihub' (#3198) * cscli refact: package 'clihub' * check for errors --- .golangci.yml | 2 +- cmd/crowdsec-cli/{ => clihub}/hub.go | 28 ++++++++----- cmd/crowdsec-cli/{ => clihub}/item_metrics.go | 2 +- cmd/crowdsec-cli/{ => clihub}/items.go | 8 ++-- cmd/crowdsec-cli/{ => clihub}/utils_table.go | 2 +- cmd/crowdsec-cli/itemcli.go | 7 ++-- cmd/crowdsec-cli/main.go | 3 +- cmd/crowdsec-cli/support.go | 42 +++++++++---------- 8 files changed, 50 insertions(+), 44 deletions(-) rename cmd/crowdsec-cli/{ => clihub}/hub.go (90%) rename cmd/crowdsec-cli/{ => clihub}/item_metrics.go (99%) rename cmd/crowdsec-cli/{ => clihub}/items.go (95%) rename cmd/crowdsec-cli/{ => clihub}/utils_table.go (99%) diff --git 
a/.golangci.yml b/.golangci.yml index 62147b54101..c59ab372799 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -482,7 +482,7 @@ issues: - linters: - revive - path: "cmd/crowdsec-cli/item_metrics.go" + path: "cmd/crowdsec-cli/clihub/item_metrics.go" text: "deep-exit: .*" - linters: diff --git a/cmd/crowdsec-cli/hub.go b/cmd/crowdsec-cli/clihub/hub.go similarity index 90% rename from cmd/crowdsec-cli/hub.go rename to cmd/crowdsec-cli/clihub/hub.go index 34bd61b1277..22568355546 100644 --- a/cmd/crowdsec-cli/hub.go +++ b/cmd/crowdsec-cli/clihub/hub.go @@ -1,9 +1,10 @@ -package main +package clihub import ( "context" "encoding/json" "fmt" + "io" "github.com/fatih/color" log "github.com/sirupsen/logrus" @@ -11,14 +12,17 @@ import ( "gopkg.in/yaml.v3" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) +type configGetter = func() *csconfig.Config + type cliHub struct { cfg configGetter } -func NewCLIHub(cfg configGetter) *cliHub { +func New(cfg configGetter) *cliHub { return &cliHub{ cfg: cfg, } @@ -47,14 +51,9 @@ cscli hub upgrade`, return cmd } -func (cli *cliHub) list(all bool) error { +func (cli *cliHub) List(out io.Writer, hub *cwhub.Hub, all bool) error { cfg := cli.cfg() - hub, err := require.Hub(cfg, nil, log.StandardLogger()) - if err != nil { - return err - } - for _, v := range hub.Warnings { log.Info(v) } @@ -65,14 +64,16 @@ func (cli *cliHub) list(all bool) error { items := make(map[string][]*cwhub.Item) + var err error + for _, itemType := range cwhub.ItemTypes { - items[itemType], err = selectItems(hub, itemType, nil, !all) + items[itemType], err = SelectItems(hub, itemType, nil, !all) if err != nil { return err } } - err = listItems(color.Output, cfg.Cscli.Color, cwhub.ItemTypes, items, true, cfg.Cscli.Output) + err = ListItems(out, cfg.Cscli.Color, cwhub.ItemTypes, items, true, cfg.Cscli.Output) if err != nil { return err } @@ -89,7 +90,12 @@ 
func (cli *cliHub) newListCmd() *cobra.Command { Args: cobra.ExactArgs(0), DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { - return cli.list(all) + hub, err := require.Hub(cli.cfg(), nil, log.StandardLogger()) + if err != nil { + return err + } + + return cli.List(color.Output, hub, all) }, } diff --git a/cmd/crowdsec-cli/item_metrics.go b/cmd/crowdsec-cli/clihub/item_metrics.go similarity index 99% rename from cmd/crowdsec-cli/item_metrics.go rename to cmd/crowdsec-cli/clihub/item_metrics.go index f00ae08b00b..aaee63d1d38 100644 --- a/cmd/crowdsec-cli/item_metrics.go +++ b/cmd/crowdsec-cli/clihub/item_metrics.go @@ -1,4 +1,4 @@ -package main +package clihub import ( "net/http" diff --git a/cmd/crowdsec-cli/items.go b/cmd/crowdsec-cli/clihub/items.go similarity index 95% rename from cmd/crowdsec-cli/items.go rename to cmd/crowdsec-cli/clihub/items.go index 5a4fee4d582..4dd3c46e0f9 100644 --- a/cmd/crowdsec-cli/items.go +++ b/cmd/crowdsec-cli/clihub/items.go @@ -1,4 +1,4 @@ -package main +package clihub import ( "encoding/csv" @@ -16,7 +16,7 @@ import ( ) // selectItems returns a slice of items of a given type, selected by name and sorted by case-insensitive name -func selectItems(hub *cwhub.Hub, itemType string, args []string, installedOnly bool) ([]*cwhub.Item, error) { +func SelectItems(hub *cwhub.Hub, itemType string, args []string, installedOnly bool) ([]*cwhub.Item, error) { allItems := hub.GetItemsByType(itemType, true) itemNames := make([]string, len(allItems)) @@ -57,7 +57,7 @@ func selectItems(hub *cwhub.Hub, itemType string, args []string, installedOnly b return wantedItems, nil } -func listItems(out io.Writer, wantColor string, itemTypes []string, items map[string][]*cwhub.Item, omitIfEmpty bool, output string) error { +func ListItems(out io.Writer, wantColor string, itemTypes []string, items map[string][]*cwhub.Item, omitIfEmpty bool, output string) error { switch output { case "human": nothingToDisplay := true @@ -146,7 +146,7 
@@ func listItems(out io.Writer, wantColor string, itemTypes []string, items map[st return nil } -func inspectItem(item *cwhub.Item, showMetrics bool, output string, prometheusURL string, wantColor string) error { +func InspectItem(item *cwhub.Item, showMetrics bool, output string, prometheusURL string, wantColor string) error { switch output { case "human", "raw": enc := yaml.NewEncoder(os.Stdout) diff --git a/cmd/crowdsec-cli/utils_table.go b/cmd/crowdsec-cli/clihub/utils_table.go similarity index 99% rename from cmd/crowdsec-cli/utils_table.go rename to cmd/crowdsec-cli/clihub/utils_table.go index 6df16cd85f5..018071d91bb 100644 --- a/cmd/crowdsec-cli/utils_table.go +++ b/cmd/crowdsec-cli/clihub/utils_table.go @@ -1,4 +1,4 @@ -package main +package clihub import ( "fmt" diff --git a/cmd/crowdsec-cli/itemcli.go b/cmd/crowdsec-cli/itemcli.go index 11ac1232eae..a5629b425b9 100644 --- a/cmd/crowdsec-cli/itemcli.go +++ b/cmd/crowdsec-cli/itemcli.go @@ -15,6 +15,7 @@ import ( log "github.com/sirupsen/logrus" "github.com/spf13/cobra" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clihub" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/reload" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/cwhub" @@ -372,7 +373,7 @@ func (cli cliItem) inspect(ctx context.Context, args []string, url string, diff continue } - if err = inspectItem(item, !noMetrics, cfg.Cscli.Output, cfg.Cscli.PrometheusUrl, cfg.Cscli.Color); err != nil { + if err = clihub.InspectItem(item, !noMetrics, cfg.Cscli.Output, cfg.Cscli.PrometheusUrl, cfg.Cscli.Color); err != nil { return err } @@ -428,12 +429,12 @@ func (cli cliItem) list(args []string, all bool) error { items := make(map[string][]*cwhub.Item) - items[cli.name], err = selectItems(hub, cli.name, args, !all) + items[cli.name], err = clihub.SelectItems(hub, cli.name, args, !all) if err != nil { return err } - return listItems(color.Output, cfg.Cscli.Color, []string{cli.name}, items, 
false, cfg.Cscli.Output) + return clihub.ListItems(color.Output, cfg.Cscli.Color, []string{cli.name}, items, false, cfg.Cscli.Output) } func (cli cliItem) newListCmd() *cobra.Command { diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index 12128f6091a..2153ebfb7bb 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -17,6 +17,7 @@ import ( "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clicapi" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cliconsole" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cliexplain" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clihub" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clihubtest" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clilapi" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/climetrics" @@ -249,7 +250,7 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewCLIDoc().NewCommand(cmd)) cmd.AddCommand(NewCLIVersion().NewCommand()) cmd.AddCommand(NewCLIConfig(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLIHub(cli.cfg).NewCommand()) + cmd.AddCommand(clihub.New(cli.cfg).NewCommand()) cmd.AddCommand(climetrics.New(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIDashboard(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIDecisions(cli.cfg).NewCommand()) diff --git a/cmd/crowdsec-cli/support.go b/cmd/crowdsec-cli/support.go index 3ab1415f2ec..c48c84668ea 100644 --- a/cmd/crowdsec-cli/support.go +++ b/cmd/crowdsec-cli/support.go @@ -23,6 +23,7 @@ import ( "github.com/crowdsecurity/go-cs-lib/trace" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clicapi" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clihub" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clilapi" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/climetrics" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" @@ -38,7 +39,7 @@ const ( SUPPORT_VERSION_PATH = "version.txt" SUPPORT_FEATURES_PATH = "features.txt" SUPPORT_OS_INFO_PATH 
= "osinfo.txt" - SUPPORT_HUB_DIR = "hub/" + SUPPORT_HUB = "hub.txt" SUPPORT_BOUNCERS_PATH = "lapi/bouncers.txt" SUPPORT_AGENTS_PATH = "lapi/agents.txt" SUPPORT_CROWDSEC_CONFIG_PATH = "config/crowdsec.yaml" @@ -163,26 +164,23 @@ func (cli *cliSupport) dumpOSInfo(zw *zip.Writer) error { return nil } -func (cli *cliSupport) dumpHubItems(zw *zip.Writer, hub *cwhub.Hub, itemType string) error { - var err error - - out := new(bytes.Buffer) - - log.Infof("Collecting hub: %s", itemType) +func (cli *cliSupport) dumpHubItems(zw *zip.Writer, hub *cwhub.Hub) error { + if hub == nil { + return errors.New("no hub connection") + } - items := make(map[string][]*cwhub.Item) + log.Infof("Collecting hub") - if items[itemType], err = selectItems(hub, itemType, nil, true); err != nil { - return fmt.Errorf("could not collect %s list: %w", itemType, err) - } + out := new(bytes.Buffer) - if err := listItems(out, cli.cfg().Cscli.Color, []string{itemType}, items, false, "human"); err != nil { - return fmt.Errorf("could not list %s: %w", itemType, err) + ch := clihub.New(cli.cfg) + if err := ch.List(out, hub, false); err != nil { + return err } stripped := stripAnsiString(out.String()) - cli.writeToZip(zw, SUPPORT_HUB_DIR+itemType+".txt", time.Now(), strings.NewReader(stripped)) + cli.writeToZip(zw, SUPPORT_HUB, time.Now(), strings.NewReader(stripped)) return nil } @@ -198,7 +196,9 @@ func (cli *cliSupport) dumpBouncers(zw *zip.Writer, db *database.Client) error { // call the "cscli bouncers list" command directly, skip any preRun cm := cliBouncers{db: db, cfg: cli.cfg} - cm.list(out) + if err := cm.list(out); err != nil { + return err + } stripped := stripAnsiString(out.String()) @@ -218,7 +218,9 @@ func (cli *cliSupport) dumpAgents(zw *zip.Writer, db *database.Client) error { // call the "cscli machines list" command directly, skip any preRun cm := cliMachines{db: db, cfg: cli.cfg} - cm.list(out) + if err := cm.list(out); err != nil { + return err + } stripped := 
stripAnsiString(out.String()) @@ -513,12 +515,8 @@ func (cli *cliSupport) dump(ctx context.Context, outFile string) error { log.Warnf("could not collect main config file: %s", err) } - if hub != nil { - for _, itemType := range cwhub.ItemTypes { - if err = cli.dumpHubItems(zipWriter, hub, itemType); err != nil { - log.Warnf("could not collect %s information: %s", itemType, err) - } - } + if err = cli.dumpHubItems(zipWriter, hub); err != nil { + log.Warnf("could not collect hub information: %s", err) } if err = cli.dumpBouncers(zipWriter, db); err != nil { From 0fb64682fd17452784b81a42f6909bd322afcacb Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 29 Aug 2024 15:21:29 +0200 Subject: [PATCH 266/318] cscli refact: package cli{support, machine, bouncer} (#3199) * cscli refact: clisupport (reuse lapi status, capi status) * cscli refact: package clibouncer, climachine * cscli refact: package clisupport --- cmd/crowdsec-cli/{ => clibouncer}/bouncers.go | 16 +++-- cmd/crowdsec-cli/clicapi/capi.go | 42 +++++++------ cmd/crowdsec-cli/clilapi/lapi.go | 41 ++++++------ cmd/crowdsec-cli/{ => climachine}/flag.go | 2 +- cmd/crowdsec-cli/{ => climachine}/machines.go | 15 +++-- cmd/crowdsec-cli/{ => clisupport}/support.go | 62 +++++++------------ cmd/crowdsec-cli/main.go | 9 ++- test/bats/01_cscli.bats | 12 ++-- test/bats/03_noagent.bats | 2 +- test/bats/04_capi.bats | 10 +-- test/bats/04_nocapi.bats | 2 +- test/bats/09_socket.bats | 12 ++-- 12 files changed, 117 insertions(+), 108 deletions(-) rename cmd/crowdsec-cli/{ => clibouncer}/bouncers.go (96%) rename cmd/crowdsec-cli/{ => climachine}/flag.go (96%) rename cmd/crowdsec-cli/{ => climachine}/machines.go (98%) rename cmd/crowdsec-cli/{ => clisupport}/support.go (90%) diff --git a/cmd/crowdsec-cli/bouncers.go b/cmd/crowdsec-cli/clibouncer/bouncers.go similarity index 96% rename from cmd/crowdsec-cli/bouncers.go rename to cmd/crowdsec-cli/clibouncer/bouncers.go index 
68ce1a2fa05..0d1484bcc6b 100644 --- a/cmd/crowdsec-cli/bouncers.go +++ b/cmd/crowdsec-cli/clibouncer/bouncers.go @@ -1,4 +1,4 @@ -package main +package clibouncer import ( "encoding/csv" @@ -21,6 +21,7 @@ import ( "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" middlewares "github.com/crowdsecurity/crowdsec/pkg/apiserver/middlewares/v1" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/database" "github.com/crowdsecurity/crowdsec/pkg/database/ent" "github.com/crowdsecurity/crowdsec/pkg/database/ent/bouncer" @@ -28,12 +29,14 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) +type configGetter = func() *csconfig.Config + type cliBouncers struct { db *database.Client cfg configGetter } -func NewCLIBouncers(cfg configGetter) *cliBouncers { +func New(cfg configGetter) *cliBouncers { return &cliBouncers{ cfg: cfg, } @@ -156,8 +159,11 @@ func (cli *cliBouncers) listCSV(out io.Writer, bouncers ent.Bouncers) error { return nil } -func (cli *cliBouncers) list(out io.Writer) error { - bouncers, err := cli.db.ListBouncers() +func (cli *cliBouncers) List(out io.Writer, db *database.Client) error { + // XXX: must use the provided db object, the one in the struct might be nil + // (calling List directly skips the PersistentPreRunE) + + bouncers, err := db.ListBouncers() if err != nil { return fmt.Errorf("unable to list bouncers: %w", err) } @@ -194,7 +200,7 @@ func (cli *cliBouncers) newListCmd() *cobra.Command { Args: cobra.ExactArgs(0), DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { - return cli.list(color.Output) + return cli.List(color.Output, cli.db) }, } diff --git a/cmd/crowdsec-cli/clicapi/capi.go b/cmd/crowdsec-cli/clicapi/capi.go index bf45613c776..4d658e3a602 100644 --- a/cmd/crowdsec-cli/clicapi/capi.go +++ b/cmd/crowdsec-cli/clicapi/capi.go @@ -4,9 +4,11 @@ import ( "context" "errors" "fmt" + "io" "net/url" "os" + 
"github.com/fatih/color" "github.com/go-openapi/strfmt" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" @@ -23,7 +25,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) -type configGetter func() *csconfig.Config +type configGetter = func() *csconfig.Config type cliCapi struct { cfg configGetter @@ -147,11 +149,11 @@ func (cli *cliCapi) newRegisterCmd() *cobra.Command { return cmd } -// QueryCAPIStatus checks if the Local API is reachable, and if the credentials are correct. It then checks if the instance is enrolle in the console. -func QueryCAPIStatus(hub *cwhub.Hub, credURL string, login string, password string) (bool, bool, error) { +// queryCAPIStatus checks if the Central API is reachable, and if the credentials are correct. It then checks if the instance is enrolle in the console. +func queryCAPIStatus(hub *cwhub.Hub, credURL string, login string, password string) (bool, bool, error) { apiURL, err := url.Parse(credURL) if err != nil { - return false, false, fmt.Errorf("parsing api url: %w", err) + return false, false, err } itemsForAPI := hub.GetInstalledListForAPI() @@ -176,7 +178,7 @@ func QueryCAPIStatus(hub *cwhub.Hub, credURL string, login string, password stri }, }) if err != nil { - return false, false, fmt.Errorf("new client api: %w", err) + return false, false, err } pw := strfmt.Password(password) @@ -197,10 +199,11 @@ func QueryCAPIStatus(hub *cwhub.Hub, credURL string, login string, password stri if client.IsEnrolled() { return true, true, nil } + return true, false, nil } -func (cli *cliCapi) status() error { +func (cli *cliCapi) Status(out io.Writer, hub *cwhub.Hub) error { cfg := cli.cfg() if err := require.CAPIRegistered(cfg); err != nil { @@ -209,24 +212,22 @@ func (cli *cliCapi) status() error { cred := cfg.API.Server.OnlineClient.Credentials - hub, err := require.Hub(cfg, nil, nil) - if err != nil { - return err - } - - log.Infof("Loaded credentials from %s", cfg.API.Server.OnlineClient.CredentialsFilePath) - 
log.Infof("Trying to authenticate with username %s on %s", cred.Login, cred.URL) + fmt.Fprintf(out, "Loaded credentials from %s\n", cfg.API.Server.OnlineClient.CredentialsFilePath) + fmt.Fprintf(out, "Trying to authenticate with username %s on %s\n", cred.Login, cred.URL) - auth, enrolled, err := QueryCAPIStatus(hub, cred.URL, cred.Login, cred.Password) + auth, enrolled, err := queryCAPIStatus(hub, cred.URL, cred.Login, cred.Password) if err != nil { - return fmt.Errorf("CAPI: failed to authenticate to Central API (CAPI): %s", err) + return fmt.Errorf("failed to authenticate to Central API (CAPI): %w", err) } + if auth { - log.Info("You can successfully interact with Central API (CAPI)") + fmt.Fprint(out, "You can successfully interact with Central API (CAPI)\n") } + if enrolled { - log.Info("Your instance is enrolled in the console") + fmt.Fprint(out, "Your instance is enrolled in the console\n") } + return nil } @@ -237,7 +238,12 @@ func (cli *cliCapi) newStatusCmd() *cobra.Command { Args: cobra.MinimumNArgs(0), DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { - return cli.status() + hub, err := require.Hub(cli.cfg(), nil, nil) + if err != nil { + return err + } + + return cli.Status(color.Output, hub) }, } diff --git a/cmd/crowdsec-cli/clilapi/lapi.go b/cmd/crowdsec-cli/clilapi/lapi.go index ec66daf16a4..2de962d896d 100644 --- a/cmd/crowdsec-cli/clilapi/lapi.go +++ b/cmd/crowdsec-cli/clilapi/lapi.go @@ -4,12 +4,14 @@ import ( "context" "errors" "fmt" + "io" "net/url" "os" "slices" "sort" "strings" + "github.com/fatih/color" "github.com/go-openapi/strfmt" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" @@ -30,10 +32,10 @@ import ( const LAPIURLPrefix = "v1" -type configGetter func() *csconfig.Config +type configGetter = func() *csconfig.Config type cliLapi struct { - cfg configGetter + cfg configGetter } func New(cfg configGetter) *cliLapi { @@ -42,11 +44,11 @@ func New(cfg configGetter) *cliLapi { } } -// QueryLAPIStatus checks 
if the Local API is reachable, and if the credentials are correct -func QueryLAPIStatus(hub *cwhub.Hub, credURL string, login string, password string) error { +// queryLAPIStatus checks if the Local API is reachable, and if the credentials are correct. +func queryLAPIStatus(hub *cwhub.Hub, credURL string, login string, password string) (bool, error) { apiURL, err := url.Parse(credURL) if err != nil { - return fmt.Errorf("parsing api url: %w", err) + return false, err } client, err := apiclient.NewDefaultClient(apiURL, @@ -54,7 +56,7 @@ func QueryLAPIStatus(hub *cwhub.Hub, credURL string, login string, password stri cwversion.UserAgent(), nil) if err != nil { - return fmt.Errorf("init default client: %w", err) + return false, err } pw := strfmt.Password(password) @@ -69,30 +71,26 @@ func QueryLAPIStatus(hub *cwhub.Hub, credURL string, login string, password stri _, _, err = client.Auth.AuthenticateWatcher(context.Background(), t) if err != nil { - return err + return false, err } - return nil + return true, nil } -func (cli *cliLapi) status() error { +func (cli *cliLapi) Status(out io.Writer, hub *cwhub.Hub) error { cfg := cli.cfg() cred := cfg.API.Client.Credentials - hub, err := require.Hub(cfg, nil, nil) - if err != nil { - return err - } - - log.Infof("Loaded credentials from %s", cfg.API.Client.CredentialsFilePath) - log.Infof("Trying to authenticate with username %s on %s", cred.Login, cred.URL) + fmt.Fprintf(out, "Loaded credentials from %s\n", cfg.API.Client.CredentialsFilePath) + fmt.Fprintf(out, "Trying to authenticate with username %s on %s\n", cred.Login, cred.URL) - if err := QueryLAPIStatus(hub, cred.URL, cred.Login, cred.Password); err != nil { + _, err := queryLAPIStatus(hub, cred.URL, cred.Login, cred.Password) + if err != nil { return fmt.Errorf("failed to authenticate to Local API (LAPI): %w", err) } - log.Infof("You can successfully interact with Local API (LAPI)") + fmt.Fprintf(out, "You can successfully interact with Local API (LAPI)\n") return 
nil } @@ -197,7 +195,12 @@ func (cli *cliLapi) newStatusCmd() *cobra.Command { Args: cobra.MinimumNArgs(0), DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { - return cli.status() + hub, err := require.Hub(cli.cfg(), nil, nil) + if err != nil { + return err + } + + return cli.Status(color.Output, hub) }, } diff --git a/cmd/crowdsec-cli/flag.go b/cmd/crowdsec-cli/climachine/flag.go similarity index 96% rename from cmd/crowdsec-cli/flag.go rename to cmd/crowdsec-cli/climachine/flag.go index 1780d08e5f7..c3fefd896e1 100644 --- a/cmd/crowdsec-cli/flag.go +++ b/cmd/crowdsec-cli/climachine/flag.go @@ -1,4 +1,4 @@ -package main +package climachine // Custom types for flag validation and conversion. diff --git a/cmd/crowdsec-cli/machines.go b/cmd/crowdsec-cli/climachine/machines.go similarity index 98% rename from cmd/crowdsec-cli/machines.go rename to cmd/crowdsec-cli/climachine/machines.go index 8b35245405f..bf8656105aa 100644 --- a/cmd/crowdsec-cli/machines.go +++ b/cmd/crowdsec-cli/climachine/machines.go @@ -1,4 +1,4 @@ -package main +package climachine import ( "encoding/csv" @@ -49,12 +49,14 @@ func getLastHeartbeat(m *ent.Machine) (string, bool) { return hb, true } +type configGetter = func() *csconfig.Config + type cliMachines struct { db *database.Client cfg configGetter } -func NewCLIMachines(cfg configGetter) *cliMachines { +func New(cfg configGetter) *cliMachines { return &cliMachines{ cfg: cfg, } @@ -208,8 +210,11 @@ func (cli *cliMachines) listCSV(out io.Writer, machines ent.Machines) error { return nil } -func (cli *cliMachines) list(out io.Writer) error { - machines, err := cli.db.ListMachines() +func (cli *cliMachines) List(out io.Writer, db *database.Client) error { + // XXX: must use the provided db object, the one in the struct might be nil + // (calling List directly skips the PersistentPreRunE) + + machines, err := db.ListMachines() if err != nil { return fmt.Errorf("unable to list machines: %w", err) } @@ -247,7 +252,7 @@ func 
(cli *cliMachines) newListCmd() *cobra.Command { Args: cobra.NoArgs, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { - return cli.list(color.Output) + return cli.List(color.Output, cli.db) }, } diff --git a/cmd/crowdsec-cli/support.go b/cmd/crowdsec-cli/clisupport/support.go similarity index 90% rename from cmd/crowdsec-cli/support.go rename to cmd/crowdsec-cli/clisupport/support.go index c48c84668ea..55f0ec4b03e 100644 --- a/cmd/crowdsec-cli/support.go +++ b/cmd/crowdsec-cli/clisupport/support.go @@ -1,4 +1,4 @@ -package main +package clisupport import ( "archive/zip" @@ -22,9 +22,11 @@ import ( "github.com/crowdsecurity/go-cs-lib/trace" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clibouncer" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clicapi" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clihub" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clilapi" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/climachine" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/climetrics" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/csconfig" @@ -165,15 +167,15 @@ func (cli *cliSupport) dumpOSInfo(zw *zip.Writer) error { } func (cli *cliSupport) dumpHubItems(zw *zip.Writer, hub *cwhub.Hub) error { + log.Infof("Collecting hub") + if hub == nil { return errors.New("no hub connection") } - log.Infof("Collecting hub") - out := new(bytes.Buffer) - ch := clihub.New(cli.cfg) + if err := ch.List(out, hub, false); err != nil { return err } @@ -193,10 +195,9 @@ func (cli *cliSupport) dumpBouncers(zw *zip.Writer, db *database.Client) error { } out := new(bytes.Buffer) + cm := clibouncer.New(cli.cfg) - // call the "cscli bouncers list" command directly, skip any preRun - cm := cliBouncers{db: db, cfg: cli.cfg} - if err := cm.list(out); err != nil { + if err := cm.List(out, db); err != nil { return err } @@ -215,10 +216,9 @@ func (cli *cliSupport) dumpAgents(zw *zip.Writer, db 
*database.Client) error { } out := new(bytes.Buffer) + cm := climachine.New(cli.cfg) - // call the "cscli machines list" command directly, skip any preRun - cm := cliMachines{db: db, cfg: cli.cfg} - if err := cm.list(out); err != nil { + if err := cm.List(out, db); err != nil { return err } @@ -232,22 +232,17 @@ func (cli *cliSupport) dumpAgents(zw *zip.Writer, db *database.Client) error { func (cli *cliSupport) dumpLAPIStatus(zw *zip.Writer, hub *cwhub.Hub) error { log.Info("Collecting LAPI status") - cfg := cli.cfg() - cred := cfg.API.Client.Credentials - out := new(bytes.Buffer) + cl := clilapi.New(cli.cfg) - fmt.Fprintf(out, "LAPI credentials file: %s\n", cfg.API.Client.CredentialsFilePath) - fmt.Fprintf(out, "LAPI URL: %s\n", cred.URL) - fmt.Fprintf(out, "LAPI username: %s\n", cred.Login) - - if err := clilapi.QueryLAPIStatus(hub, cred.URL, cred.Login, cred.Password); err != nil { - return fmt.Errorf("could not authenticate to Local API (LAPI): %w", err) + err := cl.Status(out, hub) + if err != nil { + fmt.Fprintf(out, "%s\n", err) } - fmt.Fprintln(out, "You can successfully interact with Local API (LAPI)") + stripped := stripAnsiString(out.String()) - cli.writeToZip(zw, SUPPORT_LAPI_STATUS_PATH, time.Now(), out) + cli.writeToZip(zw, SUPPORT_LAPI_STATUS_PATH, time.Now(), strings.NewReader(stripped)) return nil } @@ -255,28 +250,17 @@ func (cli *cliSupport) dumpLAPIStatus(zw *zip.Writer, hub *cwhub.Hub) error { func (cli *cliSupport) dumpCAPIStatus(zw *zip.Writer, hub *cwhub.Hub) error { log.Info("Collecting CAPI status") - cfg := cli.cfg() - cred := cfg.API.Server.OnlineClient.Credentials - out := new(bytes.Buffer) + cc := clicapi.New(cli.cfg) - fmt.Fprintf(out, "CAPI credentials file: %s\n", cfg.API.Server.OnlineClient.CredentialsFilePath) - fmt.Fprintf(out, "CAPI URL: %s\n", cred.URL) - fmt.Fprintf(out, "CAPI username: %s\n", cred.Login) - - auth, enrolled, err := clicapi.QueryCAPIStatus(hub, cred.URL, cred.Login, cred.Password) + err := cc.Status(out, hub) 
if err != nil { - return fmt.Errorf("could not authenticate to Central API (CAPI): %w", err) - } - if auth { - fmt.Fprintln(out, "You can successfully interact with Central API (CAPI)") + fmt.Fprintf(out, "%s\n", err) } - if enrolled { - fmt.Fprintln(out, "Your instance is enrolled in the console") - } + stripped := stripAnsiString(out.String()) - cli.writeToZip(zw, SUPPORT_CAPI_STATUS_PATH, time.Now(), out) + cli.writeToZip(zw, SUPPORT_CAPI_STATUS_PATH, time.Now(), strings.NewReader(stripped)) return nil } @@ -389,11 +373,13 @@ func (cli *cliSupport) dumpCrash(zw *zip.Writer) error { return nil } +type configGetter func() *csconfig.Config + type cliSupport struct { cfg configGetter } -func NewCLISupport(cfg configGetter) *cliSupport { +func New(cfg configGetter) *cliSupport { return &cliSupport{ cfg: cfg, } diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index 2153ebfb7bb..2a1f5ac7ebe 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -14,17 +14,20 @@ import ( "github.com/crowdsecurity/go-cs-lib/trace" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clibouncer" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clicapi" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cliconsole" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cliexplain" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clihub" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clihubtest" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clilapi" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/climachine" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/climetrics" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clinotifications" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clipapi" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clisetup" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clisimulation" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clisupport" 
"github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/fflag" ) @@ -256,8 +259,8 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewCLIDecisions(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIAlerts(cli.cfg).NewCommand()) cmd.AddCommand(clisimulation.New(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLIBouncers(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLIMachines(cli.cfg).NewCommand()) + cmd.AddCommand(clibouncer.New(cli.cfg).NewCommand()) + cmd.AddCommand(climachine.New(cli.cfg).NewCommand()) cmd.AddCommand(clicapi.New(cli.cfg).NewCommand()) cmd.AddCommand(clilapi.New(cli.cfg).NewCommand()) cmd.AddCommand(NewCompletionCmd()) @@ -265,7 +268,7 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(cliexplain.New(cli.cfg, ConfigFilePath).NewCommand()) cmd.AddCommand(clihubtest.New(cli.cfg).NewCommand()) cmd.AddCommand(clinotifications.New(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLISupport(cli.cfg).NewCommand()) + cmd.AddCommand(clisupport.New(cli.cfg).NewCommand()) cmd.AddCommand(clipapi.New(cli.cfg).NewCommand()) cmd.AddCommand(NewCLICollection(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIParser(cli.cfg).NewCommand()) diff --git a/test/bats/01_cscli.bats b/test/bats/01_cscli.bats index 27cfe53212b..bda2362c02a 100644 --- a/test/bats/01_cscli.bats +++ b/test/bats/01_cscli.bats @@ -213,9 +213,9 @@ teardown() { rune -0 ./instance-crowdsec start rune -0 cscli lapi status - assert_stderr --partial "Loaded credentials from" - assert_stderr --partial "Trying to authenticate with username" - assert_stderr --partial "You can successfully interact with Local API (LAPI)" + assert_output --partial "Loaded credentials from" + assert_output --partial "Trying to authenticate with username" + assert_output --partial "You can successfully interact with Local API (LAPI)" } @test "cscli - missing LAPI credentials file" { @@ -260,7 +260,7 @@ teardown() { rune -0 
./instance-crowdsec start rune -0 cscli lapi status - assert_stderr --partial "You can successfully interact with Local API (LAPI)" + assert_output --partial "You can successfully interact with Local API (LAPI)" rm "$LOCAL_API_CREDENTIALS".local @@ -272,7 +272,7 @@ teardown() { config_set "$LOCAL_API_CREDENTIALS" '.password="$PASSWORD"' rune -0 cscli lapi status - assert_stderr --partial "You can successfully interact with Local API (LAPI)" + assert_output --partial "You can successfully interact with Local API (LAPI)" # but if a variable is not defined, there is no specific error message unset URL @@ -299,7 +299,7 @@ teardown() { rune -1 cscli lapi status -o json rune -0 jq -r '.msg' <(stderr) - assert_output 'failed to authenticate to Local API (LAPI): parsing api url: parse "http://127.0.0.1:-80/": invalid port ":-80" after host' + assert_output 'failed to authenticate to Local API (LAPI): parse "http://127.0.0.1:-80/": invalid port ":-80" after host' } @test "cscli - bad LAPI password" { diff --git a/test/bats/03_noagent.bats b/test/bats/03_noagent.bats index 60731b90713..6be5101cee2 100644 --- a/test/bats/03_noagent.bats +++ b/test/bats/03_noagent.bats @@ -76,7 +76,7 @@ teardown() { config_disable_agent ./instance-crowdsec start rune -0 cscli lapi status - assert_stderr --partial "You can successfully interact with Local API (LAPI)" + assert_output --partial "You can successfully interact with Local API (LAPI)" } @test "cscli metrics" { diff --git a/test/bats/04_capi.bats b/test/bats/04_capi.bats index 830d0668cbb..f17ce376d62 100644 --- a/test/bats/04_capi.bats +++ b/test/bats/04_capi.bats @@ -55,10 +55,10 @@ setup() { rune -0 cscli scenarios install crowdsecurity/ssh-bf rune -0 cscli capi status - assert_stderr --partial "Loaded credentials from" - assert_stderr --partial "Trying to authenticate with username" - assert_stderr --partial " on https://api.crowdsec.net/" - assert_stderr --partial "You can successfully interact with Central API (CAPI)" + 
assert_output --partial "Loaded credentials from" + assert_output --partial "Trying to authenticate with username" + assert_output --partial " on https://api.crowdsec.net/" + assert_output --partial "You can successfully interact with Central API (CAPI)" } @test "cscli alerts list: receive a community pull when capi is enabled" { @@ -85,7 +85,7 @@ setup() { config_disable_agent ./instance-crowdsec start rune -0 cscli capi status - assert_stderr --partial "You can successfully interact with Central API (CAPI)" + assert_output --partial "You can successfully interact with Central API (CAPI)" } @test "capi register must be run from lapi" { diff --git a/test/bats/04_nocapi.bats b/test/bats/04_nocapi.bats index c02a75810b9..d22a6f0a953 100644 --- a/test/bats/04_nocapi.bats +++ b/test/bats/04_nocapi.bats @@ -66,7 +66,7 @@ teardown() { config_disable_capi ./instance-crowdsec start rune -0 cscli lapi status - assert_stderr --partial "You can successfully interact with Local API (LAPI)" + assert_output --partial "You can successfully interact with Local API (LAPI)" } @test "cscli metrics" { diff --git a/test/bats/09_socket.bats b/test/bats/09_socket.bats index f770abaad2e..f861d8a40dc 100644 --- a/test/bats/09_socket.bats +++ b/test/bats/09_socket.bats @@ -37,22 +37,22 @@ teardown() { ./instance-crowdsec start rune -0 cscli lapi status - assert_stderr --regexp "Trying to authenticate with username .* on $socket" - assert_stderr --partial "You can successfully interact with Local API (LAPI)" + assert_output --regexp "Trying to authenticate with username .* on $socket" + assert_output --partial "You can successfully interact with Local API (LAPI)" } @test "crowdsec - listen on both socket and TCP" { ./instance-crowdsec start rune -0 cscli lapi status - assert_stderr --regexp "Trying to authenticate with username .* on http://127.0.0.1:8080/" - assert_stderr --partial "You can successfully interact with Local API (LAPI)" + assert_output --regexp "Trying to authenticate with 
username .* on http://127.0.0.1:8080/" + assert_output --partial "You can successfully interact with Local API (LAPI)" config_set "$LOCAL_API_CREDENTIALS" ".url=strenv(socket)" rune -0 cscli lapi status - assert_stderr --regexp "Trying to authenticate with username .* on $socket" - assert_stderr --partial "You can successfully interact with Local API (LAPI)" + assert_output --regexp "Trying to authenticate with username .* on $socket" + assert_output --partial "You can successfully interact with Local API (LAPI)" } @test "cscli - authenticate new machine with socket" { From 8c0c10cd7ab5f6c0871e13f455d3e26a0148d243 Mon Sep 17 00:00:00 2001 From: Laurence Jones Date: Mon, 2 Sep 2024 09:11:59 +0100 Subject: [PATCH 267/318] enhance: return an error if cscli dashboard is run within a container (#3207) --- cmd/crowdsec-cli/dashboard.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/cmd/crowdsec-cli/dashboard.go b/cmd/crowdsec-cli/dashboard.go index eb4a2a5956a..31efd074bf9 100644 --- a/cmd/crowdsec-cli/dashboard.go +++ b/cmd/crowdsec-cli/dashboard.go @@ -23,6 +23,7 @@ import ( "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/idgen" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/metabase" + "github.com/crowdsecurity/go-cs-lib/version" ) var ( @@ -103,6 +104,10 @@ cscli dashboard remove log.Warn("cscli dashboard will be deprecated in version 1.7.0, read more at https://docs.crowdsec.net/blog/cscli_dashboard_deprecation/") + if version.System == "docker" { + return errors.New("cscli dashboard is not supported whilst running CrowdSec within a container please see: https://github.com/crowdsecurity/example-docker-compose/tree/main/basic") + } + return nil }, } From d2616766deba0cf9c4a67c7e634b1226db86f2ed Mon Sep 17 00:00:00 2001 From: blotus Date: Mon, 2 Sep 2024 13:13:40 +0200 Subject: [PATCH 268/318] Allow auto registration of machines in LAPI (#3202) Co-authored-by: marco --- cmd/crowdsec-cli/clilapi/lapi.go | 
27 +-- pkg/apiclient/client.go | 2 +- pkg/apiclient/config.go | 17 +- pkg/apiserver/alerts_test.go | 2 +- pkg/apiserver/apiserver.go | 3 +- pkg/apiserver/apiserver_test.go | 27 ++- pkg/apiserver/controllers/controller.go | 2 + pkg/apiserver/controllers/v1/controller.go | 17 +- pkg/apiserver/controllers/v1/machines.go | 54 +++++- pkg/apiserver/jwt_test.go | 2 +- pkg/apiserver/machines_test.go | 102 +++++++++- pkg/csconfig/api.go | 108 ++++++++--- pkg/csconfig/api_test.go | 6 + pkg/models/localapi_swagger.yaml | 7 + pkg/models/watcher_registration_request.go | 25 +++ test/bats/01_cscli.bats | 103 ---------- test/bats/01_cscli_lapi.bats | 213 +++++++++++++++++++++ 17 files changed, 546 insertions(+), 171 deletions(-) create mode 100644 test/bats/01_cscli_lapi.bats diff --git a/cmd/crowdsec-cli/clilapi/lapi.go b/cmd/crowdsec-cli/clilapi/lapi.go index 2de962d896d..a6b88101cbf 100644 --- a/cmd/crowdsec-cli/clilapi/lapi.go +++ b/cmd/crowdsec-cli/clilapi/lapi.go @@ -95,7 +95,7 @@ func (cli *cliLapi) Status(out io.Writer, hub *cwhub.Hub) error { return nil } -func (cli *cliLapi) register(apiURL string, outputFile string, machine string) error { +func (cli *cliLapi) register(apiURL string, outputFile string, machine string, token string) error { var err error lapiUser := machine @@ -116,11 +116,12 @@ func (cli *cliLapi) register(apiURL string, outputFile string, machine string) e } _, err = apiclient.RegisterClient(&apiclient.Config{ - MachineID: lapiUser, - Password: password, - UserAgent: cwversion.UserAgent(), - URL: apiurl, - VersionPrefix: LAPIURLPrefix, + MachineID: lapiUser, + Password: password, + UserAgent: cwversion.UserAgent(), + RegistrationToken: token, + URL: apiurl, + VersionPrefix: LAPIURLPrefix, }, nil) if err != nil { return fmt.Errorf("api client register: %w", err) @@ -138,10 +139,12 @@ func (cli *cliLapi) register(apiURL string, outputFile string, machine string) e dumpFile = "" } - apiCfg := csconfig.ApiCredentialsCfg{ - Login: lapiUser, - Password: 
password.String(), - URL: apiURL, + apiCfg := cfg.API.Client.Credentials + apiCfg.Login = lapiUser + apiCfg.Password = password.String() + + if apiURL != "" { + apiCfg.URL = apiURL } apiConfigDump, err := yaml.Marshal(apiCfg) @@ -212,6 +215,7 @@ func (cli *cliLapi) newRegisterCmd() *cobra.Command { apiURL string outputFile string machine string + token string ) cmd := &cobra.Command{ @@ -222,7 +226,7 @@ Keep in mind the machine needs to be validated by an administrator on LAPI side Args: cobra.MinimumNArgs(0), DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { - return cli.register(apiURL, outputFile, machine) + return cli.register(apiURL, outputFile, machine, token) }, } @@ -230,6 +234,7 @@ Keep in mind the machine needs to be validated by an administrator on LAPI side flags.StringVarP(&apiURL, "url", "u", "", "URL of the API (ie. http://127.0.0.1)") flags.StringVarP(&outputFile, "file", "f", "", "output file destination") flags.StringVar(&machine, "machine", "", "Name of the machine to register with") + flags.StringVar(&token, "token", "", "Auto registration token to use") return cmd } diff --git a/pkg/apiclient/client.go b/pkg/apiclient/client.go index 3abd42cf009..5669fd24786 100644 --- a/pkg/apiclient/client.go +++ b/pkg/apiclient/client.go @@ -177,7 +177,7 @@ func RegisterClient(config *Config, client *http.Client) (*ApiClient, error) { c.Alerts = (*AlertsService)(&c.common) c.Auth = (*AuthService)(&c.common) - resp, err := c.Auth.RegisterWatcher(context.Background(), models.WatcherRegistrationRequest{MachineID: &config.MachineID, Password: &config.Password}) + resp, err := c.Auth.RegisterWatcher(context.Background(), models.WatcherRegistrationRequest{MachineID: &config.MachineID, Password: &config.Password, RegistrationToken: config.RegistrationToken}) /*if we have http status, return it*/ if err != nil { if resp != nil && resp.Response != nil { diff --git a/pkg/apiclient/config.go b/pkg/apiclient/config.go index 
4dfeb3e863f..b08452e74e0 100644 --- a/pkg/apiclient/config.go +++ b/pkg/apiclient/config.go @@ -7,12 +7,13 @@ import ( ) type Config struct { - MachineID string - Password strfmt.Password - Scenarios []string - URL *url.URL - PapiURL *url.URL - VersionPrefix string - UserAgent string - UpdateScenario func() ([]string, error) + MachineID string + Password strfmt.Password + Scenarios []string + URL *url.URL + PapiURL *url.URL + VersionPrefix string + UserAgent string + RegistrationToken string + UpdateScenario func() ([]string, error) } diff --git a/pkg/apiserver/alerts_test.go b/pkg/apiserver/alerts_test.go index 812e33ae13b..891eb3a8f4a 100644 --- a/pkg/apiserver/alerts_test.go +++ b/pkg/apiserver/alerts_test.go @@ -71,7 +71,7 @@ func InitMachineTest(t *testing.T) (*gin.Engine, models.WatcherAuthResponse, csc } func LoginToTestAPI(t *testing.T, router *gin.Engine, config csconfig.Config) models.WatcherAuthResponse { - body := CreateTestMachine(t, router) + body := CreateTestMachine(t, router, "") ValidateMachine(t, "test", config.API.Server.DbConfig) w := httptest.NewRecorder() diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go index bd0b5d39bf4..31b31bcb82d 100644 --- a/pkg/apiserver/apiserver.go +++ b/pkg/apiserver/apiserver.go @@ -21,7 +21,7 @@ import ( "github.com/crowdsecurity/go-cs-lib/trace" "github.com/crowdsecurity/crowdsec/pkg/apiserver/controllers" - "github.com/crowdsecurity/crowdsec/pkg/apiserver/middlewares/v1" + v1 "github.com/crowdsecurity/crowdsec/pkg/apiserver/middlewares/v1" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/csplugin" "github.com/crowdsecurity/crowdsec/pkg/database" @@ -235,6 +235,7 @@ func NewServer(config *csconfig.LocalApiServerCfg) (*APIServer, error) { Log: clog, ConsoleConfig: config.ConsoleConfig, DisableRemoteLapiRegistration: config.DisableRemoteLapiRegistration, + AutoRegisterCfg: config.AutoRegister, } var ( diff --git a/pkg/apiserver/apiserver_test.go 
b/pkg/apiserver/apiserver_test.go index b3f619f39c1..f48791ebcb8 100644 --- a/pkg/apiserver/apiserver_test.go +++ b/pkg/apiserver/apiserver_test.go @@ -29,10 +29,15 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) +const ( + validRegistrationToken = "igheethauCaeteSaiyee3LosohPhahze" + invalidRegistrationToken = "vohl1feibechieG5coh8musheish2auj" +) + var ( testMachineID = "test" testPassword = strfmt.Password("test") - MachineTest = models.WatcherAuthRequest{ + MachineTest = models.WatcherRegistrationRequest{ MachineID: &testMachineID, Password: &testPassword, } @@ -65,6 +70,14 @@ func LoadTestConfig(t *testing.T) csconfig.Config { ShareTaintedScenarios: new(bool), ShareCustomScenarios: new(bool), }, + AutoRegister: &csconfig.LocalAPIAutoRegisterCfg{ + Enable: ptr.Of(true), + Token: validRegistrationToken, + AllowedRanges: []string{ + "127.0.0.1/8", + "::1/128", + }, + }, } apiConfig := csconfig.APICfg{ @@ -75,6 +88,9 @@ func LoadTestConfig(t *testing.T) csconfig.Config { err := config.API.Server.LoadProfiles() require.NoError(t, err) + err = config.API.Server.LoadAutoRegister() + require.NoError(t, err) + return config } @@ -113,6 +129,9 @@ func LoadTestConfigForwardedFor(t *testing.T) csconfig.Config { err := config.API.Server.LoadProfiles() require.NoError(t, err) + err = config.API.Server.LoadAutoRegister() + require.NoError(t, err) + return config } @@ -251,8 +270,10 @@ func readDecisionsStreamResp(t *testing.T, resp *httptest.ResponseRecorder) (map return response, resp.Code } -func CreateTestMachine(t *testing.T, router *gin.Engine) string { - b, err := json.Marshal(MachineTest) +func CreateTestMachine(t *testing.T, router *gin.Engine, token string) string { + regReq := MachineTest + regReq.RegistrationToken = token + b, err := json.Marshal(regReq) require.NoError(t, err) body := string(b) diff --git a/pkg/apiserver/controllers/controller.go b/pkg/apiserver/controllers/controller.go index 8175f431384..29f02723b70 100644 --- 
a/pkg/apiserver/controllers/controller.go +++ b/pkg/apiserver/controllers/controller.go @@ -29,6 +29,7 @@ type Controller struct { ConsoleConfig *csconfig.ConsoleConfig TrustedIPs []net.IPNet HandlerV1 *v1.Controller + AutoRegisterCfg *csconfig.LocalAPIAutoRegisterCfg DisableRemoteLapiRegistration bool } @@ -89,6 +90,7 @@ func (c *Controller) NewV1() error { PluginChannel: c.PluginChannel, ConsoleConfig: *c.ConsoleConfig, TrustedIPs: c.TrustedIPs, + AutoRegisterCfg: c.AutoRegisterCfg, } c.HandlerV1, err = v1.New(&v1Config) diff --git a/pkg/apiserver/controllers/v1/controller.go b/pkg/apiserver/controllers/v1/controller.go index ad76ad76616..6de4abe3b3b 100644 --- a/pkg/apiserver/controllers/v1/controller.go +++ b/pkg/apiserver/controllers/v1/controller.go @@ -23,9 +23,10 @@ type Controller struct { AlertsAddChan chan []*models.Alert DecisionDeleteChan chan []*models.Decision - PluginChannel chan csplugin.ProfileAlert - ConsoleConfig csconfig.ConsoleConfig - TrustedIPs []net.IPNet + PluginChannel chan csplugin.ProfileAlert + ConsoleConfig csconfig.ConsoleConfig + TrustedIPs []net.IPNet + AutoRegisterCfg *csconfig.LocalAPIAutoRegisterCfg } type ControllerV1Config struct { @@ -36,9 +37,10 @@ type ControllerV1Config struct { AlertsAddChan chan []*models.Alert DecisionDeleteChan chan []*models.Decision - PluginChannel chan csplugin.ProfileAlert - ConsoleConfig csconfig.ConsoleConfig - TrustedIPs []net.IPNet + PluginChannel chan csplugin.ProfileAlert + ConsoleConfig csconfig.ConsoleConfig + TrustedIPs []net.IPNet + AutoRegisterCfg *csconfig.LocalAPIAutoRegisterCfg } func New(cfg *ControllerV1Config) (*Controller, error) { @@ -59,9 +61,10 @@ func New(cfg *ControllerV1Config) (*Controller, error) { PluginChannel: cfg.PluginChannel, ConsoleConfig: cfg.ConsoleConfig, TrustedIPs: cfg.TrustedIPs, + AutoRegisterCfg: cfg.AutoRegisterCfg, } - v1.Middlewares, err = middlewares.NewMiddlewares(cfg.DbClient) + v1.Middlewares, err = middlewares.NewMiddlewares(cfg.DbClient) if err != 
nil { return v1, err } diff --git a/pkg/apiserver/controllers/v1/machines.go b/pkg/apiserver/controllers/v1/machines.go index 84a6ef2583c..0030f7d3b39 100644 --- a/pkg/apiserver/controllers/v1/machines.go +++ b/pkg/apiserver/controllers/v1/machines.go @@ -1,15 +1,50 @@ package v1 import ( + "errors" + "net" "net/http" "github.com/gin-gonic/gin" "github.com/go-openapi/strfmt" + log "github.com/sirupsen/logrus" "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/types" ) +func (c *Controller) shouldAutoRegister(token string, gctx *gin.Context) (bool, error) { + if !*c.AutoRegisterCfg.Enable { + return false, nil + } + + clientIP := net.ParseIP(gctx.ClientIP()) + + // Can probaby happen if using unix socket ? + if clientIP == nil { + log.Warnf("Failed to parse client IP for watcher self registration: %s", gctx.ClientIP()) + return false, nil + } + + if token == "" || c.AutoRegisterCfg == nil { + return false, nil + } + + // Check the token + if token != c.AutoRegisterCfg.Token { + return false, errors.New("invalid token for auto registration") + } + + // Check the source IP + for _, ipRange := range c.AutoRegisterCfg.AllowedRangesParsed { + if ipRange.Contains(clientIP) { + return true, nil + } + } + + return false, errors.New("IP not in allowed range for auto registration") +} + func (c *Controller) CreateMachine(gctx *gin.Context) { var input models.WatcherRegistrationRequest @@ -19,14 +54,27 @@ func (c *Controller) CreateMachine(gctx *gin.Context) { } if err := input.Validate(strfmt.Default); err != nil { - c.HandleDBErrors(gctx, err) + gctx.JSON(http.StatusUnprocessableEntity, gin.H{"message": err.Error()}) return } - if _, err := c.DBClient.CreateMachine(input.MachineID, input.Password, gctx.ClientIP(), false, false, types.PasswordAuthType); err != nil { + autoRegister, err := c.shouldAutoRegister(input.RegistrationToken, gctx) + if err != nil { + log.WithFields(log.Fields{"ip": gctx.ClientIP(), "machine_id": 
*input.MachineID}).Errorf("Auto-register failed: %s", err) + gctx.JSON(http.StatusUnauthorized, gin.H{"message": err.Error()}) + + return + } + + if _, err := c.DBClient.CreateMachine(input.MachineID, input.Password, gctx.ClientIP(), autoRegister, false, types.PasswordAuthType); err != nil { c.HandleDBErrors(gctx, err) return } - gctx.Status(http.StatusCreated) + if autoRegister { + log.WithFields(log.Fields{"ip": gctx.ClientIP(), "machine_id": *input.MachineID}).Info("Auto-registered machine") + gctx.Status(http.StatusAccepted) + } else { + gctx.Status(http.StatusCreated) + } } diff --git a/pkg/apiserver/jwt_test.go b/pkg/apiserver/jwt_test.go index 58f66cfc74f..aa6e84e416b 100644 --- a/pkg/apiserver/jwt_test.go +++ b/pkg/apiserver/jwt_test.go @@ -12,7 +12,7 @@ import ( func TestLogin(t *testing.T) { router, config := NewAPITest(t) - body := CreateTestMachine(t, router) + body := CreateTestMachine(t, router, "") // Login with machine not validated yet w := httptest.NewRecorder() diff --git a/pkg/apiserver/machines_test.go b/pkg/apiserver/machines_test.go index 08efa91c6c1..041a6bee528 100644 --- a/pkg/apiserver/machines_test.go +++ b/pkg/apiserver/machines_test.go @@ -9,6 +9,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/crowdsecurity/go-cs-lib/ptr" ) func TestCreateMachine(t *testing.T) { @@ -20,7 +22,7 @@ func TestCreateMachine(t *testing.T) { req.Header.Add("User-Agent", UserAgent) router.ServeHTTP(w, req) - assert.Equal(t, 400, w.Code) + assert.Equal(t, http.StatusBadRequest, w.Code) assert.Equal(t, `{"message":"invalid character 'e' in literal true (expecting 'r')"}`, w.Body.String()) // Create machine with invalid input @@ -29,7 +31,7 @@ func TestCreateMachine(t *testing.T) { req.Header.Add("User-Agent", UserAgent) router.ServeHTTP(w, req) - assert.Equal(t, 500, w.Code) + assert.Equal(t, http.StatusUnprocessableEntity, w.Code) assert.Equal(t, `{"message":"validation failure list:\nmachine_id in body 
is required\npassword in body is required"}`, w.Body.String()) // Create machine @@ -43,7 +45,7 @@ func TestCreateMachine(t *testing.T) { req.Header.Add("User-Agent", UserAgent) router.ServeHTTP(w, req) - assert.Equal(t, 201, w.Code) + assert.Equal(t, http.StatusCreated, w.Code) assert.Equal(t, "", w.Body.String()) } @@ -62,7 +64,7 @@ func TestCreateMachineWithForwardedFor(t *testing.T) { req.Header.Add("X-Real-Ip", "1.1.1.1") router.ServeHTTP(w, req) - assert.Equal(t, 201, w.Code) + assert.Equal(t, http.StatusCreated, w.Code) assert.Equal(t, "", w.Body.String()) ip := GetMachineIP(t, *MachineTest.MachineID, config.API.Server.DbConfig) @@ -85,7 +87,7 @@ func TestCreateMachineWithForwardedForNoConfig(t *testing.T) { req.Header.Add("X-Real-IP", "1.1.1.1") router.ServeHTTP(w, req) - assert.Equal(t, 201, w.Code) + assert.Equal(t, http.StatusCreated, w.Code) assert.Equal(t, "", w.Body.String()) ip := GetMachineIP(t, *MachineTest.MachineID, config.API.Server.DbConfig) @@ -109,7 +111,7 @@ func TestCreateMachineWithoutForwardedFor(t *testing.T) { req.Header.Add("User-Agent", UserAgent) router.ServeHTTP(w, req) - assert.Equal(t, 201, w.Code) + assert.Equal(t, http.StatusCreated, w.Code) assert.Equal(t, "", w.Body.String()) ip := GetMachineIP(t, *MachineTest.MachineID, config.API.Server.DbConfig) @@ -122,7 +124,7 @@ func TestCreateMachineWithoutForwardedFor(t *testing.T) { func TestCreateMachineAlreadyExist(t *testing.T) { router, _ := NewAPITest(t) - body := CreateTestMachine(t, router) + body := CreateTestMachine(t, router, "") w := httptest.NewRecorder() req, _ := http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(body)) @@ -134,6 +136,90 @@ func TestCreateMachineAlreadyExist(t *testing.T) { req.Header.Add("User-Agent", UserAgent) router.ServeHTTP(w, req) - assert.Equal(t, 403, w.Code) + assert.Equal(t, http.StatusForbidden, w.Code) assert.Equal(t, `{"message":"user 'test': user already exist"}`, w.Body.String()) } + +func TestAutoRegistration(t 
*testing.T) { + router, _ := NewAPITest(t) + + //Invalid registration token / valid source IP + regReq := MachineTest + regReq.RegistrationToken = invalidRegistrationToken + b, err := json.Marshal(regReq) + require.NoError(t, err) + + body := string(b) + + w := httptest.NewRecorder() + req, _ := http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(body)) + req.Header.Add("User-Agent", UserAgent) + req.RemoteAddr = "127.0.0.1:4242" + router.ServeHTTP(w, req) + + assert.Equal(t, http.StatusUnauthorized, w.Code) + + //Invalid registration token / invalid source IP + regReq = MachineTest + regReq.RegistrationToken = invalidRegistrationToken + b, err = json.Marshal(regReq) + require.NoError(t, err) + + body = string(b) + + w = httptest.NewRecorder() + req, _ = http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(body)) + req.Header.Add("User-Agent", UserAgent) + req.RemoteAddr = "42.42.42.42:4242" + router.ServeHTTP(w, req) + + assert.Equal(t, http.StatusUnauthorized, w.Code) + + //valid registration token / invalid source IP + regReq = MachineTest + regReq.RegistrationToken = validRegistrationToken + b, err = json.Marshal(regReq) + require.NoError(t, err) + + body = string(b) + + w = httptest.NewRecorder() + req, _ = http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(body)) + req.Header.Add("User-Agent", UserAgent) + req.RemoteAddr = "42.42.42.42:4242" + router.ServeHTTP(w, req) + + assert.Equal(t, http.StatusUnauthorized, w.Code) + + //Valid registration token / valid source IP + regReq = MachineTest + regReq.RegistrationToken = validRegistrationToken + b, err = json.Marshal(regReq) + require.NoError(t, err) + + body = string(b) + + w = httptest.NewRecorder() + req, _ = http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(body)) + req.Header.Add("User-Agent", UserAgent) + req.RemoteAddr = "127.0.0.1:4242" + router.ServeHTTP(w, req) + + assert.Equal(t, http.StatusAccepted, w.Code) + + //No token / valid source 
IP + regReq = MachineTest + regReq.MachineID = ptr.Of("test2") + b, err = json.Marshal(regReq) + require.NoError(t, err) + + body = string(b) + + w = httptest.NewRecorder() + req, _ = http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(body)) + req.Header.Add("User-Agent", UserAgent) + req.RemoteAddr = "127.0.0.1:4242" + router.ServeHTTP(w, req) + + assert.Equal(t, http.StatusCreated, w.Code) +} diff --git a/pkg/csconfig/api.go b/pkg/csconfig/api.go index a23df957282..4a28b590e80 100644 --- a/pkg/csconfig/api.go +++ b/pkg/csconfig/api.go @@ -236,32 +236,40 @@ type CapiWhitelist struct { Cidrs []*net.IPNet `yaml:"cidrs,omitempty"` } +type LocalAPIAutoRegisterCfg struct { + Enable *bool `yaml:"enabled"` + Token string `yaml:"token"` + AllowedRanges []string `yaml:"allowed_ranges,omitempty"` + AllowedRangesParsed []*net.IPNet `yaml:"-"` +} + /*local api service configuration*/ type LocalApiServerCfg struct { - Enable *bool `yaml:"enable"` - ListenURI string `yaml:"listen_uri,omitempty"` // 127.0.0.1:8080 - ListenSocket string `yaml:"listen_socket,omitempty"` - TLS *TLSCfg `yaml:"tls"` - DbConfig *DatabaseCfg `yaml:"-"` - LogDir string `yaml:"-"` - LogMedia string `yaml:"-"` - OnlineClient *OnlineApiClientCfg `yaml:"online_client"` - ProfilesPath string `yaml:"profiles_path,omitempty"` - ConsoleConfigPath string `yaml:"console_path,omitempty"` - ConsoleConfig *ConsoleConfig `yaml:"-"` - Profiles []*ProfileCfg `yaml:"-"` - LogLevel *log.Level `yaml:"log_level"` - UseForwardedForHeaders bool `yaml:"use_forwarded_for_headers,omitempty"` - TrustedProxies *[]string `yaml:"trusted_proxies,omitempty"` - CompressLogs *bool `yaml:"-"` - LogMaxSize int `yaml:"-"` - LogMaxAge int `yaml:"-"` - LogMaxFiles int `yaml:"-"` - TrustedIPs []string `yaml:"trusted_ips,omitempty"` - PapiLogLevel *log.Level `yaml:"papi_log_level"` - DisableRemoteLapiRegistration bool `yaml:"disable_remote_lapi_registration,omitempty"` - CapiWhitelistsPath string 
`yaml:"capi_whitelists_path,omitempty"` - CapiWhitelists *CapiWhitelist `yaml:"-"` + Enable *bool `yaml:"enable"` + ListenURI string `yaml:"listen_uri,omitempty"` // 127.0.0.1:8080 + ListenSocket string `yaml:"listen_socket,omitempty"` + TLS *TLSCfg `yaml:"tls"` + DbConfig *DatabaseCfg `yaml:"-"` + LogDir string `yaml:"-"` + LogMedia string `yaml:"-"` + OnlineClient *OnlineApiClientCfg `yaml:"online_client"` + ProfilesPath string `yaml:"profiles_path,omitempty"` + ConsoleConfigPath string `yaml:"console_path,omitempty"` + ConsoleConfig *ConsoleConfig `yaml:"-"` + Profiles []*ProfileCfg `yaml:"-"` + LogLevel *log.Level `yaml:"log_level"` + UseForwardedForHeaders bool `yaml:"use_forwarded_for_headers,omitempty"` + TrustedProxies *[]string `yaml:"trusted_proxies,omitempty"` + CompressLogs *bool `yaml:"-"` + LogMaxSize int `yaml:"-"` + LogMaxAge int `yaml:"-"` + LogMaxFiles int `yaml:"-"` + TrustedIPs []string `yaml:"trusted_ips,omitempty"` + PapiLogLevel *log.Level `yaml:"papi_log_level"` + DisableRemoteLapiRegistration bool `yaml:"disable_remote_lapi_registration,omitempty"` + CapiWhitelistsPath string `yaml:"capi_whitelists_path,omitempty"` + CapiWhitelists *CapiWhitelist `yaml:"-"` + AutoRegister *LocalAPIAutoRegisterCfg `yaml:"auto_registration,omitempty"` } func (c *LocalApiServerCfg) ClientURL() string { @@ -348,6 +356,14 @@ func (c *Config) LoadAPIServer(inCli bool) error { log.Infof("loaded capi whitelist from %s: %d IPs, %d CIDRs", c.API.Server.CapiWhitelistsPath, len(c.API.Server.CapiWhitelists.Ips), len(c.API.Server.CapiWhitelists.Cidrs)) } + if err := c.API.Server.LoadAutoRegister(); err != nil { + return err + } + + if c.API.Server.AutoRegister != nil && c.API.Server.AutoRegister.Enable != nil && *c.API.Server.AutoRegister.Enable && !inCli { + log.Infof("auto LAPI registration enabled for ranges %+v", c.API.Server.AutoRegister.AllowedRanges) + } + c.API.Server.LogDir = c.Common.LogDir c.API.Server.LogMedia = c.Common.LogMedia c.API.Server.CompressLogs = 
c.Common.CompressLogs @@ -455,3 +471,47 @@ func (c *Config) LoadAPIClient() error { return c.API.Client.Load() } + +func (c *LocalApiServerCfg) LoadAutoRegister() error { + if c.AutoRegister == nil { + c.AutoRegister = &LocalAPIAutoRegisterCfg{ + Enable: ptr.Of(false), + } + + return nil + } + + // Disable by default + if c.AutoRegister.Enable == nil { + c.AutoRegister.Enable = ptr.Of(false) + } + + if !*c.AutoRegister.Enable { + return nil + } + + if c.AutoRegister.Token == "" { + return errors.New("missing token value for api.server.auto_register") + } + + if len(c.AutoRegister.Token) < 32 { + return errors.New("token value for api.server.auto_register is too short (min 32 characters)") + } + + if c.AutoRegister.AllowedRanges == nil { + return errors.New("missing allowed_ranges value for api.server.auto_register") + } + + c.AutoRegister.AllowedRangesParsed = make([]*net.IPNet, 0, len(c.AutoRegister.AllowedRanges)) + + for _, ipRange := range c.AutoRegister.AllowedRanges { + _, ipNet, err := net.ParseCIDR(ipRange) + if err != nil { + return fmt.Errorf("auto_register: failed to parse allowed range '%s': %w", ipRange, err) + } + + c.AutoRegister.AllowedRangesParsed = append(c.AutoRegister.AllowedRangesParsed, ipNet) + } + + return nil +} diff --git a/pkg/csconfig/api_test.go b/pkg/csconfig/api_test.go index 51a4c5ad602..96945202aa8 100644 --- a/pkg/csconfig/api_test.go +++ b/pkg/csconfig/api_test.go @@ -217,6 +217,12 @@ func TestLoadAPIServer(t *testing.T) { ProfilesPath: "./testdata/profiles.yaml", UseForwardedForHeaders: false, PapiLogLevel: &logLevel, + AutoRegister: &LocalAPIAutoRegisterCfg{ + Enable: ptr.Of(false), + Token: "", + AllowedRanges: nil, + AllowedRangesParsed: nil, + }, }, }, { diff --git a/pkg/models/localapi_swagger.yaml b/pkg/models/localapi_swagger.yaml index 9edfd12b82f..01bbe6f8bde 100644 --- a/pkg/models/localapi_swagger.yaml +++ b/pkg/models/localapi_swagger.yaml @@ -312,6 +312,9 @@ paths: '201': description: Watcher Created headers: {} + 
'202': + description: Watcher Validated + headers: {} '400': description: "400 response" schema: @@ -726,6 +729,10 @@ definitions: password: type: string format: password + registration_token: + type: string + minLength: 32 + maxLength: 255 required: - machine_id - password diff --git a/pkg/models/watcher_registration_request.go b/pkg/models/watcher_registration_request.go index 8be802ea3e7..673f0d59b9e 100644 --- a/pkg/models/watcher_registration_request.go +++ b/pkg/models/watcher_registration_request.go @@ -27,6 +27,11 @@ type WatcherRegistrationRequest struct { // Required: true // Format: password Password *strfmt.Password `json:"password"` + + // registration token + // Max Length: 255 + // Min Length: 32 + RegistrationToken string `json:"registration_token,omitempty"` } // Validate validates this watcher registration request @@ -41,6 +46,10 @@ func (m *WatcherRegistrationRequest) Validate(formats strfmt.Registry) error { res = append(res, err) } + if err := m.validateRegistrationToken(formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { return errors.CompositeValidationError(res...) 
} @@ -69,6 +78,22 @@ func (m *WatcherRegistrationRequest) validatePassword(formats strfmt.Registry) e return nil } +func (m *WatcherRegistrationRequest) validateRegistrationToken(formats strfmt.Registry) error { + if swag.IsZero(m.RegistrationToken) { // not required + return nil + } + + if err := validate.MinLength("registration_token", "body", m.RegistrationToken, 32); err != nil { + return err + } + + if err := validate.MaxLength("registration_token", "body", m.RegistrationToken, 255); err != nil { + return err + } + + return nil +} + // ContextValidate validates this watcher registration request based on context it is used func (m *WatcherRegistrationRequest) ContextValidate(ctx context.Context, formats strfmt.Registry) error { return nil diff --git a/test/bats/01_cscli.bats b/test/bats/01_cscli.bats index bda2362c02a..264870501a5 100644 --- a/test/bats/01_cscli.bats +++ b/test/bats/01_cscli.bats @@ -209,109 +209,6 @@ teardown() { rm -rf -- "${backupdir:?}" } -@test "cscli lapi status" { - rune -0 ./instance-crowdsec start - rune -0 cscli lapi status - - assert_output --partial "Loaded credentials from" - assert_output --partial "Trying to authenticate with username" - assert_output --partial "You can successfully interact with Local API (LAPI)" -} - -@test "cscli - missing LAPI credentials file" { - LOCAL_API_CREDENTIALS=$(config_get '.api.client.credentials_path') - rm -f "$LOCAL_API_CREDENTIALS" - rune -1 cscli lapi status - assert_stderr --partial "loading api client: while reading yaml file: open ${LOCAL_API_CREDENTIALS}: no such file or directory" - - rune -1 cscli alerts list - assert_stderr --partial "loading api client: while reading yaml file: open ${LOCAL_API_CREDENTIALS}: no such file or directory" - - rune -1 cscli decisions list - assert_stderr --partial "loading api client: while reading yaml file: open ${LOCAL_API_CREDENTIALS}: no such file or directory" -} - -@test "cscli - empty LAPI credentials file" { - LOCAL_API_CREDENTIALS=$(config_get 
'.api.client.credentials_path') - : > "$LOCAL_API_CREDENTIALS" - rune -1 cscli lapi status - assert_stderr --partial "no credentials or URL found in api client configuration '${LOCAL_API_CREDENTIALS}'" - - rune -1 cscli alerts list - assert_stderr --partial "no credentials or URL found in api client configuration '${LOCAL_API_CREDENTIALS}'" - - rune -1 cscli decisions list - assert_stderr --partial "no credentials or URL found in api client configuration '${LOCAL_API_CREDENTIALS}'" -} - -@test "cscli - LAPI credentials file can reference env variables" { - LOCAL_API_CREDENTIALS=$(config_get '.api.client.credentials_path') - URL=$(config_get "$LOCAL_API_CREDENTIALS" '.url') - export URL - LOGIN=$(config_get "$LOCAL_API_CREDENTIALS" '.login') - export LOGIN - PASSWORD=$(config_get "$LOCAL_API_CREDENTIALS" '.password') - export PASSWORD - - # shellcheck disable=SC2016 - echo '{"url":"$URL","login":"$LOGIN","password":"$PASSWORD"}' > "$LOCAL_API_CREDENTIALS".local - - config_set '.crowdsec_service.enable=false' - rune -0 ./instance-crowdsec start - - rune -0 cscli lapi status - assert_output --partial "You can successfully interact with Local API (LAPI)" - - rm "$LOCAL_API_CREDENTIALS".local - - # shellcheck disable=SC2016 - config_set "$LOCAL_API_CREDENTIALS" '.url="$URL"' - # shellcheck disable=SC2016 - config_set "$LOCAL_API_CREDENTIALS" '.login="$LOGIN"' - # shellcheck disable=SC2016 - config_set "$LOCAL_API_CREDENTIALS" '.password="$PASSWORD"' - - rune -0 cscli lapi status - assert_output --partial "You can successfully interact with Local API (LAPI)" - - # but if a variable is not defined, there is no specific error message - unset URL - rune -1 cscli lapi status - # shellcheck disable=SC2016 - assert_stderr --partial 'BaseURL must have a trailing slash' -} - -@test "cscli - missing LAPI client settings" { - config_set 'del(.api.client)' - rune -1 cscli lapi status - assert_stderr --partial "loading api client: no API client section in configuration" - - rune -1 
cscli alerts list - assert_stderr --partial "loading api client: no API client section in configuration" - - rune -1 cscli decisions list - assert_stderr --partial "loading api client: no API client section in configuration" -} - -@test "cscli - malformed LAPI url" { - LOCAL_API_CREDENTIALS=$(config_get '.api.client.credentials_path') - config_set "$LOCAL_API_CREDENTIALS" '.url="http://127.0.0.1:-80"' - - rune -1 cscli lapi status -o json - rune -0 jq -r '.msg' <(stderr) - assert_output 'failed to authenticate to Local API (LAPI): parse "http://127.0.0.1:-80/": invalid port ":-80" after host' -} - -@test "cscli - bad LAPI password" { - rune -0 ./instance-crowdsec start - LOCAL_API_CREDENTIALS=$(config_get '.api.client.credentials_path') - config_set "$LOCAL_API_CREDENTIALS" '.password="meh"' - - rune -1 cscli lapi status -o json - rune -0 jq -r '.msg' <(stderr) - assert_output 'failed to authenticate to Local API (LAPI): API error: incorrect Username or Password' -} - @test "'cscli completion' with or without configuration file" { rune -0 cscli completion bash assert_output --partial "# bash completion for cscli" diff --git a/test/bats/01_cscli_lapi.bats b/test/bats/01_cscli_lapi.bats new file mode 100644 index 00000000000..6e876576a6e --- /dev/null +++ b/test/bats/01_cscli_lapi.bats @@ -0,0 +1,213 @@ +#!/usr/bin/env bats +# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: + +set -u + +setup_file() { + load "../lib/setup_file.sh" +} + +teardown_file() { + load "../lib/teardown_file.sh" +} + +setup() { + load "../lib/setup.sh" + load "../lib/bats-file/load.bash" + ./instance-data load + # don't run crowdsec here, not all tests require a running instance +} + +teardown() { + cd "$TEST_DIR" || exit 1 + ./instance-crowdsec stop +} + +#---------- + +@test "cscli lapi status" { + rune -0 ./instance-crowdsec start + rune -0 cscli lapi status + + assert_output --partial "Loaded credentials from" + assert_output --partial "Trying to authenticate with username" + assert_output 
--partial "You can successfully interact with Local API (LAPI)" +} + +@test "cscli - missing LAPI credentials file" { + LOCAL_API_CREDENTIALS=$(config_get '.api.client.credentials_path') + rm -f "$LOCAL_API_CREDENTIALS" + rune -1 cscli lapi status + assert_stderr --partial "loading api client: while reading yaml file: open $LOCAL_API_CREDENTIALS: no such file or directory" + + rune -1 cscli alerts list + assert_stderr --partial "loading api client: while reading yaml file: open $LOCAL_API_CREDENTIALS: no such file or directory" + + rune -1 cscli decisions list + assert_stderr --partial "loading api client: while reading yaml file: open $LOCAL_API_CREDENTIALS: no such file or directory" +} + +@test "cscli - empty LAPI credentials file" { + LOCAL_API_CREDENTIALS=$(config_get '.api.client.credentials_path') + : > "$LOCAL_API_CREDENTIALS" + rune -1 cscli lapi status + assert_stderr --partial "no credentials or URL found in api client configuration '$LOCAL_API_CREDENTIALS'" + + rune -1 cscli alerts list + assert_stderr --partial "no credentials or URL found in api client configuration '$LOCAL_API_CREDENTIALS'" + + rune -1 cscli decisions list + assert_stderr --partial "no credentials or URL found in api client configuration '$LOCAL_API_CREDENTIALS'" +} + +@test "cscli - LAPI credentials file can reference env variables" { + LOCAL_API_CREDENTIALS=$(config_get '.api.client.credentials_path') + URL=$(config_get "$LOCAL_API_CREDENTIALS" '.url') + export URL + LOGIN=$(config_get "$LOCAL_API_CREDENTIALS" '.login') + export LOGIN + PASSWORD=$(config_get "$LOCAL_API_CREDENTIALS" '.password') + export PASSWORD + + # shellcheck disable=SC2016 + echo '{"url":"$URL","login":"$LOGIN","password":"$PASSWORD"}' > "$LOCAL_API_CREDENTIALS".local + + config_set '.crowdsec_service.enable=false' + rune -0 ./instance-crowdsec start + + rune -0 cscli lapi status + assert_output --partial "You can successfully interact with Local API (LAPI)" + + rm "$LOCAL_API_CREDENTIALS".local + + # 
shellcheck disable=SC2016 + config_set "$LOCAL_API_CREDENTIALS" '.url="$URL"' + # shellcheck disable=SC2016 + config_set "$LOCAL_API_CREDENTIALS" '.login="$LOGIN"' + # shellcheck disable=SC2016 + config_set "$LOCAL_API_CREDENTIALS" '.password="$PASSWORD"' + + rune -0 cscli lapi status + assert_output --partial "You can successfully interact with Local API (LAPI)" + + # but if a variable is not defined, there is no specific error message + unset URL + rune -1 cscli lapi status + # shellcheck disable=SC2016 + assert_stderr --partial 'BaseURL must have a trailing slash' +} + +@test "cscli - missing LAPI client settings" { + config_set 'del(.api.client)' + rune -1 cscli lapi status + assert_stderr --partial "loading api client: no API client section in configuration" + + rune -1 cscli alerts list + assert_stderr --partial "loading api client: no API client section in configuration" + + rune -1 cscli decisions list + assert_stderr --partial "loading api client: no API client section in configuration" +} + +@test "cscli - malformed LAPI url" { + LOCAL_API_CREDENTIALS=$(config_get '.api.client.credentials_path') + config_set "$LOCAL_API_CREDENTIALS" '.url="http://127.0.0.1:-80"' + + rune -1 cscli lapi status -o json + rune -0 jq -r '.msg' <(stderr) + assert_output 'failed to authenticate to Local API (LAPI): parse "http://127.0.0.1:-80/": invalid port ":-80" after host' +} + +@test "cscli - bad LAPI password" { + rune -0 ./instance-crowdsec start + LOCAL_API_CREDENTIALS=$(config_get '.api.client.credentials_path') + config_set "$LOCAL_API_CREDENTIALS" '.password="meh"' + + rune -1 cscli lapi status -o json + rune -0 jq -r '.msg' <(stderr) + assert_output 'failed to authenticate to Local API (LAPI): API error: incorrect Username or Password' +} + +@test "cscli lapi register / machines validate" { + rune -1 cscli lapi register + assert_stderr --partial "connection refused" + + LOCAL_API_CREDENTIALS=$(config_get '.api.client.credentials_path') + + rune -0 ./instance-crowdsec 
start + rune -0 cscli lapi register + assert_stderr --partial "Successfully registered to Local API" + assert_stderr --partial "Local API credentials written to '$LOCAL_API_CREDENTIALS'" + assert_stderr --partial "Run 'sudo systemctl reload crowdsec' for the new configuration to be effective." + + LOGIN=$(config_get "$LOCAL_API_CREDENTIALS" '.login') + + rune -0 cscli machines inspect "$LOGIN" -o json + rune -0 jq -r '.isValidated' <(output) + assert_output "null" + + rune -0 cscli machines validate "$LOGIN" + + rune -0 cscli machines inspect "$LOGIN" -o json + rune -0 jq -r '.isValidated' <(output) + assert_output "true" +} + +@test "cscli lapi register --machine" { + rune -0 ./instance-crowdsec start + rune -0 cscli lapi register --machine newmachine + rune -0 cscli machines validate newmachine + rune -0 cscli machines inspect newmachine -o json + rune -0 jq -r '.isValidated' <(output) + assert_output "true" +} + +@test "cscli lapi register --token (ignored)" { + # A token is ignored if the server is not configured with it + rune -1 cscli lapi register --machine newmachine --token meh + assert_stderr --partial "connection refused" + + rune -0 ./instance-crowdsec start + rune -1 cscli lapi register --machine newmachine --token meh + assert_stderr --partial '422 Unprocessable Entity: API error: http code 422, invalid request:' + assert_stderr --partial 'registration_token in body should be at least 32 chars long' + + rune -0 cscli lapi register --machine newmachine --token 12345678901234567890123456789012 + assert_stderr --partial "Successfully registered to Local API" + + rune -0 cscli machines inspect newmachine -o json + rune -0 jq -r '.isValidated' <(output) + assert_output "null" +} + +@test "cscli lapi register --token" { + config_set '.api.server.auto_registration.enabled=true' + config_set '.api.server.auto_registration.token="12345678901234567890123456789012"' + config_set '.api.server.auto_registration.allowed_ranges=["127.0.0.1/32"]' + + rune -0 
./instance-crowdsec start + + rune -1 cscli lapi register --machine malicious --token 123456789012345678901234badtoken + assert_stderr --partial "401 Unauthorized: API error: invalid token for auto registration" + rune -1 cscli machines inspect malicious -o json + assert_stderr --partial "unable to read machine data 'malicious': user 'malicious': user doesn't exist" + + rune -0 cscli lapi register --machine newmachine --token 12345678901234567890123456789012 + assert_stderr --partial "Successfully registered to Local API" + rune -0 cscli machines inspect newmachine -o json + rune -0 jq -r '.isValidated' <(output) + assert_output "true" +} + +@test "cscli lapi register --token (bad source ip)" { + config_set '.api.server.auto_registration.enabled=true' + config_set '.api.server.auto_registration.token="12345678901234567890123456789012"' + config_set '.api.server.auto_registration.allowed_ranges=["127.0.0.2/32"]' + + rune -0 ./instance-crowdsec start + + rune -1 cscli lapi register --machine outofrange --token 12345678901234567890123456789012 + assert_stderr --partial "401 Unauthorized: API error: IP not in allowed range for auto registration" + rune -1 cscli machines inspect outofrange -o json + assert_stderr --partial "unable to read machine data 'outofrange': user 'outofrange': user doesn't exist" +} From dda0fa1dfabf8efeb297e88bbef6a8d41b8e6f8e Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 2 Sep 2024 14:04:14 +0200 Subject: [PATCH 269/318] cscli dashboard: exit earlier on docker (#3210) --- cmd/crowdsec-cli/dashboard.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/cmd/crowdsec-cli/dashboard.go b/cmd/crowdsec-cli/dashboard.go index 31efd074bf9..13cebe3dbd5 100644 --- a/cmd/crowdsec-cli/dashboard.go +++ b/cmd/crowdsec-cli/dashboard.go @@ -71,6 +71,10 @@ cscli dashboard stop cscli dashboard remove `, PersistentPreRunE: func(_ *cobra.Command, _ []string) error { + if version.System == "docker" { 
+ return errors.New("cscli dashboard is not supported whilst running CrowdSec within a container please see: https://github.com/crowdsecurity/example-docker-compose/tree/main/basic") + } + cfg := cli.cfg() if err := require.LAPI(cfg); err != nil { return err @@ -104,10 +108,6 @@ cscli dashboard remove log.Warn("cscli dashboard will be deprecated in version 1.7.0, read more at https://docs.crowdsec.net/blog/cscli_dashboard_deprecation/") - if version.System == "docker" { - return errors.New("cscli dashboard is not supported whilst running CrowdSec within a container please see: https://github.com/crowdsecurity/example-docker-compose/tree/main/basic") - } - return nil }, } From f0d6046c596b3b76ce3265474b58cca09ad761a4 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 2 Sep 2024 14:30:25 +0200 Subject: [PATCH 270/318] cscli refact: package cliitem (#3204) --- .../{hubappsec.go => cliitem/appsec.go} | 6 +++--- .../{hubcollection.go => cliitem/collection.go} | 4 ++-- .../{hubcontext.go => cliitem/context.go} | 4 ++-- cmd/crowdsec-cli/{ => cliitem}/hubscenario.go | 4 ++-- cmd/crowdsec-cli/{itemcli.go => cliitem/item.go} | 5 ++++- .../{hubparser.go => cliitem/parser.go} | 4 ++-- .../postoverflow.go} | 4 ++-- .../{item_suggest.go => cliitem/suggest.go} | 2 +- cmd/crowdsec-cli/main.go | 15 ++++++++------- 9 files changed, 26 insertions(+), 22 deletions(-) rename cmd/crowdsec-cli/{hubappsec.go => cliitem/appsec.go} (96%) rename cmd/crowdsec-cli/{hubcollection.go => cliitem/collection.go} (95%) rename cmd/crowdsec-cli/{hubcontext.go => cliitem/context.go} (94%) rename cmd/crowdsec-cli/{ => cliitem}/hubscenario.go (95%) rename cmd/crowdsec-cli/{itemcli.go => cliitem/item.go} (99%) rename cmd/crowdsec-cli/{hubparser.go => cliitem/parser.go} (95%) rename cmd/crowdsec-cli/{hubpostoverflow.go => cliitem/postoverflow.go} (95%) rename cmd/crowdsec-cli/{item_suggest.go => cliitem/suggest.go} (99%) diff --git a/cmd/crowdsec-cli/hubappsec.go 
b/cmd/crowdsec-cli/cliitem/appsec.go similarity index 96% rename from cmd/crowdsec-cli/hubappsec.go rename to cmd/crowdsec-cli/cliitem/appsec.go index 1df3212f941..db567f86a32 100644 --- a/cmd/crowdsec-cli/hubappsec.go +++ b/cmd/crowdsec-cli/cliitem/appsec.go @@ -1,4 +1,4 @@ -package main +package cliitem import ( "fmt" @@ -13,7 +13,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -func NewCLIAppsecConfig(cfg configGetter) *cliItem { +func NewAppsecConfig(cfg configGetter) *cliItem { return &cliItem{ cfg: cfg, name: cwhub.APPSEC_CONFIGS, @@ -47,7 +47,7 @@ cscli appsec-configs list crowdsecurity/vpatch`, } } -func NewCLIAppsecRule(cfg configGetter) *cliItem { +func NewAppsecRule(cfg configGetter) *cliItem { inspectDetail := func(item *cwhub.Item) error { // Only show the converted rules in human mode if cfg().Cscli.Output != "human" { diff --git a/cmd/crowdsec-cli/hubcollection.go b/cmd/crowdsec-cli/cliitem/collection.go similarity index 95% rename from cmd/crowdsec-cli/hubcollection.go rename to cmd/crowdsec-cli/cliitem/collection.go index 655b36eb1b8..ea91c1e537a 100644 --- a/cmd/crowdsec-cli/hubcollection.go +++ b/cmd/crowdsec-cli/cliitem/collection.go @@ -1,10 +1,10 @@ -package main +package cliitem import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -func NewCLICollection(cfg configGetter) *cliItem { +func NewCollection(cfg configGetter) *cliItem { return &cliItem{ cfg: cfg, name: cwhub.COLLECTIONS, diff --git a/cmd/crowdsec-cli/hubcontext.go b/cmd/crowdsec-cli/cliitem/context.go similarity index 94% rename from cmd/crowdsec-cli/hubcontext.go rename to cmd/crowdsec-cli/cliitem/context.go index 2a777327379..7d110b8203d 100644 --- a/cmd/crowdsec-cli/hubcontext.go +++ b/cmd/crowdsec-cli/cliitem/context.go @@ -1,10 +1,10 @@ -package main +package cliitem import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -func NewCLIContext(cfg configGetter) *cliItem { +func NewContext(cfg configGetter) *cliItem { return &cliItem{ cfg: cfg, name: 
cwhub.CONTEXTS, diff --git a/cmd/crowdsec-cli/hubscenario.go b/cmd/crowdsec-cli/cliitem/hubscenario.go similarity index 95% rename from cmd/crowdsec-cli/hubscenario.go rename to cmd/crowdsec-cli/cliitem/hubscenario.go index 4434b9a2c45..a5e854b3c82 100644 --- a/cmd/crowdsec-cli/hubscenario.go +++ b/cmd/crowdsec-cli/cliitem/hubscenario.go @@ -1,10 +1,10 @@ -package main +package cliitem import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -func NewCLIScenario(cfg configGetter) *cliItem { +func NewScenario(cfg configGetter) *cliItem { return &cliItem{ cfg: cfg, name: cwhub.SCENARIOS, diff --git a/cmd/crowdsec-cli/itemcli.go b/cmd/crowdsec-cli/cliitem/item.go similarity index 99% rename from cmd/crowdsec-cli/itemcli.go rename to cmd/crowdsec-cli/cliitem/item.go index a5629b425b9..28828eb9c95 100644 --- a/cmd/crowdsec-cli/itemcli.go +++ b/cmd/crowdsec-cli/cliitem/item.go @@ -1,4 +1,4 @@ -package main +package cliitem import ( "cmp" @@ -18,6 +18,7 @@ import ( "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clihub" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/reload" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) @@ -30,6 +31,8 @@ type cliHelp struct { example string } +type configGetter func() *csconfig.Config + type cliItem struct { cfg configGetter name string // plural, as used in the hub index diff --git a/cmd/crowdsec-cli/hubparser.go b/cmd/crowdsec-cli/cliitem/parser.go similarity index 95% rename from cmd/crowdsec-cli/hubparser.go rename to cmd/crowdsec-cli/cliitem/parser.go index cc856cbedb9..bc1d96bdaf0 100644 --- a/cmd/crowdsec-cli/hubparser.go +++ b/cmd/crowdsec-cli/cliitem/parser.go @@ -1,10 +1,10 @@ -package main +package cliitem import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -func NewCLIParser(cfg configGetter) *cliItem { +func NewParser(cfg configGetter) *cliItem { return &cliItem{ cfg: cfg, name: cwhub.PARSERS, diff 
--git a/cmd/crowdsec-cli/hubpostoverflow.go b/cmd/crowdsec-cli/cliitem/postoverflow.go similarity index 95% rename from cmd/crowdsec-cli/hubpostoverflow.go rename to cmd/crowdsec-cli/cliitem/postoverflow.go index 3fd45fd113d..ea53aef327d 100644 --- a/cmd/crowdsec-cli/hubpostoverflow.go +++ b/cmd/crowdsec-cli/cliitem/postoverflow.go @@ -1,10 +1,10 @@ -package main +package cliitem import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -func NewCLIPostOverflow(cfg configGetter) *cliItem { +func NewPostOverflow(cfg configGetter) *cliItem { return &cliItem{ cfg: cfg, name: cwhub.POSTOVERFLOWS, diff --git a/cmd/crowdsec-cli/item_suggest.go b/cmd/crowdsec-cli/cliitem/suggest.go similarity index 99% rename from cmd/crowdsec-cli/item_suggest.go rename to cmd/crowdsec-cli/cliitem/suggest.go index 7d3e1e728ae..5b080722af9 100644 --- a/cmd/crowdsec-cli/item_suggest.go +++ b/cmd/crowdsec-cli/cliitem/suggest.go @@ -1,4 +1,4 @@ -package main +package cliitem import ( "fmt" diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index 2a1f5ac7ebe..6f8e93e463c 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -20,6 +20,7 @@ import ( "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cliexplain" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clihub" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clihubtest" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cliitem" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clilapi" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/climachine" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/climetrics" @@ -270,13 +271,13 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(clinotifications.New(cli.cfg).NewCommand()) cmd.AddCommand(clisupport.New(cli.cfg).NewCommand()) cmd.AddCommand(clipapi.New(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLICollection(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLIParser(cli.cfg).NewCommand()) - 
cmd.AddCommand(NewCLIScenario(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLIPostOverflow(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLIContext(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLIAppsecConfig(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLIAppsecRule(cli.cfg).NewCommand()) + cmd.AddCommand(cliitem.NewCollection(cli.cfg).NewCommand()) + cmd.AddCommand(cliitem.NewParser(cli.cfg).NewCommand()) + cmd.AddCommand(cliitem.NewScenario(cli.cfg).NewCommand()) + cmd.AddCommand(cliitem.NewPostOverflow(cli.cfg).NewCommand()) + cmd.AddCommand(cliitem.NewContext(cli.cfg).NewCommand()) + cmd.AddCommand(cliitem.NewAppsecConfig(cli.cfg).NewCommand()) + cmd.AddCommand(cliitem.NewAppsecRule(cli.cfg).NewCommand()) if fflag.CscliSetup.IsEnabled() { cmd.AddCommand(clisetup.New(cli.cfg).NewCommand()) From ae5e99ff13853fe48c0b6b54a806581387a01d9f Mon Sep 17 00:00:00 2001 From: blotus Date: Tue, 3 Sep 2024 12:08:18 +0200 Subject: [PATCH 271/318] update grokky and deps (#3206) * update grokky and deps * mod tidy --------- Co-authored-by: marco --- go.mod | 10 +++++----- go.sum | 20 ++++++++++---------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/go.mod b/go.mod index 9377e116519..bce1ca12316 100644 --- a/go.mod +++ b/go.mod @@ -26,7 +26,7 @@ require ( github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607 github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 github.com/crowdsecurity/go-cs-lib v0.0.15 - github.com/crowdsecurity/grokky v0.2.1 + github.com/crowdsecurity/grokky v0.2.2 github.com/crowdsecurity/machineid v1.0.2 github.com/davecgh/go-spew v1.1.1 github.com/dghubble/sling v1.4.2 @@ -80,11 +80,11 @@ require ( github.com/spf13/cobra v1.8.0 github.com/stretchr/testify v1.8.4 github.com/umahmood/haversine v0.0.0-20151105152445-808ab04add26 - github.com/wasilibs/go-re2 v1.3.0 + github.com/wasilibs/go-re2 v1.6.0 github.com/xhit/go-simple-mail/v2 v2.16.0 golang.org/x/crypto v0.22.0 golang.org/x/mod v0.15.0 - golang.org/x/sys v0.19.0 + 
golang.org/x/sys v0.24.0 golang.org/x/text v0.14.0 google.golang.org/grpc v1.56.3 google.golang.org/protobuf v1.33.0 @@ -152,7 +152,7 @@ require ( github.com/klauspost/cpuid/v2 v2.2.6 // indirect github.com/leodido/go-urn v1.3.0 // indirect github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect - github.com/magefile/mage v1.15.0 // indirect + github.com/magefile/mage v1.15.1-0.20230912152418-9f54e0f83e2a // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-runewidth v0.0.15 // indirect @@ -185,7 +185,7 @@ require ( github.com/shopspring/decimal v1.2.0 // indirect github.com/spf13/cast v1.3.1 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/tetratelabs/wazero v1.2.1 // indirect + github.com/tetratelabs/wazero v1.8.0 // indirect github.com/tidwall/gjson v1.17.0 // indirect github.com/tidwall/match v1.1.1 // indirect github.com/tidwall/pretty v1.2.1 // indirect diff --git a/go.sum b/go.sum index b76c7fccd1a..eec85b5b2e9 100644 --- a/go.sum +++ b/go.sum @@ -105,8 +105,8 @@ github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 h1:r97WNVC30Uen github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26/go.mod h1:zpv7r+7KXwgVUZnUNjyP22zc/D7LKjyoY02weH2RBbk= github.com/crowdsecurity/go-cs-lib v0.0.15 h1:zNWqOPVLHgKUstlr6clom9d66S0eIIW66jQG3Y7FEvo= github.com/crowdsecurity/go-cs-lib v0.0.15/go.mod h1:ePyQyJBxp1W/1bq4YpVAilnLSz7HkzmtI7TRhX187EU= -github.com/crowdsecurity/grokky v0.2.1 h1:t4VYnDlAd0RjDM2SlILalbwfCrQxtJSMGdQOR0zwkE4= -github.com/crowdsecurity/grokky v0.2.1/go.mod h1:33usDIYzGDsgX1kHAThCbseso6JuWNJXOzRQDGXHtWM= +github.com/crowdsecurity/grokky v0.2.2 h1:yALsI9zqpDArYzmSSxfBq2dhYuGUTKMJq8KOEIAsuo4= +github.com/crowdsecurity/grokky v0.2.2/go.mod h1:33usDIYzGDsgX1kHAThCbseso6JuWNJXOzRQDGXHtWM= github.com/crowdsecurity/machineid v1.0.2 h1:wpkpsUghJF8Khtmn/tg6GxgdhLA1Xflerh5lirI+bdc= github.com/crowdsecurity/machineid v1.0.2/go.mod 
h1:XWUSlnS0R0+u/JK5ulidwlbceNT3ZOCKteoVQEn6Luo= github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -465,8 +465,8 @@ github.com/lithammer/dedent v1.1.0 h1:VNzHMVCBNG1j0fh3OrsFRkVUwStdDArbgBWoPAffkt github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= -github.com/magefile/mage v1.15.0 h1:BvGheCMAsG3bWUDbZ8AyXXpCNwU9u5CB6sM+HNb9HYg= -github.com/magefile/mage v1.15.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= +github.com/magefile/mage v1.15.1-0.20230912152418-9f54e0f83e2a h1:tdPcGgyiH0K+SbsJBBm2oPyEIOTAvLBwD9TuUwVtZho= +github.com/magefile/mage v1.15.1-0.20230912152418-9f54e0f83e2a/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -661,8 +661,8 @@ github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/tetratelabs/wazero v1.2.1 h1:J4X2hrGzJvt+wqltuvcSjHQ7ujQxA9gb6PeMs4qlUWs= -github.com/tetratelabs/wazero v1.2.1/go.mod h1:wYx2gNRg8/WihJfSDxA1TIL8H+GkfLYm+bIfbblu9VQ= +github.com/tetratelabs/wazero v1.8.0 h1:iEKu0d4c2Pd+QSRieYbnQC9yiFlMS9D+Jr0LsRmcF4g= +github.com/tetratelabs/wazero 
v1.8.0/go.mod h1:yAI0XTsMBhREkM/YDAK/zNou3GoiAce1P6+rp/wQhjs= github.com/tidwall/gjson v1.17.0 h1:/Jocvlh98kcTfpN2+JzGQWQcqrPQwDrVEMApx/M5ZwM= github.com/tidwall/gjson v1.17.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= @@ -690,8 +690,8 @@ github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaU github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= -github.com/wasilibs/go-re2 v1.3.0 h1:LFhBNzoStM3wMie6rN2slD1cuYH2CGiHpvNL3UtcsMw= -github.com/wasilibs/go-re2 v1.3.0/go.mod h1:AafrCXVvGRJJOImMajgJ2M7rVmWyisVK7sFshbxnVrg= +github.com/wasilibs/go-re2 v1.6.0 h1:CLlhDebt38wtl/zz4ww+hkXBMcxjrKFvTDXzFW2VOz8= +github.com/wasilibs/go-re2 v1.6.0/go.mod h1:prArCyErsypRBI/jFAFJEbzyHzjABKqkzlidF0SNA04= github.com/wasilibs/nottinygc v0.4.0 h1:h1TJMihMC4neN6Zq+WKpLxgd9xCFMw7O9ETLwY2exJQ= github.com/wasilibs/nottinygc v0.4.0/go.mod h1:oDcIotskuYNMpqMF23l7Z8uzD4TC0WXHK8jetlB3HIo= github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= @@ -841,8 +841,8 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= -golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= +golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term 
v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= From 5a50fd06bb9d7b6ac34dc8db5dee7662285c48af Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 3 Sep 2024 12:25:30 +0200 Subject: [PATCH 272/318] refact: reduce code nesting (acquisition/file, tests) (#3200) * reduce if nesting * lint: gocritic (nestingReduce) * lint --- .golangci.yml | 2 +- cmd/crowdsec-cli/cliconsole/console.go | 32 ++-- pkg/acquisition/modules/file/file.go | 175 +++++++++--------- .../internal/parser/rfc3164/parse_test.go | 162 ++++++---------- pkg/exprhelpers/debugger_test.go | 36 ++-- pkg/leakybucket/manager_load.go | 60 +++--- 6 files changed, 218 insertions(+), 249 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index c59ab372799..78b666d25b4 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -27,7 +27,7 @@ linters-settings: nestif: # lower this after refactoring - min-complexity: 20 + min-complexity: 19 nlreturn: block-size: 5 diff --git a/cmd/crowdsec-cli/cliconsole/console.go b/cmd/crowdsec-cli/cliconsole/console.go index 995a082c514..d15f25eaf69 100644 --- a/cmd/crowdsec-cli/cliconsole/console.go +++ b/cmd/crowdsec-cli/cliconsole/console.go @@ -31,12 +31,12 @@ import ( type configGetter func() *csconfig.Config type cliConsole struct { - cfg configGetter + cfg configGetter } func New(cfg configGetter) *cliConsole { return &cliConsole{ - cfg: cfg, + cfg: cfg, } } @@ -88,23 +88,25 @@ func (cli *cliConsole) enroll(key string, name string, overwrite bool, tags []st } for _, availableOpt := range csconfig.CONSOLE_CONFIGS { - if opt == availableOpt { - valid = true - enable := true - - for _, enabledOpt := range enableOpts { - if opt == enabledOpt { - enable = false - continue - } - } + if opt != 
availableOpt { + continue + } + + valid = true + enable := true - if enable { - enableOpts = append(enableOpts, opt) + for _, enabledOpt := range enableOpts { + if opt == enabledOpt { + enable = false + continue } + } - break + if enable { + enableOpts = append(enableOpts, opt) } + + break } if !valid { diff --git a/pkg/acquisition/modules/file/file.go b/pkg/acquisition/modules/file/file.go index 34a7052f46f..4f7880baa89 100644 --- a/pkg/acquisition/modules/file/file.go +++ b/pkg/acquisition/modules/file/file.go @@ -426,118 +426,122 @@ func (f *FileSource) monitorNewFiles(out chan types.Event, t *tomb.Tomb) error { return nil } - if event.Op&fsnotify.Create == fsnotify.Create { - fi, err := os.Stat(event.Name) - if err != nil { - logger.Errorf("Could not stat() new file %s, ignoring it : %s", event.Name, err) - continue - } + if event.Op&fsnotify.Create != fsnotify.Create { + continue + } - if fi.IsDir() { - continue - } + fi, err := os.Stat(event.Name) + if err != nil { + logger.Errorf("Could not stat() new file %s, ignoring it : %s", event.Name, err) + continue + } - logger.Debugf("Detected new file %s", event.Name) + if fi.IsDir() { + continue + } - matched := false + logger.Debugf("Detected new file %s", event.Name) - for _, pattern := range f.config.Filenames { - logger.Debugf("Matching %s with %s", pattern, event.Name) + matched := false - matched, err = filepath.Match(pattern, event.Name) - if err != nil { - logger.Errorf("Could not match pattern : %s", err) - continue - } + for _, pattern := range f.config.Filenames { + logger.Debugf("Matching %s with %s", pattern, event.Name) - if matched { - logger.Debugf("Matched %s with %s", pattern, event.Name) - break - } + matched, err = filepath.Match(pattern, event.Name) + if err != nil { + logger.Errorf("Could not match pattern : %s", err) + continue } - if !matched { - continue + if matched { + logger.Debugf("Matched %s with %s", pattern, event.Name) + break } + } - // before opening the file, check if we need to 
specifically avoid it. (XXX) - skip := false + if !matched { + continue + } - for _, pattern := range f.exclude_regexps { - if pattern.MatchString(event.Name) { - f.logger.Infof("file %s matches exclusion pattern %s, skipping", event.Name, pattern.String()) + // before opening the file, check if we need to specifically avoid it. (XXX) + skip := false - skip = true + for _, pattern := range f.exclude_regexps { + if pattern.MatchString(event.Name) { + f.logger.Infof("file %s matches exclusion pattern %s, skipping", event.Name, pattern.String()) - break - } - } + skip = true - if skip { - continue + break } + } - f.tailMapMutex.RLock() - if f.tails[event.Name] { - f.tailMapMutex.RUnlock() - // we already have a tail on it, do not start a new one - logger.Debugf("Already tailing file %s, not creating a new tail", event.Name) + if skip { + continue + } - break - } + f.tailMapMutex.RLock() + if f.tails[event.Name] { f.tailMapMutex.RUnlock() - // cf. https://github.com/crowdsecurity/crowdsec/issues/1168 - // do not rely on stat, reclose file immediately as it's opened by Tail - fd, err := os.Open(event.Name) - if err != nil { - f.logger.Errorf("unable to read %s : %s", event.Name, err) - continue - } - if err := fd.Close(); err != nil { - f.logger.Errorf("unable to close %s : %s", event.Name, err) - continue - } + // we already have a tail on it, do not start a new one + logger.Debugf("Already tailing file %s, not creating a new tail", event.Name) - pollFile := false - if f.config.PollWithoutInotify != nil { - pollFile = *f.config.PollWithoutInotify - } else { - networkFS, fsType, err := types.IsNetworkFS(event.Name) - if err != nil { - f.logger.Warningf("Could not get fs type for %s : %s", event.Name, err) - } - f.logger.Debugf("fs for %s is network: %t (%s)", event.Name, networkFS, fsType) - if networkFS { - pollFile = true - } - } + break + } + f.tailMapMutex.RUnlock() + // cf. 
https://github.com/crowdsecurity/crowdsec/issues/1168 + // do not rely on stat, reclose file immediately as it's opened by Tail + fd, err := os.Open(event.Name) + if err != nil { + f.logger.Errorf("unable to read %s : %s", event.Name, err) + continue + } - filink, err := os.Lstat(event.Name) + if err = fd.Close(); err != nil { + f.logger.Errorf("unable to close %s : %s", event.Name, err) + continue + } + pollFile := false + if f.config.PollWithoutInotify != nil { + pollFile = *f.config.PollWithoutInotify + } else { + networkFS, fsType, err := types.IsNetworkFS(event.Name) if err != nil { - logger.Errorf("Could not lstat() new file %s, ignoring it : %s", event.Name, err) - continue + f.logger.Warningf("Could not get fs type for %s : %s", event.Name, err) } - if filink.Mode()&os.ModeSymlink == os.ModeSymlink && !pollFile { - logger.Warnf("File %s is a symlink, but inotify polling is enabled. Crowdsec will not be able to detect rotation. Consider setting poll_without_inotify to true in your configuration", event.Name) - } + f.logger.Debugf("fs for %s is network: %t (%s)", event.Name, networkFS, fsType) - //Slightly different parameters for Location, as we want to read the first lines of the newly created file - tail, err := tail.TailFile(event.Name, tail.Config{ReOpen: true, Follow: true, Poll: pollFile, Location: &tail.SeekInfo{Offset: 0, Whence: io.SeekStart}}) - if err != nil { - logger.Errorf("Could not start tailing file %s : %s", event.Name, err) - break + if networkFS { + pollFile = true } + } + + filink, err := os.Lstat(event.Name) + if err != nil { + logger.Errorf("Could not lstat() new file %s, ignoring it : %s", event.Name, err) + continue + } - f.tailMapMutex.Lock() - f.tails[event.Name] = true - f.tailMapMutex.Unlock() - t.Go(func() error { - defer trace.CatchPanic("crowdsec/acquis/tailfile") - return f.tailFile(out, t, tail) - }) + if filink.Mode()&os.ModeSymlink == os.ModeSymlink && !pollFile { + logger.Warnf("File %s is a symlink, but inotify polling 
is enabled. Crowdsec will not be able to detect rotation. Consider setting poll_without_inotify to true in your configuration", event.Name) } + + // Slightly different parameters for Location, as we want to read the first lines of the newly created file + tail, err := tail.TailFile(event.Name, tail.Config{ReOpen: true, Follow: true, Poll: pollFile, Location: &tail.SeekInfo{Offset: 0, Whence: io.SeekStart}}) + if err != nil { + logger.Errorf("Could not start tailing file %s : %s", event.Name, err) + break + } + + f.tailMapMutex.Lock() + f.tails[event.Name] = true + f.tailMapMutex.Unlock() + t.Go(func() error { + defer trace.CatchPanic("crowdsec/acquis/tailfile") + return f.tailFile(out, t, tail) + }) case err, ok := <-f.watcher.Errors: if !ok { return nil @@ -571,8 +575,9 @@ func (f *FileSource) tailFile(out chan types.Event, t *tomb.Tomb, tail *tail.Tai return nil case <-tail.Dying(): // our tailer is dying - err := tail.Err() errMsg := fmt.Sprintf("file reader of %s died", tail.Filename) + + err := tail.Err() if err != nil { errMsg = fmt.Sprintf(errMsg+" : %s", err) } diff --git a/pkg/acquisition/modules/syslog/internal/parser/rfc3164/parse_test.go b/pkg/acquisition/modules/syslog/internal/parser/rfc3164/parse_test.go index 8fb5089a61f..3af6614bce6 100644 --- a/pkg/acquisition/modules/syslog/internal/parser/rfc3164/parse_test.go +++ b/pkg/acquisition/modules/syslog/internal/parser/rfc3164/parse_test.go @@ -4,6 +4,10 @@ import ( "fmt" "testing" "time" + + "github.com/stretchr/testify/assert" + + "github.com/crowdsecurity/go-cs-lib/cstest" ) func TestPri(t *testing.T) { @@ -26,28 +30,20 @@ func TestPri(t *testing.T) { r := &RFC3164{} r.buf = []byte(test.input) r.len = len(r.buf) + err := r.parsePRI() - if err != nil { - if test.expectedErr != "" { - if err.Error() != test.expectedErr { - t.Errorf("expected error %s, got %s", test.expectedErr, err) - } - } else { - t.Errorf("unexpected error: %s", err) - } - } else { - if test.expectedErr != "" { - t.Errorf("expected 
error %s, got no error", test.expectedErr) - } else if r.PRI != test.expected { - t.Errorf("expected %d, got %d", test.expected, r.PRI) - } + cstest.RequireErrorContains(t, err, test.expectedErr) + + if test.expectedErr != "" { + return } + + assert.Equal(t, test.expected, r.PRI) }) } } func TestTimestamp(t *testing.T) { - tests := []struct { input string expected string @@ -68,25 +64,19 @@ func TestTimestamp(t *testing.T) { if test.currentYear { opts = append(opts, WithCurrentYear()) } + r := NewRFC3164Parser(opts...) r.buf = []byte(test.input) r.len = len(r.buf) + err := r.parseTimestamp() - if err != nil { - if test.expectedErr != "" { - if err.Error() != test.expectedErr { - t.Errorf("expected error %s, got %s", test.expectedErr, err) - } - } else { - t.Errorf("unexpected error: %s", err) - } - } else { - if test.expectedErr != "" { - t.Errorf("expected error %s, got no error", test.expectedErr) - } else if r.Timestamp.Format(time.RFC3339) != test.expected { - t.Errorf("expected %s, got %s", test.expected, r.Timestamp.Format(time.RFC3339)) - } + cstest.RequireErrorContains(t, err, test.expectedErr) + + if test.expectedErr != "" { + return } + + assert.Equal(t, test.expected, r.Timestamp.Format(time.RFC3339)) }) } } @@ -121,25 +111,19 @@ func TestHostname(t *testing.T) { if test.strictHostname { opts = append(opts, WithStrictHostname()) } + r := NewRFC3164Parser(opts...) 
r.buf = []byte(test.input) r.len = len(r.buf) + err := r.parseHostname() - if err != nil { - if test.expectedErr != "" { - if err.Error() != test.expectedErr { - t.Errorf("expected error %s, got %s", test.expectedErr, err) - } - } else { - t.Errorf("unexpected error: %s", err) - } - } else { - if test.expectedErr != "" { - t.Errorf("expected error %s, got no error", test.expectedErr) - } else if r.Hostname != test.expected { - t.Errorf("expected %s, got %s", test.expected, r.Hostname) - } + cstest.RequireErrorContains(t, err, test.expectedErr) + + if test.expectedErr != "" { + return } + + assert.Equal(t, test.expected, r.Hostname) }) } } @@ -164,27 +148,16 @@ func TestTag(t *testing.T) { r := &RFC3164{} r.buf = []byte(test.input) r.len = len(r.buf) + err := r.parseTag() - if err != nil { - if test.expectedErr != "" { - if err.Error() != test.expectedErr { - t.Errorf("expected error %s, got %s", test.expectedErr, err) - } - } else { - t.Errorf("unexpected error: %s", err) - } - } else { - if test.expectedErr != "" { - t.Errorf("expected error %s, got no error", test.expectedErr) - } else { - if r.Tag != test.expected { - t.Errorf("expected %s, got %s", test.expected, r.Tag) - } - if r.PID != test.expectedPID { - t.Errorf("expected %s, got %s", test.expected, r.Message) - } - } + cstest.RequireErrorContains(t, err, test.expectedErr) + + if test.expectedErr != "" { + return } + + assert.Equal(t, test.expected, r.Tag) + assert.Equal(t, test.expectedPID, r.PID) }) } } @@ -207,22 +180,15 @@ func TestMessage(t *testing.T) { r := &RFC3164{} r.buf = []byte(test.input) r.len = len(r.buf) + err := r.parseMessage() - if err != nil { - if test.expectedErr != "" { - if err.Error() != test.expectedErr { - t.Errorf("expected error %s, got %s", test.expectedErr, err) - } - } else { - t.Errorf("unexpected error: %s", err) - } - } else { - if test.expectedErr != "" { - t.Errorf("expected error %s, got no error", test.expectedErr) - } else if r.Message != test.expected { - 
t.Errorf("expected message %s, got %s", test.expected, r.Tag) - } + cstest.RequireErrorContains(t, err, test.expectedErr) + + if test.expectedErr != "" { + return } + + assert.Equal(t, test.expected, r.Message) }) } } @@ -236,6 +202,7 @@ func TestParse(t *testing.T) { Message string PRI int } + tests := []struct { input string expected expected @@ -326,39 +293,20 @@ func TestParse(t *testing.T) { for _, test := range tests { t.Run(test.input, func(t *testing.T) { r := NewRFC3164Parser(test.opts...) + err := r.Parse([]byte(test.input)) - if err != nil { - if test.expectedErr != "" { - if err.Error() != test.expectedErr { - t.Errorf("expected error '%s', got '%s'", test.expectedErr, err) - } - } else { - t.Errorf("unexpected error: '%s'", err) - } - } else { - if test.expectedErr != "" { - t.Errorf("expected error '%s', got no error", test.expectedErr) - } else { - if r.Timestamp != test.expected.Timestamp { - t.Errorf("expected timestamp '%s', got '%s'", test.expected.Timestamp, r.Timestamp) - } - if r.Hostname != test.expected.Hostname { - t.Errorf("expected hostname '%s', got '%s'", test.expected.Hostname, r.Hostname) - } - if r.Tag != test.expected.Tag { - t.Errorf("expected tag '%s', got '%s'", test.expected.Tag, r.Tag) - } - if r.PID != test.expected.PID { - t.Errorf("expected pid '%s', got '%s'", test.expected.PID, r.PID) - } - if r.Message != test.expected.Message { - t.Errorf("expected message '%s', got '%s'", test.expected.Message, r.Message) - } - if r.PRI != test.expected.PRI { - t.Errorf("expected pri '%d', got '%d'", test.expected.PRI, r.PRI) - } - } + cstest.RequireErrorContains(t, err, test.expectedErr) + + if test.expectedErr != "" { + return } + + assert.Equal(t, test.expected.Timestamp, r.Timestamp) + assert.Equal(t, test.expected.Hostname, r.Hostname) + assert.Equal(t, test.expected.Tag, r.Tag) + assert.Equal(t, test.expected.PID, r.PID) + assert.Equal(t, test.expected.Message, r.Message) + assert.Equal(t, test.expected.PRI, r.PRI) }) } } diff 
--git a/pkg/exprhelpers/debugger_test.go b/pkg/exprhelpers/debugger_test.go index efdcbc1a769..32144454084 100644 --- a/pkg/exprhelpers/debugger_test.go +++ b/pkg/exprhelpers/debugger_test.go @@ -26,6 +26,7 @@ type ExprDbgTest struct { func UpperTwo(params ...any) (any, error) { s := params[0].(string) v := params[1].(string) + return strings.ToUpper(s) + strings.ToUpper(v), nil } @@ -33,6 +34,7 @@ func UpperThree(params ...any) (any, error) { s := params[0].(string) v := params[1].(string) x := params[2].(string) + return strings.ToUpper(s) + strings.ToUpper(v) + strings.ToUpper(x), nil } @@ -41,6 +43,7 @@ func UpperN(params ...any) (any, error) { v := params[1].(string) x := params[2].(string) y := params[3].(string) + return strings.ToUpper(s) + strings.ToUpper(v) + strings.ToUpper(x) + strings.ToUpper(y), nil } @@ -76,9 +79,9 @@ func TestBaseDbg(t *testing.T) { // use '%#v' to dump in golang syntax // use regexp to clear empty/default fields: // [a-z]+: (false|\[\]string\(nil\)|""), - //ConditionResult:(*bool) + // ConditionResult:(*bool) - //Missing multi parametes function + // Missing multi parametes function tests := []ExprDbgTest{ { Name: "nil deref", @@ -272,6 +275,7 @@ func TestBaseDbg(t *testing.T) { } logger := log.WithField("test", "exprhelpers") + for _, test := range tests { if test.LogLevel != 0 { log.SetLevel(test.LogLevel) @@ -308,10 +312,13 @@ func TestBaseDbg(t *testing.T) { t.Fatalf("test %s : unexpected compile error : %s", test.Name, err) } } + if test.Name == "nil deref" { test.Env["nilvar"] = nil } + outdbg, ret, err := RunWithDebug(prog, test.Env, logger) + if test.ExpectedFailRuntime { if err == nil { t.Fatalf("test %s : expected runtime error", test.Name) @@ -321,25 +328,30 @@ func TestBaseDbg(t *testing.T) { t.Fatalf("test %s : unexpected runtime error : %s", test.Name, err) } } + log.SetLevel(log.DebugLevel) DisplayExprDebug(prog, outdbg, logger, ret) + if len(outdbg) != len(test.ExpectedOutputs) { t.Errorf("failed test %s", 
test.Name) t.Errorf("%#v", outdbg) - //out, _ := yaml.Marshal(outdbg) - //fmt.Printf("%s", string(out)) + // out, _ := yaml.Marshal(outdbg) + // fmt.Printf("%s", string(out)) t.Fatalf("test %s : expected %d outputs, got %d", test.Name, len(test.ExpectedOutputs), len(outdbg)) - } + for i, out := range outdbg { - if !reflect.DeepEqual(out, test.ExpectedOutputs[i]) { - spew.Config.DisableMethods = true - t.Errorf("failed test %s", test.Name) - t.Errorf("expected : %#v", test.ExpectedOutputs[i]) - t.Errorf("got : %#v", out) - t.Fatalf("%d/%d : mismatch", i, len(outdbg)) + if reflect.DeepEqual(out, test.ExpectedOutputs[i]) { + // DisplayExprDebug(prog, outdbg, logger, ret) + continue } - //DisplayExprDebug(prog, outdbg, logger, ret) + + spew.Config.DisableMethods = true + + t.Errorf("failed test %s", test.Name) + t.Errorf("expected : %#v", test.ExpectedOutputs[i]) + t.Errorf("got : %#v", out) + t.Fatalf("%d/%d : mismatch", i, len(outdbg)) } } } diff --git a/pkg/leakybucket/manager_load.go b/pkg/leakybucket/manager_load.go index 1d523759f2b..6055a5308b5 100644 --- a/pkg/leakybucket/manager_load.go +++ b/pkg/leakybucket/manager_load.go @@ -509,37 +509,39 @@ func LoadBucketsState(file string, buckets *Buckets, bucketFactories []BucketFac found := false for _, h := range bucketFactories { - if h.Name == v.Name { - log.Debugf("found factory %s/%s -> %s", h.Author, h.Name, h.Description) - // check in which mode the bucket was - if v.Mode == types.TIMEMACHINE { - tbucket = NewTimeMachine(h) - } else if v.Mode == types.LIVE { - tbucket = NewLeaky(h) - } else { - log.Errorf("Unknown bucket type : %d", v.Mode) - } - /*Trying to restore queue state*/ - tbucket.Queue = v.Queue - /*Trying to set the limiter to the saved values*/ - tbucket.Limiter.Load(v.SerializedState) - tbucket.In = make(chan *types.Event) - tbucket.Mapkey = k - tbucket.Signal = make(chan bool, 1) - tbucket.First_ts = v.First_ts - tbucket.Last_ts = v.Last_ts - tbucket.Ovflw_ts = v.Ovflw_ts - tbucket.Total_count = 
v.Total_count - buckets.Bucket_map.Store(k, tbucket) - h.tomb.Go(func() error { - return LeakRoutine(tbucket) - }) - <-tbucket.Signal - - found = true + if h.Name != v.Name { + continue + } - break + log.Debugf("found factory %s/%s -> %s", h.Author, h.Name, h.Description) + // check in which mode the bucket was + if v.Mode == types.TIMEMACHINE { + tbucket = NewTimeMachine(h) + } else if v.Mode == types.LIVE { + tbucket = NewLeaky(h) + } else { + log.Errorf("Unknown bucket type : %d", v.Mode) } + /*Trying to restore queue state*/ + tbucket.Queue = v.Queue + /*Trying to set the limiter to the saved values*/ + tbucket.Limiter.Load(v.SerializedState) + tbucket.In = make(chan *types.Event) + tbucket.Mapkey = k + tbucket.Signal = make(chan bool, 1) + tbucket.First_ts = v.First_ts + tbucket.Last_ts = v.Last_ts + tbucket.Ovflw_ts = v.Ovflw_ts + tbucket.Total_count = v.Total_count + buckets.Bucket_map.Store(k, tbucket) + h.tomb.Go(func() error { + return LeakRoutine(tbucket) + }) + <-tbucket.Signal + + found = true + + break } if !found { From bc6be99b973f34cb5948caa09e4ac329f048cbca Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 3 Sep 2024 12:37:38 +0200 Subject: [PATCH 273/318] cscli refact: package clialert, clidecision (#3203) * cscli refact: package clialert, clidecision * refact: function SanitizeScope() * lint --- cmd/crowdsec-cli/{ => clialert}/alerts.go | 17 +++++--- cmd/crowdsec-cli/clialert/sanitize.go | 26 ++++++++++++ .../{alerts_table.go => clialert/table.go} | 2 +- .../{ => clidecision}/decisions.go | 24 +++++++---- .../{ => clidecision}/decisions_import.go | 6 +-- .../{ => clidecision}/decisions_table.go | 2 +- cmd/crowdsec-cli/main.go | 6 ++- cmd/crowdsec-cli/utils.go | 40 ------------------- pkg/apiserver/controllers/v1/alerts.go | 22 ++-------- pkg/types/event.go | 38 ++++++++++++++---- test/bats/90_decisions.bats | 4 +- 11 files changed, 98 insertions(+), 89 deletions(-) rename cmd/crowdsec-cli/{ => 
clialert}/alerts.go (97%) create mode 100644 cmd/crowdsec-cli/clialert/sanitize.go rename cmd/crowdsec-cli/{alerts_table.go => clialert/table.go} (99%) rename cmd/crowdsec-cli/{ => clidecision}/decisions.go (96%) rename cmd/crowdsec-cli/{ => clidecision}/decisions_import.go (99%) rename cmd/crowdsec-cli/{ => clidecision}/decisions_table.go (98%) delete mode 100644 cmd/crowdsec-cli/utils.go diff --git a/cmd/crowdsec-cli/alerts.go b/cmd/crowdsec-cli/clialert/alerts.go similarity index 97% rename from cmd/crowdsec-cli/alerts.go rename to cmd/crowdsec-cli/clialert/alerts.go index 37f9ab435c7..13013153a79 100644 --- a/cmd/crowdsec-cli/alerts.go +++ b/cmd/crowdsec-cli/clialert/alerts.go @@ -1,4 +1,4 @@ -package main +package clialert import ( "context" @@ -24,6 +24,7 @@ import ( "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/apiclient" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/types" @@ -183,12 +184,14 @@ func (cli *cliAlerts) displayOneAlert(alert *models.Alert, withDetail bool) erro return nil } +type configGetter func() *csconfig.Config + type cliAlerts struct { client *apiclient.ApiClient cfg configGetter } -func NewCLIAlerts(getconfig configGetter) *cliAlerts { +func New(getconfig configGetter) *cliAlerts { return &cliAlerts{ cfg: getconfig, } @@ -235,8 +238,10 @@ func (cli *cliAlerts) NewCommand() *cobra.Command { } func (cli *cliAlerts) list(alertListFilter apiclient.AlertsListOpts, limit *int, contained *bool, printMachine bool) error { - if err := manageCliDecisionAlerts(alertListFilter.IPEquals, alertListFilter.RangeEquals, - alertListFilter.ScopeEquals, alertListFilter.ValueEquals); err != nil { + var err error + + *alertListFilter.ScopeEquals, err = SanitizeScope(*alertListFilter.ScopeEquals, 
*alertListFilter.IPEquals, *alertListFilter.RangeEquals) + if err != nil { return err } @@ -378,8 +383,8 @@ func (cli *cliAlerts) delete(alertDeleteFilter apiclient.AlertsDeleteOpts, Activ var err error if !AlertDeleteAll { - if err = manageCliDecisionAlerts(alertDeleteFilter.IPEquals, alertDeleteFilter.RangeEquals, - alertDeleteFilter.ScopeEquals, alertDeleteFilter.ValueEquals); err != nil { + *alertDeleteFilter.ScopeEquals, err = SanitizeScope(*alertDeleteFilter.ScopeEquals, *alertDeleteFilter.IPEquals, *alertDeleteFilter.RangeEquals) + if err != nil { return err } diff --git a/cmd/crowdsec-cli/clialert/sanitize.go b/cmd/crowdsec-cli/clialert/sanitize.go new file mode 100644 index 00000000000..87b110649da --- /dev/null +++ b/cmd/crowdsec-cli/clialert/sanitize.go @@ -0,0 +1,26 @@ +package clialert + +import ( + "fmt" + "net" + + "github.com/crowdsecurity/crowdsec/pkg/types" +) + +// SanitizeScope validates ip and range and sets the scope accordingly to our case convention. +func SanitizeScope(scope, ip, ipRange string) (string, error) { + if ipRange != "" { + _, _, err := net.ParseCIDR(ipRange) + if err != nil { + return "", fmt.Errorf("%s is not a valid range", ipRange) + } + } + + if ip != "" { + if net.ParseIP(ip) == nil { + return "", fmt.Errorf("%s is not a valid ip", ip) + } + } + + return types.NormalizeScope(scope), nil +} diff --git a/cmd/crowdsec-cli/alerts_table.go b/cmd/crowdsec-cli/clialert/table.go similarity index 99% rename from cmd/crowdsec-cli/alerts_table.go rename to cmd/crowdsec-cli/clialert/table.go index 29383457ced..5dec63ec152 100644 --- a/cmd/crowdsec-cli/alerts_table.go +++ b/cmd/crowdsec-cli/clialert/table.go @@ -1,4 +1,4 @@ -package main +package clialert import ( "fmt" diff --git a/cmd/crowdsec-cli/decisions.go b/cmd/crowdsec-cli/clidecision/decisions.go similarity index 96% rename from cmd/crowdsec-cli/decisions.go rename to cmd/crowdsec-cli/clidecision/decisions.go index d485c90254f..5ecb3fc3304 100644 --- 
a/cmd/crowdsec-cli/decisions.go +++ b/cmd/crowdsec-cli/clidecision/decisions.go @@ -1,4 +1,4 @@ -package main +package clidecision import ( "context" @@ -17,7 +17,9 @@ import ( log "github.com/sirupsen/logrus" "github.com/spf13/cobra" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clialert" "github.com/crowdsecurity/crowdsec/pkg/apiclient" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/types" @@ -114,12 +116,14 @@ func (cli *cliDecisions) decisionsToTable(alerts *models.GetAlertsResponse, prin return nil } +type configGetter func() *csconfig.Config + type cliDecisions struct { client *apiclient.ApiClient cfg configGetter } -func NewCLIDecisions(cfg configGetter) *cliDecisions { +func New(cfg configGetter) *cliDecisions { return &cliDecisions{ cfg: cfg, } @@ -170,8 +174,9 @@ func (cli *cliDecisions) NewCommand() *cobra.Command { func (cli *cliDecisions) list(filter apiclient.AlertsListOpts, NoSimu *bool, contained *bool, printMachine bool) error { var err error - /*take care of shorthand options*/ - if err = manageCliDecisionAlerts(filter.IPEquals, filter.RangeEquals, filter.ScopeEquals, filter.ValueEquals); err != nil { + + *filter.ScopeEquals, err = clialert.SanitizeScope(*filter.ScopeEquals, *filter.IPEquals, *filter.RangeEquals) + if err != nil { return err } @@ -326,8 +331,10 @@ func (cli *cliDecisions) add(addIP, addRange, addDuration, addValue, addScope, a stopAt := time.Now().UTC().Format(time.RFC3339) createdAt := time.Now().UTC().Format(time.RFC3339) - /*take care of shorthand options*/ - if err := manageCliDecisionAlerts(&addIP, &addRange, &addScope, &addValue); err != nil { + var err error + + addScope, err = clialert.SanitizeScope(addScope, addIP, addRange) + if err != nil { return err } @@ -381,7 +388,7 @@ func (cli *cliDecisions) add(addIP, addRange, addDuration, addValue, addScope, a } alerts = 
append(alerts, &alert) - _, _, err := cli.client.Alerts.Add(context.Background(), alerts) + _, _, err = cli.client.Alerts.Add(context.Background(), alerts) if err != nil { return err } @@ -435,7 +442,8 @@ func (cli *cliDecisions) delete(delFilter apiclient.DecisionsDeleteOpts, delDeci var err error /*take care of shorthand options*/ - if err = manageCliDecisionAlerts(delFilter.IPEquals, delFilter.RangeEquals, delFilter.ScopeEquals, delFilter.ValueEquals); err != nil { + *delFilter.ScopeEquals, err = clialert.SanitizeScope(*delFilter.ScopeEquals, *delFilter.IPEquals, *delFilter.RangeEquals) + if err != nil { return err } diff --git a/cmd/crowdsec-cli/decisions_import.go b/cmd/crowdsec-cli/clidecision/decisions_import.go similarity index 99% rename from cmd/crowdsec-cli/decisions_import.go rename to cmd/crowdsec-cli/clidecision/decisions_import.go index 338c1b7fb3e..10d92f88876 100644 --- a/cmd/crowdsec-cli/decisions_import.go +++ b/cmd/crowdsec-cli/clidecision/decisions_import.go @@ -1,4 +1,4 @@ -package main +package clidecision import ( "bufio" @@ -122,8 +122,8 @@ func (cli *cliDecisions) runImport(cmd *cobra.Command, args []string) error { } var ( - content []byte - fin *os.File + content []byte + fin *os.File ) // set format if the file has a json or csv extension diff --git a/cmd/crowdsec-cli/decisions_table.go b/cmd/crowdsec-cli/clidecision/decisions_table.go similarity index 98% rename from cmd/crowdsec-cli/decisions_table.go rename to cmd/crowdsec-cli/clidecision/decisions_table.go index 02952f93b85..90a0ae1176b 100644 --- a/cmd/crowdsec-cli/decisions_table.go +++ b/cmd/crowdsec-cli/clidecision/decisions_table.go @@ -1,4 +1,4 @@ -package main +package clidecision import ( "fmt" diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index 6f8e93e463c..01179cf93be 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -14,9 +14,11 @@ import ( "github.com/crowdsecurity/go-cs-lib/trace" + 
"github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clialert" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clibouncer" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clicapi" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cliconsole" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clidecision" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cliexplain" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clihub" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clihubtest" @@ -257,8 +259,8 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(clihub.New(cli.cfg).NewCommand()) cmd.AddCommand(climetrics.New(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIDashboard(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLIDecisions(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLIAlerts(cli.cfg).NewCommand()) + cmd.AddCommand(clidecision.New(cli.cfg).NewCommand()) + cmd.AddCommand(clialert.New(cli.cfg).NewCommand()) cmd.AddCommand(clisimulation.New(cli.cfg).NewCommand()) cmd.AddCommand(clibouncer.New(cli.cfg).NewCommand()) cmd.AddCommand(climachine.New(cli.cfg).NewCommand()) diff --git a/cmd/crowdsec-cli/utils.go b/cmd/crowdsec-cli/utils.go deleted file mode 100644 index c51140836b8..00000000000 --- a/cmd/crowdsec-cli/utils.go +++ /dev/null @@ -1,40 +0,0 @@ -package main - -import ( - "fmt" - "net" - "strings" - - "github.com/crowdsecurity/crowdsec/pkg/types" -) - -func manageCliDecisionAlerts(ip *string, ipRange *string, scope *string, value *string) error { - /*if a range is provided, change the scope*/ - if *ipRange != "" { - _, _, err := net.ParseCIDR(*ipRange) - if err != nil { - return fmt.Errorf("%s isn't a valid range", *ipRange) - } - } - - if *ip != "" { - ipRepr := net.ParseIP(*ip) - if ipRepr == nil { - return fmt.Errorf("%s isn't a valid ip", *ip) - } - } - - // avoid confusion on scope (ip vs Ip and range vs Range) - switch strings.ToLower(*scope) { - case "ip": - *scope = types.Ip - case "range": - 
*scope = types.Range - case "country": - *scope = types.Country - case "as": - *scope = types.AS - } - - return nil -} diff --git a/pkg/apiserver/controllers/v1/alerts.go b/pkg/apiserver/controllers/v1/alerts.go index 82dc51d6879..3d4309b1347 100644 --- a/pkg/apiserver/controllers/v1/alerts.go +++ b/pkg/apiserver/controllers/v1/alerts.go @@ -6,7 +6,6 @@ import ( "net" "net/http" "strconv" - "strings" "time" "github.com/gin-gonic/gin" @@ -124,21 +123,6 @@ func (c *Controller) sendAlertToPluginChannel(alert *models.Alert, profileID uin } } -func normalizeScope(scope string) string { - switch strings.ToLower(scope) { - case "ip": - return types.Ip - case "range": - return types.Range - case "as": - return types.AS - case "country": - return types.Country - default: - return scope - } -} - // CreateAlert writes the alerts received in the body to the database func (c *Controller) CreateAlert(gctx *gin.Context) { var input models.AddAlertsRequest @@ -160,12 +144,12 @@ func (c *Controller) CreateAlert(gctx *gin.Context) { for _, alert := range input { // normalize scope for alert.Source and decisions if alert.Source.Scope != nil { - *alert.Source.Scope = normalizeScope(*alert.Source.Scope) + *alert.Source.Scope = types.NormalizeScope(*alert.Source.Scope) } for _, decision := range alert.Decisions { if decision.Scope != nil { - *decision.Scope = normalizeScope(*decision.Scope) + *decision.Scope = types.NormalizeScope(*decision.Scope) } } @@ -296,8 +280,8 @@ func (c *Controller) FindAlerts(gctx *gin.Context) { // FindAlertByID returns the alert associated with the ID func (c *Controller) FindAlertByID(gctx *gin.Context) { alertIDStr := gctx.Param("alert_id") - alertID, err := strconv.Atoi(alertIDStr) + alertID, err := strconv.Atoi(alertIDStr) if err != nil { gctx.JSON(http.StatusBadRequest, gin.H{"message": "alert_id must be valid integer"}) return diff --git a/pkg/types/event.go b/pkg/types/event.go index 76a447bdc8c..e016d0294c4 100644 --- a/pkg/types/event.go +++ 
b/pkg/types/event.go @@ -2,6 +2,7 @@ package types import ( "net" + "strings" "time" "github.com/expr-lang/expr/vm" @@ -19,11 +20,11 @@ const ( // Event is the structure representing a runtime event (log or overflow) type Event struct { /* is it a log or an overflow */ - Type int `yaml:"Type,omitempty" json:"Type,omitempty"` //Can be types.LOG (0) or types.OVFLOW (1) - ExpectMode int `yaml:"ExpectMode,omitempty" json:"ExpectMode,omitempty"` //how to buckets should handle event : types.TIMEMACHINE or types.LIVE + Type int `yaml:"Type,omitempty" json:"Type,omitempty"` // Can be types.LOG (0) or types.OVFLOW (1) + ExpectMode int `yaml:"ExpectMode,omitempty" json:"ExpectMode,omitempty"` // how to buckets should handle event : types.TIMEMACHINE or types.LIVE Whitelisted bool `yaml:"Whitelisted,omitempty" json:"Whitelisted,omitempty"` WhitelistReason string `yaml:"WhitelistReason,omitempty" json:"whitelist_reason,omitempty"` - //should add whitelist reason ? + // should add whitelist reason ? 
/* the current stage of the line being parsed */ Stage string `yaml:"Stage,omitempty" json:"Stage,omitempty"` /* original line (produced by acquisition) */ @@ -36,11 +37,11 @@ type Event struct { Unmarshaled map[string]interface{} `yaml:"Unmarshaled,omitempty" json:"Unmarshaled,omitempty"` /* Overflow */ Overflow RuntimeAlert `yaml:"Overflow,omitempty" json:"Alert,omitempty"` - Time time.Time `yaml:"Time,omitempty" json:"Time,omitempty"` //parsed time `json:"-"` `` + Time time.Time `yaml:"Time,omitempty" json:"Time,omitempty"` // parsed time `json:"-"` `` StrTime string `yaml:"StrTime,omitempty" json:"StrTime,omitempty"` StrTimeFormat string `yaml:"StrTimeFormat,omitempty" json:"StrTimeFormat,omitempty"` MarshaledTime string `yaml:"MarshaledTime,omitempty" json:"MarshaledTime,omitempty"` - Process bool `yaml:"Process,omitempty" json:"Process,omitempty"` //can be set to false to avoid processing line + Process bool `yaml:"Process,omitempty" json:"Process,omitempty"` // can be set to false to avoid processing line Appsec AppsecEvent `yaml:"Appsec,omitempty" json:"Appsec,omitempty"` /* Meta is the only part that will make it to the API - it should be normalized */ Meta map[string]string `yaml:"Meta,omitempty" json:"Meta,omitempty"` @@ -50,7 +51,9 @@ func (e *Event) SetMeta(key string, value string) bool { if e.Meta == nil { e.Meta = make(map[string]string) } + e.Meta[key] = value + return true } @@ -58,7 +61,9 @@ func (e *Event) SetParsed(key string, value string) bool { if e.Parsed == nil { e.Parsed = make(map[string]string) } + e.Parsed[key] = value + return true } @@ -90,11 +95,13 @@ func (e *Event) GetMeta(key string) string { } } } + return "" } func (e *Event) ParseIPSources() []net.IP { var srcs []net.IP + switch e.Type { case LOG: if _, ok := e.Meta["source_ip"]; ok { @@ -105,6 +112,7 @@ func (e *Event) ParseIPSources() []net.IP { srcs = append(srcs, net.ParseIP(k)) } } + return srcs } @@ -131,8 +139,8 @@ type RuntimeAlert struct { Whitelisted bool 
`yaml:"Whitelisted,omitempty" json:"Whitelisted,omitempty"` Reprocess bool `yaml:"Reprocess,omitempty" json:"Reprocess,omitempty"` Sources map[string]models.Source `yaml:"Sources,omitempty" json:"Sources,omitempty"` - Alert *models.Alert `yaml:"Alert,omitempty" json:"Alert,omitempty"` //this one is a pointer to APIAlerts[0] for convenience. - //APIAlerts will be populated at the end when there is more than one source + Alert *models.Alert `yaml:"Alert,omitempty" json:"Alert,omitempty"` // this one is a pointer to APIAlerts[0] for convenience. + // APIAlerts will be populated at the end when there is more than one source APIAlerts []models.Alert `yaml:"APIAlerts,omitempty" json:"APIAlerts,omitempty"` } @@ -141,5 +149,21 @@ func (r RuntimeAlert) GetSources() []string { for key := range r.Sources { ret = append(ret, key) } + return ret } + +func NormalizeScope(scope string) string { + switch strings.ToLower(scope) { + case "ip": + return Ip + case "range": + return Range + case "as": + return AS + case "country": + return Country + default: + return scope + } +} diff --git a/test/bats/90_decisions.bats b/test/bats/90_decisions.bats index c7ed214ffc9..b892dc84015 100644 --- a/test/bats/90_decisions.bats +++ b/test/bats/90_decisions.bats @@ -108,12 +108,12 @@ teardown() { # invalid json rune -1 cscli decisions import -i - <<<'{"blah":"blah"}' --format json assert_stderr --partial 'Parsing json' - assert_stderr --partial 'json: cannot unmarshal object into Go value of type []main.decisionRaw' + assert_stderr --partial 'json: cannot unmarshal object into Go value of type []clidecision.decisionRaw' # json with extra data rune -1 cscli decisions import -i - <<<'{"values":"1.2.3.4","blah":"blah"}' --format json assert_stderr --partial 'Parsing json' - assert_stderr --partial 'json: cannot unmarshal object into Go value of type []main.decisionRaw' + assert_stderr --partial 'json: cannot unmarshal object into Go value of type []clidecision.decisionRaw' #---------- # CSV From 
fb0117e77845904e0b58251d72460b6c3e26d00e Mon Sep 17 00:00:00 2001 From: Laurence Jones Date: Tue, 3 Sep 2024 14:27:36 +0100 Subject: [PATCH 274/318] enhance: add additional explain options to hubtest (#3162) * enhance: add additional explain options to hubtest * Revert "enhance: add additional explain options to hubtest" This reverts commit b24632f3eb473e3c42885f31827764d2b7eebe2d. * enhance: add additional explain options to hubtest --------- Co-authored-by: marco --- cmd/crowdsec-cli/clihubtest/explain.go | 77 +++++++++++++++++--------- 1 file changed, 51 insertions(+), 26 deletions(-) diff --git a/cmd/crowdsec-cli/clihubtest/explain.go b/cmd/crowdsec-cli/clihubtest/explain.go index ecaf520211e..4183b6a515d 100644 --- a/cmd/crowdsec-cli/clihubtest/explain.go +++ b/cmd/crowdsec-cli/clihubtest/explain.go @@ -8,7 +8,52 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/dumps" ) + +func (cli *cliHubTest) explain(testName string, details bool, skipOk bool) error { + test, err := HubTest.LoadTestItem(testName) + if err != nil { + return fmt.Errorf("can't load test: %+v", err) + } + + err = test.ParserAssert.LoadTest(test.ParserResultFile) + if err != nil { + if err = test.Run(); err != nil { + return fmt.Errorf("running test '%s' failed: %+v", test.Name, err) + } + + if err = test.ParserAssert.LoadTest(test.ParserResultFile); err != nil { + return fmt.Errorf("unable to load parser result after run: %w", err) + } + } + + err = test.ScenarioAssert.LoadTest(test.ScenarioResultFile, test.BucketPourResultFile) + if err != nil { + if err = test.Run(); err != nil { + return fmt.Errorf("running test '%s' failed: %+v", test.Name, err) + } + + if err = test.ScenarioAssert.LoadTest(test.ScenarioResultFile, test.BucketPourResultFile); err != nil { + return fmt.Errorf("unable to load scenario result after run: %w", err) + } + } + + opts := dumps.DumpOpts{ + Details: details, + SkipOk: skipOk, + } + + dumps.DumpTree(*test.ParserAssert.TestData, *test.ScenarioAssert.PourData, opts) + 
+ return nil +} + + func (cli *cliHubTest) NewExplainCmd() *cobra.Command { + var ( + details bool + skipOk bool + ) + cmd := &cobra.Command{ Use: "explain", Short: "explain [test_name]", @@ -16,38 +61,18 @@ func (cli *cliHubTest) NewExplainCmd() *cobra.Command { DisableAutoGenTag: true, RunE: func(_ *cobra.Command, args []string) error { for _, testName := range args { - test, err := HubTest.LoadTestItem(testName) - if err != nil { - return fmt.Errorf("can't load test: %+v", err) + if err := cli.explain(testName, details, skipOk); err != nil { + return err } - err = test.ParserAssert.LoadTest(test.ParserResultFile) - if err != nil { - if err = test.Run(); err != nil { - return fmt.Errorf("running test '%s' failed: %+v", test.Name, err) - } - - if err = test.ParserAssert.LoadTest(test.ParserResultFile); err != nil { - return fmt.Errorf("unable to load parser result after run: %w", err) - } - } - - err = test.ScenarioAssert.LoadTest(test.ScenarioResultFile, test.BucketPourResultFile) - if err != nil { - if err = test.Run(); err != nil { - return fmt.Errorf("running test '%s' failed: %+v", test.Name, err) - } - - if err = test.ScenarioAssert.LoadTest(test.ScenarioResultFile, test.BucketPourResultFile); err != nil { - return fmt.Errorf("unable to load scenario result after run: %w", err) - } - } - opts := dumps.DumpOpts{} - dumps.DumpTree(*test.ParserAssert.TestData, *test.ScenarioAssert.PourData, opts) } return nil }, } + flags := cmd.Flags() + flags.BoolVarP(&details, "verbose", "v", false, "Display individual changes") + flags.BoolVar(&skipOk, "failures", false, "Only show failed lines") + return cmd } From ace942a36d01e88b387d0b9a7a86864c89767fae Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 6 Sep 2024 10:31:00 +0200 Subject: [PATCH 275/318] tests: prevent "make bats-fixture" to run if instance-data is locked (#3201) * tests: prevent "make bats-fixture" to run if instance-data is locked * lint --- test/instance-data | 14 
++++++++++++-- test/run-tests | 18 +++++++++--------- 2 files changed, 21 insertions(+), 11 deletions(-) diff --git a/test/instance-data b/test/instance-data index e4e76d3980a..e7fd05a9e54 100755 --- a/test/instance-data +++ b/test/instance-data @@ -1,16 +1,26 @@ #!/usr/bin/env bash +set -eu + +die() { + echo >&2 "$@" + exit 1 +} + #shellcheck disable=SC1007 THIS_DIR=$(CDPATH= cd -- "$(dirname -- "$0")" && pwd) cd "$THIS_DIR" || exit 1 # shellcheck disable=SC1091 . ./.environment.sh +if [[ -f "$LOCAL_INIT_DIR/.lock" ]] && [[ "$1" != "unlock" ]]; then + die "init data is locked: are you doing some manual test? if so, please finish what you are doing, run 'instance-data unlock' and retry" +fi + backend_script="./lib/config/config-${CONFIG_BACKEND}" if [[ ! -x "$backend_script" ]]; then - echo "unknown config backend '${CONFIG_BACKEND}'" >&2 - exit 1 + die "unknown config backend '${CONFIG_BACKEND}'" fi exec "$backend_script" "$@" diff --git a/test/run-tests b/test/run-tests index 6fe3bd004e2..957eb663b9c 100755 --- a/test/run-tests +++ b/test/run-tests @@ -10,12 +10,12 @@ die() { # shellcheck disable=SC1007 TEST_DIR=$(CDPATH= cd -- "$(dirname -- "$0")" && pwd) # shellcheck source=./.environment.sh -. "${TEST_DIR}/.environment.sh" +. "$TEST_DIR/.environment.sh" -"${TEST_DIR}/bin/check-requirements" +"$TEST_DIR/bin/check-requirements" echo "Running tests..." -echo "DB_BACKEND: ${DB_BACKEND}" +echo "DB_BACKEND: $DB_BACKEND" if [[ -z "$TEST_COVERAGE" ]]; then echo "Coverage report: no" else @@ -24,23 +24,23 @@ fi [[ -f "$LOCAL_INIT_DIR/.lock" ]] && die "init data is locked: are you doing some manual test? 
if so, please finish what you are doing, run 'instance-data unlock' and retry" -dump_backend="$(cat "${LOCAL_INIT_DIR}/.backend")" +dump_backend="$(cat "$LOCAL_INIT_DIR/.backend")" if [[ "$DB_BACKEND" != "$dump_backend" ]]; then - die "Can't run with backend '${DB_BACKEND}' because the test data was build with '${dump_backend}'" + die "Can't run with backend '$DB_BACKEND' because the test data was build with '$dump_backend'" fi if [[ $# -ge 1 ]]; then echo "test files: $*" - "${TEST_DIR}/lib/bats-core/bin/bats" \ + "$TEST_DIR/lib/bats-core/bin/bats" \ --jobs 1 \ --timing \ --print-output-on-failure \ "$@" else - echo "test files: ${TEST_DIR}/bats ${TEST_DIR}/dyn-bats" - "${TEST_DIR}/lib/bats-core/bin/bats" \ + echo "test files: $TEST_DIR/bats $TEST_DIR/dyn-bats" + "$TEST_DIR/lib/bats-core/bin/bats" \ --jobs 1 \ --timing \ --print-output-on-failure \ - "${TEST_DIR}/bats" "${TEST_DIR}/dyn-bats" + "$TEST_DIR/bats" "$TEST_DIR/dyn-bats" fi From 57dee1abf9e6f31d70f3556bad57e6d91f163918 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 6 Sep 2024 10:36:23 +0200 Subject: [PATCH 276/318] fix appsec/tls issues by cloning http transport (#3213) --- pkg/apiclient/client.go | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/pkg/apiclient/client.go b/pkg/apiclient/client.go index 5669fd24786..2cb68f597f3 100644 --- a/pkg/apiclient/client.go +++ b/pkg/apiclient/client.go @@ -85,6 +85,11 @@ func NewClient(config *Config) (*ApiClient, error) { transport, baseURL := createTransport(config.URL) if transport != nil { t.Transport = transport + } else { + // can be httpmock.MockTransport + if ht, ok := http.DefaultTransport.(*http.Transport); ok { + t.Transport = ht.Clone() + } } t.URL = baseURL @@ -96,8 +101,8 @@ func NewClient(config *Config) (*ApiClient, error) { tlsconfig.Certificates = []tls.Certificate{*Cert} } - if ht, ok := http.DefaultTransport.(*http.Transport); ok { - ht.TLSClientConfig = &tlsconfig + if 
t.Transport != nil { + t.Transport.(*http.Transport).TLSClientConfig = &tlsconfig } c := &ApiClient{client: t.Client(), BaseURL: baseURL, UserAgent: config.UserAgent, URLPrefix: config.VersionPrefix, PapiURL: config.PapiURL} @@ -124,6 +129,7 @@ func NewDefaultClient(URL *url.URL, prefix string, userAgent string, client *htt client.Transport = transport } else { if ht, ok := http.DefaultTransport.(*http.Transport); ok { + ht = ht.Clone() tlsconfig := tls.Config{InsecureSkipVerify: InsecureSkipVerify} tlsconfig.RootCAs = CaCertPool @@ -165,7 +171,8 @@ func RegisterClient(config *Config, client *http.Client) (*ApiClient, error) { tlsconfig.Certificates = []tls.Certificate{*Cert} } - http.DefaultTransport.(*http.Transport).TLSClientConfig = &tlsconfig + client.Transport = http.DefaultTransport.(*http.Transport).Clone() + client.Transport.(*http.Transport).TLSClientConfig = &tlsconfig } } else if client.Transport == nil && transport != nil { client.Transport = transport From c8750f604c1f47adcff8ee7a07b8aca4312a14a4 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 9 Sep 2024 11:27:20 +0200 Subject: [PATCH 277/318] refact: pkg/apiclient set and use default user agent (#3219) * default user agent * DRY default user agent * useragent.go * moved to pkg/apiclient/useragent * lint * rename useragent.DefaultUserAgent() -> useragent.Default() --- cmd/crowdsec-cli/clialert/alerts.go | 2 -- cmd/crowdsec-cli/clicapi/capi.go | 3 -- cmd/crowdsec-cli/cliconsole/console.go | 2 -- cmd/crowdsec-cli/clidecision/decisions.go | 2 -- cmd/crowdsec-cli/clilapi/lapi.go | 4 +-- .../clinotifications/notifications.go | 2 -- cmd/crowdsec/lapiclient.go | 2 -- .../loki/internal/lokiclient/loki_client.go | 4 +-- pkg/apiclient/alerts_service_test.go | 17 ++++------- pkg/apiclient/auth_service_test.go | 19 ++++++------ pkg/apiclient/client.go | 23 ++++++++++++--- pkg/apiclient/client_http_test.go | 6 +--- pkg/apiclient/client_test.go | 9 ------ 
pkg/apiclient/decisions_service_test.go | 29 ++++++++++--------- pkg/apiclient/useragent/useragent.go | 9 ++++++ pkg/apiserver/apic.go | 2 -- pkg/apiserver/apic_metrics_test.go | 3 +- pkg/apiserver/apic_test.go | 18 +++++------- pkg/cticlient/client.go | 5 ++-- pkg/cwhub/cwhub.go | 4 +-- pkg/cwversion/version.go | 8 ++--- pkg/metabase/api.go | 4 +-- 22 files changed, 82 insertions(+), 95 deletions(-) create mode 100644 pkg/apiclient/useragent/useragent.go diff --git a/cmd/crowdsec-cli/clialert/alerts.go b/cmd/crowdsec-cli/clialert/alerts.go index 13013153a79..c5e27394f77 100644 --- a/cmd/crowdsec-cli/clialert/alerts.go +++ b/cmd/crowdsec-cli/clialert/alerts.go @@ -25,7 +25,6 @@ import ( "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/apiclient" "github.com/crowdsecurity/crowdsec/pkg/csconfig" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -217,7 +216,6 @@ func (cli *cliAlerts) NewCommand() *cobra.Command { cli.client, err = apiclient.NewClient(&apiclient.Config{ MachineID: cfg.API.Client.Credentials.Login, Password: strfmt.Password(cfg.API.Client.Credentials.Password), - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }) diff --git a/cmd/crowdsec-cli/clicapi/capi.go b/cmd/crowdsec-cli/clicapi/capi.go index 4d658e3a602..49f68dd6b9e 100644 --- a/cmd/crowdsec-cli/clicapi/capi.go +++ b/cmd/crowdsec-cli/clicapi/capi.go @@ -20,7 +20,6 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/apiclient" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -77,7 +76,6 @@ func (cli *cliCapi) register(capiUserPrefix string, outputFile string) error { _, err = apiclient.RegisterClient(&apiclient.Config{ MachineID: 
capiUser, Password: password, - UserAgent: cwversion.UserAgent(), URL: apiurl, VersionPrefix: "v3", }, nil) @@ -168,7 +166,6 @@ func queryCAPIStatus(hub *cwhub.Hub, credURL string, login string, password stri MachineID: login, Password: passwd, Scenarios: itemsForAPI, - UserAgent: cwversion.UserAgent(), URL: apiURL, // I don't believe papi is neede to check enrollement // PapiURL: papiURL, diff --git a/cmd/crowdsec-cli/cliconsole/console.go b/cmd/crowdsec-cli/cliconsole/console.go index d15f25eaf69..e4b4039bdd2 100644 --- a/cmd/crowdsec-cli/cliconsole/console.go +++ b/cmd/crowdsec-cli/cliconsole/console.go @@ -24,7 +24,6 @@ import ( "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/apiclient" "github.com/crowdsecurity/crowdsec/pkg/csconfig" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -124,7 +123,6 @@ func (cli *cliConsole) enroll(key string, name string, overwrite bool, tags []st MachineID: cli.cfg().API.Server.OnlineClient.Credentials.Login, Password: password, Scenarios: hub.GetInstalledListForAPI(), - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v3", }) diff --git a/cmd/crowdsec-cli/clidecision/decisions.go b/cmd/crowdsec-cli/clidecision/decisions.go index 5ecb3fc3304..b82ebe3086e 100644 --- a/cmd/crowdsec-cli/clidecision/decisions.go +++ b/cmd/crowdsec-cli/clidecision/decisions.go @@ -20,7 +20,6 @@ import ( "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clialert" "github.com/crowdsecurity/crowdsec/pkg/apiclient" "github.com/crowdsecurity/crowdsec/pkg/csconfig" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -152,7 +151,6 @@ func (cli *cliDecisions) NewCommand() *cobra.Command { cli.client, err = apiclient.NewClient(&apiclient.Config{ MachineID: cfg.API.Client.Credentials.Login, Password: 
strfmt.Password(cfg.API.Client.Credentials.Password), - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }) diff --git a/cmd/crowdsec-cli/clilapi/lapi.go b/cmd/crowdsec-cli/clilapi/lapi.go index a6b88101cbf..fa229002512 100644 --- a/cmd/crowdsec-cli/clilapi/lapi.go +++ b/cmd/crowdsec-cli/clilapi/lapi.go @@ -24,7 +24,6 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/apiclient" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/parser" @@ -53,7 +52,7 @@ func queryLAPIStatus(hub *cwhub.Hub, credURL string, login string, password stri client, err := apiclient.NewDefaultClient(apiURL, LAPIURLPrefix, - cwversion.UserAgent(), + "", nil) if err != nil { return false, err @@ -118,7 +117,6 @@ func (cli *cliLapi) register(apiURL string, outputFile string, machine string, t _, err = apiclient.RegisterClient(&apiclient.Config{ MachineID: lapiUser, Password: password, - UserAgent: cwversion.UserAgent(), RegistrationToken: token, URL: apiurl, VersionPrefix: LAPIURLPrefix, diff --git a/cmd/crowdsec-cli/clinotifications/notifications.go b/cmd/crowdsec-cli/clinotifications/notifications.go index 04be09354c2..87a94243c5d 100644 --- a/cmd/crowdsec-cli/clinotifications/notifications.go +++ b/cmd/crowdsec-cli/clinotifications/notifications.go @@ -29,7 +29,6 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/csplugin" "github.com/crowdsecurity/crowdsec/pkg/csprofiles" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -464,7 +463,6 @@ func (cli *cliNotifications) fetchAlertFromArgString(toParse string) (*models.Al client, err := apiclient.NewClient(&apiclient.Config{ MachineID: 
cfg.API.Client.Credentials.Login, Password: strfmt.Password(cfg.API.Client.Credentials.Password), - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }) diff --git a/cmd/crowdsec/lapiclient.go b/cmd/crowdsec/lapiclient.go index cbafb460042..4556306825c 100644 --- a/cmd/crowdsec/lapiclient.go +++ b/cmd/crowdsec/lapiclient.go @@ -11,7 +11,6 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/apiclient" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/models" ) @@ -34,7 +33,6 @@ func AuthenticatedLAPIClient(credentials csconfig.ApiCredentialsCfg, hub *cwhub. MachineID: credentials.Login, Password: password, Scenarios: itemsForAPI, - UserAgent: cwversion.UserAgent(), URL: apiURL, PapiURL: papiURL, VersionPrefix: "v1", diff --git a/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go b/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go index 420da6e391c..846e833abea 100644 --- a/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go +++ b/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go @@ -16,7 +16,7 @@ import ( log "github.com/sirupsen/logrus" "gopkg.in/tomb.v2" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" + "github.com/crowdsecurity/crowdsec/pkg/apiclient/useragent" ) type LokiClient struct { @@ -319,6 +319,6 @@ func NewLokiClient(config Config) *LokiClient { if config.Username != "" || config.Password != "" { headers["Authorization"] = "Basic " + base64.StdEncoding.EncodeToString([]byte(config.Username+":"+config.Password)) } - headers["User-Agent"] = cwversion.UserAgent() + headers["User-Agent"] = useragent.Default() return &LokiClient{Logger: log.WithField("component", "lokiclient"), config: config, requestHeaders: headers} } diff --git a/pkg/apiclient/alerts_service_test.go b/pkg/apiclient/alerts_service_test.go index 12ef2d295f4..0d1ff41685f 100644 --- 
a/pkg/apiclient/alerts_service_test.go +++ b/pkg/apiclient/alerts_service_test.go @@ -14,7 +14,6 @@ import ( "github.com/crowdsecurity/go-cs-lib/cstest" "github.com/crowdsecurity/go-cs-lib/ptr" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/models" ) @@ -35,7 +34,6 @@ func TestAlertsListAsMachine(t *testing.T) { client, err := NewClient(&Config{ MachineID: "test_login", Password: "test_password", - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }) @@ -180,16 +178,16 @@ func TestAlertsListAsMachine(t *testing.T) { }, } - //log.Debugf("data : -> %s", spew.Sdump(alerts)) - //log.Debugf("resp : -> %s", spew.Sdump(resp)) - //log.Debugf("expected : -> %s", spew.Sdump(expected)) - //first one returns data + // log.Debugf("data : -> %s", spew.Sdump(alerts)) + // log.Debugf("resp : -> %s", spew.Sdump(resp)) + // log.Debugf("expected : -> %s", spew.Sdump(expected)) + // first one returns data alerts, resp, err := client.Alerts.List(context.Background(), AlertsListOpts{}) require.NoError(t, err) assert.Equal(t, http.StatusOK, resp.Response.StatusCode) assert.Equal(t, expected, *alerts) - //this one doesn't + // this one doesn't filter := AlertsListOpts{IPEquals: ptr.Of("1.2.3.4")} alerts, resp, err = client.Alerts.List(context.Background(), filter) @@ -214,7 +212,6 @@ func TestAlertsGetAsMachine(t *testing.T) { client, err := NewClient(&Config{ MachineID: "test_login", Password: "test_password", - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }) @@ -360,7 +357,7 @@ func TestAlertsGetAsMachine(t *testing.T) { assert.Equal(t, http.StatusOK, resp.Response.StatusCode) assert.Equal(t, *expected, *alerts) - //fail + // fail _, _, err = client.Alerts.GetByID(context.Background(), 2) cstest.RequireErrorMessage(t, err, "API error: object not found") } @@ -388,7 +385,6 @@ func TestAlertsCreateAsMachine(t *testing.T) { client, err := NewClient(&Config{ MachineID: "test_login", Password: 
"test_password", - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }) @@ -430,7 +426,6 @@ func TestAlertsDeleteAsMachine(t *testing.T) { client, err := NewClient(&Config{ MachineID: "test_login", Password: "test_password", - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }) diff --git a/pkg/apiclient/auth_service_test.go b/pkg/apiclient/auth_service_test.go index 6c9abc0edef..344e377ad0f 100644 --- a/pkg/apiclient/auth_service_test.go +++ b/pkg/apiclient/auth_service_test.go @@ -14,7 +14,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/models" ) @@ -36,11 +35,13 @@ func initBasicMuxMock(t *testing.T, mux *http.ServeMux, path string) { mux.HandleFunc(path, func(w http.ResponseWriter, r *http.Request) { testMethod(t, r, "POST") + buf := new(bytes.Buffer) _, _ = buf.ReadFrom(r.Body) newStr := buf.String() var payload BasicMockPayload + err := json.Unmarshal([]byte(newStr), &payload) if err != nil || payload.MachineID == "" || payload.Password == "" { log.Printf("Bad payload") @@ -48,8 +49,8 @@ func initBasicMuxMock(t *testing.T, mux *http.ServeMux, path string) { } var responseBody string - responseCode, hasFoundErrorMock := loginsForMockErrorCases[payload.MachineID] + responseCode, hasFoundErrorMock := loginsForMockErrorCases[payload.MachineID] if !hasFoundErrorMock { responseCode = http.StatusOK responseBody = `{"code":200,"expire":"2029-11-30T14:14:24+01:00","token":"toto"}` @@ -76,7 +77,7 @@ func TestWatcherRegister(t *testing.T) { mux, urlx, teardown := setup() defer teardown() - //body: models.WatcherRegistrationRequest{MachineID: &config.MachineID, Password: &config.Password} + // body: models.WatcherRegistrationRequest{MachineID: &config.MachineID, Password: &config.Password} initBasicMuxMock(t, mux, "/watchers") log.Printf("URL is %s", urlx) @@ -87,7 +88,6 @@ func TestWatcherRegister(t 
*testing.T) { clientconfig := Config{ MachineID: "test_login", Password: "test_password", - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", } @@ -113,7 +113,7 @@ func TestWatcherAuth(t *testing.T) { mux, urlx, teardown := setup() defer teardown() - //body: models.WatcherRegistrationRequest{MachineID: &config.MachineID, Password: &config.Password} + // body: models.WatcherRegistrationRequest{MachineID: &config.MachineID, Password: &config.Password} initBasicMuxMock(t, mux, "/watchers/login") log.Printf("URL is %s", urlx) @@ -121,11 +121,10 @@ func TestWatcherAuth(t *testing.T) { apiURL, err := url.Parse(urlx + "/") require.NoError(t, err) - //ok auth + // ok auth clientConfig := &Config{ MachineID: "test_login", Password: "test_password", - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", Scenarios: []string{"crowdsecurity/test"}, @@ -174,7 +173,7 @@ func TestWatcherUnregister(t *testing.T) { mux, urlx, teardown := setup() defer teardown() - //body: models.WatcherRegistrationRequest{MachineID: &config.MachineID, Password: &config.Password} + // body: models.WatcherRegistrationRequest{MachineID: &config.MachineID, Password: &config.Password} mux.HandleFunc("/watchers", func(w http.ResponseWriter, r *http.Request) { testMethod(t, r, "DELETE") @@ -184,6 +183,7 @@ func TestWatcherUnregister(t *testing.T) { mux.HandleFunc("/watchers/login", func(w http.ResponseWriter, r *http.Request) { testMethod(t, r, "POST") + buf := new(bytes.Buffer) _, _ = buf.ReadFrom(r.Body) @@ -206,7 +206,6 @@ func TestWatcherUnregister(t *testing.T) { mycfg := &Config{ MachineID: "test_login", Password: "test_password", - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", Scenarios: []string{"crowdsecurity/test"}, @@ -229,6 +228,7 @@ func TestWatcherEnroll(t *testing.T) { mux.HandleFunc("/watchers/enroll", func(w http.ResponseWriter, r *http.Request) { testMethod(t, r, "POST") + buf := new(bytes.Buffer) _, _ = buf.ReadFrom(r.Body) newStr := 
buf.String() @@ -260,7 +260,6 @@ func TestWatcherEnroll(t *testing.T) { mycfg := &Config{ MachineID: "test_login", Password: "test_password", - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", Scenarios: []string{"crowdsecurity/test"}, diff --git a/pkg/apiclient/client.go b/pkg/apiclient/client.go index 2cb68f597f3..02a99037a04 100644 --- a/pkg/apiclient/client.go +++ b/pkg/apiclient/client.go @@ -12,6 +12,7 @@ import ( "github.com/golang-jwt/jwt/v4" + "github.com/crowdsecurity/crowdsec/pkg/apiclient/useragent" "github.com/crowdsecurity/crowdsec/pkg/models" ) @@ -66,11 +67,16 @@ type service struct { } func NewClient(config *Config) (*ApiClient, error) { + userAgent := config.UserAgent + if userAgent == "" { + userAgent = useragent.Default() + } + t := &JWTTransport{ MachineID: &config.MachineID, Password: &config.Password, Scenarios: config.Scenarios, - UserAgent: config.UserAgent, + UserAgent: userAgent, VersionPrefix: config.VersionPrefix, UpdateScenario: config.UpdateScenario, RetryConfig: NewRetryConfig( @@ -105,7 +111,7 @@ func NewClient(config *Config) (*ApiClient, error) { t.Transport.(*http.Transport).TLSClientConfig = &tlsconfig } - c := &ApiClient{client: t.Client(), BaseURL: baseURL, UserAgent: config.UserAgent, URLPrefix: config.VersionPrefix, PapiURL: config.PapiURL} + c := &ApiClient{client: t.Client(), BaseURL: baseURL, UserAgent: userAgent, URLPrefix: config.VersionPrefix, PapiURL: config.PapiURL} c.common.client = c c.Decisions = (*DecisionsService)(&c.common) c.Alerts = (*AlertsService)(&c.common) @@ -143,6 +149,10 @@ func NewDefaultClient(URL *url.URL, prefix string, userAgent string, client *htt } } + if userAgent == "" { + userAgent = useragent.Default() + } + c := &ApiClient{client: client, BaseURL: baseURL, UserAgent: userAgent, URLPrefix: prefix} c.common.client = c c.Decisions = (*DecisionsService)(&c.common) @@ -178,15 +188,20 @@ func RegisterClient(config *Config, client *http.Client) (*ApiClient, error) { 
client.Transport = transport } - c := &ApiClient{client: client, BaseURL: baseURL, UserAgent: config.UserAgent, URLPrefix: config.VersionPrefix} + userAgent := config.UserAgent + if userAgent == "" { + userAgent = useragent.Default() + } + + c := &ApiClient{client: client, BaseURL: baseURL, UserAgent: userAgent, URLPrefix: config.VersionPrefix} c.common.client = c c.Decisions = (*DecisionsService)(&c.common) c.Alerts = (*AlertsService)(&c.common) c.Auth = (*AuthService)(&c.common) resp, err := c.Auth.RegisterWatcher(context.Background(), models.WatcherRegistrationRequest{MachineID: &config.MachineID, Password: &config.Password, RegistrationToken: config.RegistrationToken}) - /*if we have http status, return it*/ if err != nil { + /*if we have http status, return it*/ if resp != nil && resp.Response != nil { return nil, fmt.Errorf("api register (%s) http %s: %w", c.BaseURL, resp.Response.Status, err) } diff --git a/pkg/apiclient/client_http_test.go b/pkg/apiclient/client_http_test.go index 4bdfe1d0da5..45cd8410a8e 100644 --- a/pkg/apiclient/client_http_test.go +++ b/pkg/apiclient/client_http_test.go @@ -10,22 +10,19 @@ import ( "github.com/stretchr/testify/require" "github.com/crowdsecurity/go-cs-lib/cstest" - - "github.com/crowdsecurity/crowdsec/pkg/cwversion" ) func TestNewRequestInvalid(t *testing.T) { mux, urlx, teardown := setup() defer teardown() - //missing slash in uri + // missing slash in uri apiURL, err := url.Parse(urlx) require.NoError(t, err) client, err := NewClient(&Config{ MachineID: "test_login", Password: "test_password", - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }) @@ -57,7 +54,6 @@ func TestNewRequestTimeout(t *testing.T) { client, err := NewClient(&Config{ MachineID: "test_login", Password: "test_password", - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }) diff --git a/pkg/apiclient/client_test.go b/pkg/apiclient/client_test.go index bd83e512afc..e632ff428c0 100644 --- 
a/pkg/apiclient/client_test.go +++ b/pkg/apiclient/client_test.go @@ -17,8 +17,6 @@ import ( "github.com/stretchr/testify/require" "github.com/crowdsecurity/go-cs-lib/cstest" - - "github.com/crowdsecurity/crowdsec/pkg/cwversion" ) /*this is a ripoff of google/go-github approach : @@ -97,7 +95,6 @@ func TestNewClientOk(t *testing.T) { client, err := NewClient(&Config{ MachineID: "test_login", Password: "test_password", - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }) @@ -134,7 +131,6 @@ func TestNewClientOk_UnixSocket(t *testing.T) { client, err := NewClient(&Config{ MachineID: "test_login", Password: "test_password", - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }) @@ -172,7 +168,6 @@ func TestNewClientKo(t *testing.T) { client, err := NewClient(&Config{ MachineID: "test_login", Password: "test_password", - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }) @@ -250,7 +245,6 @@ func TestNewClientRegisterKO(t *testing.T) { _, err = RegisterClient(&Config{ MachineID: "test_login", Password: "test_password", - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }, &http.Client{}) @@ -281,7 +275,6 @@ func TestNewClientRegisterOK(t *testing.T) { client, err := RegisterClient(&Config{ MachineID: "test_login", Password: "test_password", - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }, &http.Client{}) @@ -314,7 +307,6 @@ func TestNewClientRegisterOK_UnixSocket(t *testing.T) { client, err := RegisterClient(&Config{ MachineID: "test_login", Password: "test_password", - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }, &http.Client{}) @@ -344,7 +336,6 @@ func TestNewClientBadAnswer(t *testing.T) { _, err = RegisterClient(&Config{ MachineID: "test_login", Password: "test_password", - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }, &http.Client{}) diff --git a/pkg/apiclient/decisions_service_test.go 
b/pkg/apiclient/decisions_service_test.go index 6942cfc9d85..54c44f43eda 100644 --- a/pkg/apiclient/decisions_service_test.go +++ b/pkg/apiclient/decisions_service_test.go @@ -13,7 +13,6 @@ import ( "github.com/crowdsecurity/go-cs-lib/cstest" "github.com/crowdsecurity/go-cs-lib/ptr" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/modelscapi" ) @@ -26,6 +25,7 @@ func TestDecisionsList(t *testing.T) { mux.HandleFunc("/decisions", func(w http.ResponseWriter, r *http.Request) { testMethod(t, r, "GET") + if r.URL.RawQuery == "ip=1.2.3.4" { assert.Equal(t, "ip=1.2.3.4", r.URL.RawQuery) assert.Equal(t, "ixu", r.Header.Get("X-Api-Key")) @@ -34,14 +34,14 @@ func TestDecisionsList(t *testing.T) { } else { w.WriteHeader(http.StatusOK) w.Write([]byte(`null`)) - //no results + // no results } }) apiURL, err := url.Parse(urlx + "/") require.NoError(t, err) - //ok answer + // ok answer auth := &APIKeyTransport{ APIKey: "ixu", } @@ -68,7 +68,7 @@ func TestDecisionsList(t *testing.T) { assert.Equal(t, http.StatusOK, resp.Response.StatusCode) assert.Equal(t, *expected, *decisions) - //Empty return + // Empty return decisionsFilter = DecisionsListOpts{IPEquals: ptr.Of("1.2.3.5")} decisions, resp, err = newcli.Decisions.List(context.Background(), decisionsFilter) require.NoError(t, err) @@ -85,6 +85,7 @@ func TestDecisionsStream(t *testing.T) { mux.HandleFunc("/decisions/stream", func(w http.ResponseWriter, r *http.Request) { assert.Equal(t, "ixu", r.Header.Get("X-Api-Key")) testMethod(t, r, http.MethodGet) + if r.Method == http.MethodGet { if r.URL.RawQuery == "startup=true" { w.WriteHeader(http.StatusOK) @@ -99,6 +100,7 @@ func TestDecisionsStream(t *testing.T) { mux.HandleFunc("/decisions", func(w http.ResponseWriter, r *http.Request) { assert.Equal(t, "ixu", r.Header.Get("X-Api-Key")) testMethod(t, r, http.MethodDelete) + if r.Method == http.MethodDelete { w.WriteHeader(http.StatusOK) } @@ 
-107,7 +109,7 @@ func TestDecisionsStream(t *testing.T) { apiURL, err := url.Parse(urlx + "/") require.NoError(t, err) - //ok answer + // ok answer auth := &APIKeyTransport{ APIKey: "ixu", } @@ -134,14 +136,14 @@ func TestDecisionsStream(t *testing.T) { assert.Equal(t, http.StatusOK, resp.Response.StatusCode) assert.Equal(t, *expected, *decisions) - //and second call, we get empty lists + // and second call, we get empty lists decisions, resp, err = newcli.Decisions.GetStream(context.Background(), DecisionsStreamOpts{Startup: false}) require.NoError(t, err) assert.Equal(t, http.StatusOK, resp.Response.StatusCode) assert.Empty(t, decisions.New) assert.Empty(t, decisions.Deleted) - //delete stream + // delete stream resp, err = newcli.Decisions.StopStream(context.Background()) require.NoError(t, err) assert.Equal(t, http.StatusOK, resp.Response.StatusCode) @@ -156,6 +158,7 @@ func TestDecisionsStreamV3Compatibility(t *testing.T) { mux.HandleFunc("/decisions/stream", func(w http.ResponseWriter, r *http.Request) { assert.Equal(t, "ixu", r.Header.Get("X-Api-Key")) testMethod(t, r, http.MethodGet) + if r.Method == http.MethodGet { if r.URL.RawQuery == "startup=true" { w.WriteHeader(http.StatusOK) @@ -170,7 +173,7 @@ func TestDecisionsStreamV3Compatibility(t *testing.T) { apiURL, err := url.Parse(urlx + "/") require.NoError(t, err) - //ok answer + // ok answer auth := &APIKeyTransport{ APIKey: "ixu", } @@ -220,6 +223,7 @@ func TestDecisionsStreamV3(t *testing.T) { mux.HandleFunc("/decisions/stream", func(w http.ResponseWriter, r *http.Request) { assert.Equal(t, "ixu", r.Header.Get("X-Api-Key")) testMethod(t, r, http.MethodGet) + if r.Method == http.MethodGet { w.WriteHeader(http.StatusOK) w.Write([]byte(`{"deleted":[{"scope":"ip","decisions":["1.2.3.5"]}], @@ -231,7 +235,7 @@ func TestDecisionsStreamV3(t *testing.T) { apiURL, err := url.Parse(urlx + "/") require.NoError(t, err) - //ok answer + // ok answer auth := &APIKeyTransport{ APIKey: "ixu", } @@ -305,7 +309,7 @@ 
func TestDecisionsFromBlocklist(t *testing.T) { apiURL, err := url.Parse(urlx + "/") require.NoError(t, err) - //ok answer + // ok answer auth := &APIKeyTransport{ APIKey: "ixu", } @@ -391,7 +395,7 @@ func TestDeleteDecisions(t *testing.T) { assert.Equal(t, "ip=1.2.3.4", r.URL.RawQuery) w.WriteHeader(http.StatusOK) w.Write([]byte(`{"nbDeleted":"1"}`)) - //w.Write([]byte(`{"message":"0 deleted alerts"}`)) + // w.Write([]byte(`{"message":"0 deleted alerts"}`)) }) log.Printf("URL is %s", urlx) @@ -402,7 +406,6 @@ func TestDeleteDecisions(t *testing.T) { client, err := NewClient(&Config{ MachineID: "test_login", Password: "test_password", - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }) @@ -468,6 +471,7 @@ func TestDecisionsStreamOpts_addQueryParamsToURL(t *testing.T) { got, err := o.addQueryParamsToURL(baseURLString) cstest.RequireErrorContains(t, err, tt.expectedErr) + if tt.expectedErr != "" { return } @@ -502,7 +506,6 @@ func TestDecisionsStreamOpts_addQueryParamsToURL(t *testing.T) { // client, err := NewClient(&Config{ // MachineID: "test_login", // Password: "test_password", -// UserAgent: cwversion.UserAgent(), // URL: apiURL, // VersionPrefix: "v1", // }) diff --git a/pkg/apiclient/useragent/useragent.go b/pkg/apiclient/useragent/useragent.go new file mode 100644 index 00000000000..5a62ce1ac06 --- /dev/null +++ b/pkg/apiclient/useragent/useragent.go @@ -0,0 +1,9 @@ +package useragent + +import ( + "github.com/crowdsecurity/go-cs-lib/version" +) + +func Default() string { + return "crowdsec/" + version.String() + "-" + version.System +} diff --git a/pkg/apiserver/apic.go b/pkg/apiserver/apic.go index 5b850cbff0d..73061637ad9 100644 --- a/pkg/apiserver/apic.go +++ b/pkg/apiserver/apic.go @@ -23,7 +23,6 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/apiclient" "github.com/crowdsecurity/crowdsec/pkg/csconfig" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/database" 
"github.com/crowdsecurity/crowdsec/pkg/database/ent" "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" @@ -221,7 +220,6 @@ func NewAPIC(config *csconfig.OnlineApiClientCfg, dbClient *database.Client, con ret.apiClient, err = apiclient.NewClient(&apiclient.Config{ MachineID: config.Credentials.Login, Password: password, - UserAgent: cwversion.UserAgent(), URL: apiURL, PapiURL: papiURL, VersionPrefix: "v3", diff --git a/pkg/apiserver/apic_metrics_test.go b/pkg/apiserver/apic_metrics_test.go index d1e48ac90a3..78b16f9c8b7 100644 --- a/pkg/apiserver/apic_metrics_test.go +++ b/pkg/apiserver/apic_metrics_test.go @@ -11,7 +11,6 @@ import ( "github.com/stretchr/testify/require" "github.com/crowdsecurity/crowdsec/pkg/apiclient" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" ) func TestAPICSendMetrics(t *testing.T) { @@ -70,7 +69,7 @@ func TestAPICSendMetrics(t *testing.T) { apiClient, err := apiclient.NewDefaultClient( url, "/api", - cwversion.UserAgent(), + "", nil, ) require.NoError(t, err) diff --git a/pkg/apiserver/apic_test.go b/pkg/apiserver/apic_test.go index 546a236251f..51887006ad4 100644 --- a/pkg/apiserver/apic_test.go +++ b/pkg/apiserver/apic_test.go @@ -26,7 +26,6 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/apiclient" "github.com/crowdsecurity/crowdsec/pkg/csconfig" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/database" "github.com/crowdsecurity/crowdsec/pkg/database/ent/decision" "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" @@ -676,7 +675,7 @@ func TestAPICWhitelists(t *testing.T) { apic, err := apiclient.NewDefaultClient( url, "/api", - cwversion.UserAgent(), + "", nil, ) require.NoError(t, err) @@ -817,7 +816,7 @@ func TestAPICPullTop(t *testing.T) { apic, err := apiclient.NewDefaultClient( url, "/api", - cwversion.UserAgent(), + "", nil, ) require.NoError(t, err) @@ -832,8 +831,7 @@ func TestAPICPullTop(t *testing.T) { alerts := 
api.dbClient.Ent.Alert.Query().AllX(context.Background()) validDecisions := api.dbClient.Ent.Decision.Query().Where( decision.UntilGT(time.Now())). - AllX(context.Background(), - ) + AllX(context.Background()) decisionScenarioFreq := make(map[string]int) alertScenario := make(map[string]int) @@ -905,7 +903,7 @@ func TestAPICPullTopBLCacheFirstCall(t *testing.T) { apic, err := apiclient.NewDefaultClient( url, "/api", - cwversion.UserAgent(), + "", nil, ) require.NoError(t, err) @@ -997,7 +995,7 @@ func TestAPICPullTopBLCacheForceCall(t *testing.T) { apic, err := apiclient.NewDefaultClient( url, "/api", - cwversion.UserAgent(), + "", nil, ) require.NoError(t, err) @@ -1024,7 +1022,7 @@ func TestAPICPullBlocklistCall(t *testing.T) { apic, err := apiclient.NewDefaultClient( url, "/api", - cwversion.UserAgent(), + "", nil, ) require.NoError(t, err) @@ -1107,7 +1105,7 @@ func TestAPICPush(t *testing.T) { apic, err := apiclient.NewDefaultClient( url, "/api", - cwversion.UserAgent(), + "", nil, ) require.NoError(t, err) @@ -1171,7 +1169,7 @@ func TestAPICPull(t *testing.T) { apic, err := apiclient.NewDefaultClient( url, "/api", - cwversion.UserAgent(), + "", nil, ) require.NoError(t, err) diff --git a/pkg/cticlient/client.go b/pkg/cticlient/client.go index b817121e222..90112d80abf 100644 --- a/pkg/cticlient/client.go +++ b/pkg/cticlient/client.go @@ -8,8 +8,9 @@ import ( "net/http" "strings" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" log "github.com/sirupsen/logrus" + + "github.com/crowdsecurity/crowdsec/pkg/apiclient/useragent" ) const ( @@ -46,7 +47,7 @@ func (c *CrowdsecCTIClient) doRequest(method string, endpoint string, params map } req.Header.Set("X-Api-Key", c.apiKey) - req.Header.Set("User-Agent", cwversion.UserAgent()) + req.Header.Set("User-Agent", useragent.Default()) resp, err := c.httpClient.Do(req) if err != nil { diff --git a/pkg/cwhub/cwhub.go b/pkg/cwhub/cwhub.go index d8607e7e562..683f1853b43 100644 --- a/pkg/cwhub/cwhub.go +++ 
b/pkg/cwhub/cwhub.go @@ -7,7 +7,7 @@ import ( "strings" "time" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" + "github.com/crowdsecurity/crowdsec/pkg/apiclient/useragent" ) // hubTransport wraps a Transport to set a custom User-Agent. @@ -16,7 +16,7 @@ type hubTransport struct { } func (t *hubTransport) RoundTrip(req *http.Request) (*http.Response, error) { - req.Header.Set("User-Agent", cwversion.UserAgent()) + req.Header.Set("User-Agent", useragent.Default()) return t.RoundTripper.RoundTrip(req) } diff --git a/pkg/cwversion/version.go b/pkg/cwversion/version.go index 28d5c2a621c..b208467aef5 100644 --- a/pkg/cwversion/version.go +++ b/pkg/cwversion/version.go @@ -7,6 +7,8 @@ import ( goversion "github.com/hashicorp/go-version" "github.com/crowdsecurity/go-cs-lib/version" + + "github.com/crowdsecurity/crowdsec/pkg/apiclient/useragent" ) var ( @@ -28,7 +30,7 @@ func FullString() string { ret += fmt.Sprintf("GoVersion: %s\n", version.GoVersion) ret += fmt.Sprintf("Platform: %s\n", version.System) ret += fmt.Sprintf("libre2: %s\n", Libre2) - ret += fmt.Sprintf("User-Agent: %s\n", UserAgent()) + ret += fmt.Sprintf("User-Agent: %s\n", useragent.Default()) ret += fmt.Sprintf("Constraint_parser: %s\n", Constraint_parser) ret += fmt.Sprintf("Constraint_scenario: %s\n", Constraint_scenario) ret += fmt.Sprintf("Constraint_api: %s\n", Constraint_api) @@ -37,10 +39,6 @@ func FullString() string { return ret } -func UserAgent() string { - return "crowdsec/" + version.String() + "-" + version.System -} - // VersionStrip remove the tag from the version string, used to match with a hub branch func VersionStrip() string { ret := strings.Split(version.Version, "~") diff --git a/pkg/metabase/api.go b/pkg/metabase/api.go index 387e8d151e0..08e10188678 100644 --- a/pkg/metabase/api.go +++ b/pkg/metabase/api.go @@ -9,7 +9,7 @@ import ( "github.com/dghubble/sling" log "github.com/sirupsen/logrus" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" + 
"github.com/crowdsecurity/crowdsec/pkg/apiclient/useragent" ) type MBClient struct { @@ -38,7 +38,7 @@ var ( func NewMBClient(url string) (*MBClient, error) { httpClient := &http.Client{Timeout: 20 * time.Second} return &MBClient{ - CTX: sling.New().Client(httpClient).Base(url).Set("User-Agent", cwversion.UserAgent()), + CTX: sling.New().Client(httpClient).Base(url).Set("User-Agent", useragent.Default()), Client: httpClient, }, nil } From 4d10e9df00c0abcde9f749db4a546354fd664033 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 10 Sep 2024 17:32:13 +0200 Subject: [PATCH 278/318] refact: cscli papi (#3222) * cscli papi status - fix nil deref + func test * cscli papi: extract methods status(), sync() * papi status -> stdout * fix nil deref * cscli support dump: include papi status * lint --- cmd/crowdsec-cli/clipapi/papi.go | 144 ++++++++++++++----------- cmd/crowdsec-cli/clisupport/support.go | 28 ++++- cmd/crowdsec-cli/require/require.go | 8 ++ test/bats/04_capi.bats | 15 ++- 4 files changed, 129 insertions(+), 66 deletions(-) diff --git a/cmd/crowdsec-cli/clipapi/papi.go b/cmd/crowdsec-cli/clipapi/papi.go index 9a9e4fcaa8b..0752267707b 100644 --- a/cmd/crowdsec-cli/clipapi/papi.go +++ b/cmd/crowdsec-cli/clipapi/papi.go @@ -2,8 +2,10 @@ package clipapi import ( "fmt" + "io" "time" + "github.com/fatih/color" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "gopkg.in/tomb.v2" @@ -13,9 +15,10 @@ import ( "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/apiserver" "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/database" ) -type configGetter func() *csconfig.Config +type configGetter = func() *csconfig.Config type cliPapi struct { cfg configGetter @@ -46,104 +49,119 @@ func (cli *cliPapi) NewCommand() *cobra.Command { }, } - cmd.AddCommand(cli.NewStatusCmd()) - cmd.AddCommand(cli.NewSyncCmd()) + cmd.AddCommand(cli.newStatusCmd()) 
+ cmd.AddCommand(cli.newSyncCmd()) return cmd } -func (cli *cliPapi) NewStatusCmd() *cobra.Command { +func (cli *cliPapi) Status(out io.Writer, db *database.Client) error { + cfg := cli.cfg() + + apic, err := apiserver.NewAPIC(cfg.API.Server.OnlineClient, db, cfg.API.Server.ConsoleConfig, cfg.API.Server.CapiWhitelists) + if err != nil { + return fmt.Errorf("unable to initialize API client: %w", err) + } + + papi, err := apiserver.NewPAPI(apic, db, cfg.API.Server.ConsoleConfig, log.GetLevel()) + if err != nil { + return fmt.Errorf("unable to initialize PAPI client: %w", err) + } + + perms, err := papi.GetPermissions() + if err != nil { + return fmt.Errorf("unable to get PAPI permissions: %w", err) + } + + lastTimestampStr, err := db.GetConfigItem(apiserver.PapiPullKey) + if err != nil { + lastTimestampStr = ptr.Of("never") + } + + // both can and did happen + if lastTimestampStr == nil || *lastTimestampStr == "0001-01-01T00:00:00Z" { + lastTimestampStr = ptr.Of("never") + } + + fmt.Fprint(out, "You can successfully interact with Polling API (PAPI)\n") + fmt.Fprintf(out, "Console plan: %s\n", perms.Plan) + fmt.Fprintf(out, "Last order received: %s\n", *lastTimestampStr) + fmt.Fprint(out, "PAPI subscriptions:\n") + + for _, sub := range perms.Categories { + fmt.Fprintf(out, " - %s\n", sub) + } + + return nil +} + +func (cli *cliPapi) newStatusCmd() *cobra.Command { cmd := &cobra.Command{ Use: "status", Short: "Get status of the Polling API", Args: cobra.MinimumNArgs(0), DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { - var err error cfg := cli.cfg() db, err := require.DBClient(cmd.Context(), cfg.DbConfig) if err != nil { return err } - apic, err := apiserver.NewAPIC(cfg.API.Server.OnlineClient, db, cfg.API.Server.ConsoleConfig, cfg.API.Server.CapiWhitelists) - if err != nil { - return fmt.Errorf("unable to initialize API client: %w", err) - } + return cli.Status(color.Output, db) + }, + } - papi, err := apiserver.NewPAPI(apic, db, 
cfg.API.Server.ConsoleConfig, log.GetLevel()) - if err != nil { - return fmt.Errorf("unable to initialize PAPI client: %w", err) - } + return cmd +} - perms, err := papi.GetPermissions() - if err != nil { - return fmt.Errorf("unable to get PAPI permissions: %w", err) - } - var lastTimestampStr *string - lastTimestampStr, err = db.GetConfigItem(apiserver.PapiPullKey) - if err != nil { - lastTimestampStr = ptr.Of("never") - } - log.Infof("You can successfully interact with Polling API (PAPI)") - log.Infof("Console plan: %s", perms.Plan) - log.Infof("Last order received: %s", *lastTimestampStr) +func (cli *cliPapi) sync(out io.Writer, db *database.Client) error { + cfg := cli.cfg() + t := tomb.Tomb{} - log.Infof("PAPI subscriptions:") - for _, sub := range perms.Categories { - log.Infof(" - %s", sub) - } + apic, err := apiserver.NewAPIC(cfg.API.Server.OnlineClient, db, cfg.API.Server.ConsoleConfig, cfg.API.Server.CapiWhitelists) + if err != nil { + return fmt.Errorf("unable to initialize API client: %w", err) + } - return nil - }, + t.Go(apic.Push) + + papi, err := apiserver.NewPAPI(apic, db, cfg.API.Server.ConsoleConfig, log.GetLevel()) + if err != nil { + return fmt.Errorf("unable to initialize PAPI client: %w", err) } - return cmd + t.Go(papi.SyncDecisions) + + err = papi.PullOnce(time.Time{}, true) + if err != nil { + return fmt.Errorf("unable to sync decisions: %w", err) + } + + log.Infof("Sending acknowledgements to CAPI") + + apic.Shutdown() + papi.Shutdown() + t.Wait() + time.Sleep(5 * time.Second) // FIXME: the push done by apic.Push is run inside a sub goroutine, sleep to make sure it's done + + return nil } -func (cli *cliPapi) NewSyncCmd() *cobra.Command { +func (cli *cliPapi) newSyncCmd() *cobra.Command { cmd := &cobra.Command{ Use: "sync", Short: "Sync with the Polling API, pulling all non-expired orders for the instance", Args: cobra.MinimumNArgs(0), DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { - var err error cfg := 
cli.cfg() - t := tomb.Tomb{} - db, err := require.DBClient(cmd.Context(), cfg.DbConfig) if err != nil { return err } - apic, err := apiserver.NewAPIC(cfg.API.Server.OnlineClient, db, cfg.API.Server.ConsoleConfig, cfg.API.Server.CapiWhitelists) - if err != nil { - return fmt.Errorf("unable to initialize API client: %w", err) - } - - t.Go(apic.Push) - - papi, err := apiserver.NewPAPI(apic, db, cfg.API.Server.ConsoleConfig, log.GetLevel()) - if err != nil { - return fmt.Errorf("unable to initialize PAPI client: %w", err) - } - - t.Go(papi.SyncDecisions) - - err = papi.PullOnce(time.Time{}, true) - if err != nil { - return fmt.Errorf("unable to sync decisions: %w", err) - } - - log.Infof("Sending acknowledgements to CAPI") - - apic.Shutdown() - papi.Shutdown() - t.Wait() - time.Sleep(5 * time.Second) // FIXME: the push done by apic.Push is run inside a sub goroutine, sleep to make sure it's done - - return nil + return cli.sync(color.Output, db) }, } diff --git a/cmd/crowdsec-cli/clisupport/support.go b/cmd/crowdsec-cli/clisupport/support.go index 55f0ec4b03e..f161c66c802 100644 --- a/cmd/crowdsec-cli/clisupport/support.go +++ b/cmd/crowdsec-cli/clisupport/support.go @@ -28,6 +28,7 @@ import ( "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clilapi" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/climachine" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/climetrics" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clipapi" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" @@ -47,6 +48,7 @@ const ( SUPPORT_CROWDSEC_CONFIG_PATH = "config/crowdsec.yaml" SUPPORT_LAPI_STATUS_PATH = "lapi_status.txt" SUPPORT_CAPI_STATUS_PATH = "capi_status.txt" + SUPPORT_PAPI_STATUS_PATH = "papi_status.txt" SUPPORT_ACQUISITION_DIR = "config/acquis/" SUPPORT_CROWDSEC_PROFILE_PATH = "config/profiles.yaml" SUPPORT_CRASH_DIR = "crash/" @@ -195,9 +197,9 @@ func (cli 
*cliSupport) dumpBouncers(zw *zip.Writer, db *database.Client) error { } out := new(bytes.Buffer) - cm := clibouncer.New(cli.cfg) + cb := clibouncer.New(cli.cfg) - if err := cm.List(out, db); err != nil { + if err := cb.List(out, db); err != nil { return err } @@ -265,6 +267,24 @@ func (cli *cliSupport) dumpCAPIStatus(zw *zip.Writer, hub *cwhub.Hub) error { return nil } +func (cli *cliSupport) dumpPAPIStatus(zw *zip.Writer, db *database.Client) error { + log.Info("Collecting PAPI status") + + out := new(bytes.Buffer) + cp := clipapi.New(cli.cfg) + + err := cp.Status(out, db) + if err != nil { + fmt.Fprintf(out, "%s\n", err) + } + + stripped := stripAnsiString(out.String()) + + cli.writeToZip(zw, SUPPORT_PAPI_STATUS_PATH, time.Now(), strings.NewReader(stripped)) + + return nil +} + func (cli *cliSupport) dumpConfigYAML(zw *zip.Writer) error { log.Info("Collecting crowdsec config") @@ -517,6 +537,10 @@ func (cli *cliSupport) dump(ctx context.Context, outFile string) error { if err = cli.dumpCAPIStatus(zipWriter, hub); err != nil { log.Warnf("could not collect CAPI status: %s", err) } + + if err = cli.dumpPAPIStatus(zipWriter, db); err != nil { + log.Warnf("could not collect PAPI status: %s", err) + } } if !skipLAPI { diff --git a/cmd/crowdsec-cli/require/require.go b/cmd/crowdsec-cli/require/require.go index 15d8bce682d..191eee55bc5 100644 --- a/cmd/crowdsec-cli/require/require.go +++ b/cmd/crowdsec-cli/require/require.go @@ -34,6 +34,14 @@ func CAPI(c *csconfig.Config) error { } func PAPI(c *csconfig.Config) error { + if err := CAPI(c); err != nil { + return err + } + + if err := CAPIRegistered(c); err != nil { + return err + } + if c.API.Server.OnlineClient.Credentials.PapiURL == "" { return errors.New("no PAPI URL in configuration") } diff --git a/test/bats/04_capi.bats b/test/bats/04_capi.bats index f17ce376d62..7ba6bfa4428 100644 --- a/test/bats/04_capi.bats +++ b/test/bats/04_capi.bats @@ -46,13 +46,26 @@ setup() { assert_stderr --regexp "no configuration for 
Central API \(CAPI\) in '$(echo $CONFIG_YAML|sed s#//#/#g)'" } -@test "cscli capi status" { +@test "cscli {capi,papi} status" { ./instance-data load config_enable_capi + + # should not panic with no credentials, but return an error + rune -1 cscli papi status + assert_stderr --partial "the Central API (CAPI) must be configured with 'cscli capi register'" + rune -0 cscli capi register --schmilblick githubciXXXXXXXXXXXXXXXXXXXXXXXX rune -1 cscli capi status assert_stderr --partial "no scenarios or appsec-rules installed, abort" + rune -1 cscli papi status + assert_stderr --partial "no PAPI URL in configuration" + + rune -0 cscli console enable console_management + rune -1 cscli papi status + assert_stderr --partial "unable to get PAPI permissions" + assert_stderr --partial "Forbidden for plan" + rune -0 cscli scenarios install crowdsecurity/ssh-bf rune -0 cscli capi status assert_output --partial "Loaded credentials from" From 57539f61b46ddf8d5f6266a793458d51bea4cd74 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 11 Sep 2024 15:38:15 +0200 Subject: [PATCH 279/318] refact cscli - don't export functions if not required (#3224) * unexport subcommand constructors * unexport internal methods * lint + rename local variables --- cmd/crowdsec-cli/clialert/alerts.go | 86 +++++++++---------- cmd/crowdsec-cli/clialert/table.go | 2 +- cmd/crowdsec-cli/clihub/item_metrics.go | 26 +++--- cmd/crowdsec-cli/clihub/items.go | 6 +- cmd/crowdsec-cli/clihubtest/clean.go | 2 +- cmd/crowdsec-cli/clihubtest/coverage.go | 2 +- cmd/crowdsec-cli/clihubtest/create.go | 2 +- cmd/crowdsec-cli/clihubtest/eval.go | 2 +- cmd/crowdsec-cli/clihubtest/explain.go | 6 +- cmd/crowdsec-cli/clihubtest/hubtest.go | 16 ++-- cmd/crowdsec-cli/clihubtest/info.go | 2 +- cmd/crowdsec-cli/clihubtest/list.go | 2 +- cmd/crowdsec-cli/clihubtest/run.go | 18 ++-- .../clinotifications/notifications.go | 16 ++-- cmd/crowdsec-cli/clisetup/setup.go | 27 +++--- 
cmd/crowdsec-cli/clisimulation/simulation.go | 12 +-- 16 files changed, 113 insertions(+), 114 deletions(-) diff --git a/cmd/crowdsec-cli/clialert/alerts.go b/cmd/crowdsec-cli/clialert/alerts.go index c5e27394f77..757a84927e5 100644 --- a/cmd/crowdsec-cli/clialert/alerts.go +++ b/cmd/crowdsec-cli/clialert/alerts.go @@ -29,7 +29,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) -func DecisionsFromAlert(alert *models.Alert) string { +func decisionsFromAlert(alert *models.Alert) string { ret := "" decMap := make(map[string]int) @@ -77,7 +77,7 @@ func (cli *cliAlerts) alertsToTable(alerts *models.GetAlertsResponse, printMachi *alertItem.Scenario, alertItem.Source.Cn, alertItem.Source.GetAsNumberName(), - DecisionsFromAlert(alertItem), + decisionsFromAlert(alertItem), *alertItem.StartAt, } if printMachine { @@ -227,10 +227,10 @@ func (cli *cliAlerts) NewCommand() *cobra.Command { }, } - cmd.AddCommand(cli.NewListCmd()) - cmd.AddCommand(cli.NewInspectCmd()) - cmd.AddCommand(cli.NewFlushCmd()) - cmd.AddCommand(cli.NewDeleteCmd()) + cmd.AddCommand(cli.newListCmd()) + cmd.AddCommand(cli.newInspectCmd()) + cmd.AddCommand(cli.newFlushCmd()) + cmd.AddCommand(cli.newDeleteCmd()) return cmd } @@ -323,7 +323,7 @@ func (cli *cliAlerts) list(alertListFilter apiclient.AlertsListOpts, limit *int, return nil } -func (cli *cliAlerts) NewListCmd() *cobra.Command { +func (cli *cliAlerts) newListCmd() *cobra.Command { alertListFilter := apiclient.AlertsListOpts{ ScopeEquals: new(string), ValueEquals: new(string), @@ -377,53 +377,53 @@ cscli alerts list --type ban`, return cmd } -func (cli *cliAlerts) delete(alertDeleteFilter apiclient.AlertsDeleteOpts, ActiveDecision *bool, AlertDeleteAll bool, delAlertByID string, contained *bool) error { +func (cli *cliAlerts) delete(delFilter apiclient.AlertsDeleteOpts, activeDecision *bool, deleteAll bool, delAlertByID string, contained *bool) error { var err error - if !AlertDeleteAll { - *alertDeleteFilter.ScopeEquals, err = 
SanitizeScope(*alertDeleteFilter.ScopeEquals, *alertDeleteFilter.IPEquals, *alertDeleteFilter.RangeEquals) + if !deleteAll { + *delFilter.ScopeEquals, err = SanitizeScope(*delFilter.ScopeEquals, *delFilter.IPEquals, *delFilter.RangeEquals) if err != nil { return err } - if ActiveDecision != nil { - alertDeleteFilter.ActiveDecisionEquals = ActiveDecision + if activeDecision != nil { + delFilter.ActiveDecisionEquals = activeDecision } - if *alertDeleteFilter.ScopeEquals == "" { - alertDeleteFilter.ScopeEquals = nil + if *delFilter.ScopeEquals == "" { + delFilter.ScopeEquals = nil } - if *alertDeleteFilter.ValueEquals == "" { - alertDeleteFilter.ValueEquals = nil + if *delFilter.ValueEquals == "" { + delFilter.ValueEquals = nil } - if *alertDeleteFilter.ScenarioEquals == "" { - alertDeleteFilter.ScenarioEquals = nil + if *delFilter.ScenarioEquals == "" { + delFilter.ScenarioEquals = nil } - if *alertDeleteFilter.IPEquals == "" { - alertDeleteFilter.IPEquals = nil + if *delFilter.IPEquals == "" { + delFilter.IPEquals = nil } - if *alertDeleteFilter.RangeEquals == "" { - alertDeleteFilter.RangeEquals = nil + if *delFilter.RangeEquals == "" { + delFilter.RangeEquals = nil } if contained != nil && *contained { - alertDeleteFilter.Contains = new(bool) + delFilter.Contains = new(bool) } limit := 0 - alertDeleteFilter.Limit = &limit + delFilter.Limit = &limit } else { limit := 0 - alertDeleteFilter = apiclient.AlertsDeleteOpts{Limit: &limit} + delFilter = apiclient.AlertsDeleteOpts{Limit: &limit} } var alerts *models.DeleteAlertsResponse if delAlertByID == "" { - alerts, _, err = cli.client.Alerts.Delete(context.Background(), alertDeleteFilter) + alerts, _, err = cli.client.Alerts.Delete(context.Background(), delFilter) if err != nil { return fmt.Errorf("unable to delete alerts: %w", err) } @@ -439,14 +439,14 @@ func (cli *cliAlerts) delete(alertDeleteFilter apiclient.AlertsDeleteOpts, Activ return nil } -func (cli *cliAlerts) NewDeleteCmd() *cobra.Command { +func (cli 
*cliAlerts) newDeleteCmd() *cobra.Command { var ( - ActiveDecision *bool - AlertDeleteAll bool + activeDecision *bool + deleteAll bool delAlertByID string ) - alertDeleteFilter := apiclient.AlertsDeleteOpts{ + delFilter := apiclient.AlertsDeleteOpts{ ScopeEquals: new(string), ValueEquals: new(string), ScenarioEquals: new(string), @@ -467,12 +467,12 @@ cscli alerts delete -s crowdsecurity/ssh-bf"`, Aliases: []string{"remove"}, Args: cobra.ExactArgs(0), PreRunE: func(cmd *cobra.Command, _ []string) error { - if AlertDeleteAll { + if deleteAll { return nil } - if *alertDeleteFilter.ScopeEquals == "" && *alertDeleteFilter.ValueEquals == "" && - *alertDeleteFilter.ScenarioEquals == "" && *alertDeleteFilter.IPEquals == "" && - *alertDeleteFilter.RangeEquals == "" && delAlertByID == "" { + if *delFilter.ScopeEquals == "" && *delFilter.ValueEquals == "" && + *delFilter.ScenarioEquals == "" && *delFilter.IPEquals == "" && + *delFilter.RangeEquals == "" && delAlertByID == "" { _ = cmd.Usage() return errors.New("at least one filter or --all must be specified") } @@ -480,19 +480,19 @@ cscli alerts delete -s crowdsecurity/ssh-bf"`, return nil }, RunE: func(cmd *cobra.Command, _ []string) error { - return cli.delete(alertDeleteFilter, ActiveDecision, AlertDeleteAll, delAlertByID, contained) + return cli.delete(delFilter, activeDecision, deleteAll, delAlertByID, contained) }, } flags := cmd.Flags() flags.SortFlags = false - flags.StringVar(alertDeleteFilter.ScopeEquals, "scope", "", "the scope (ie. ip,range)") - flags.StringVarP(alertDeleteFilter.ValueEquals, "value", "v", "", "the value to match for in the specified scope") - flags.StringVarP(alertDeleteFilter.ScenarioEquals, "scenario", "s", "", "the scenario (ie. 
crowdsecurity/ssh-bf)") - flags.StringVarP(alertDeleteFilter.IPEquals, "ip", "i", "", "Source ip (shorthand for --scope ip --value )") - flags.StringVarP(alertDeleteFilter.RangeEquals, "range", "r", "", "Range source ip (shorthand for --scope range --value )") + flags.StringVar(delFilter.ScopeEquals, "scope", "", "the scope (ie. ip,range)") + flags.StringVarP(delFilter.ValueEquals, "value", "v", "", "the value to match for in the specified scope") + flags.StringVarP(delFilter.ScenarioEquals, "scenario", "s", "", "the scenario (ie. crowdsecurity/ssh-bf)") + flags.StringVarP(delFilter.IPEquals, "ip", "i", "", "Source ip (shorthand for --scope ip --value )") + flags.StringVarP(delFilter.RangeEquals, "range", "r", "", "Range source ip (shorthand for --scope range --value )") flags.StringVar(&delAlertByID, "id", "", "alert ID") - flags.BoolVarP(&AlertDeleteAll, "all", "a", false, "delete all alerts") + flags.BoolVarP(&deleteAll, "all", "a", false, "delete all alerts") flags.BoolVar(contained, "contained", false, "query decisions contained by range") return cmd @@ -538,7 +538,7 @@ func (cli *cliAlerts) inspect(details bool, alertIDs ...string) error { return nil } -func (cli *cliAlerts) NewInspectCmd() *cobra.Command { +func (cli *cliAlerts) newInspectCmd() *cobra.Command { var details bool cmd := &cobra.Command{ @@ -561,7 +561,7 @@ func (cli *cliAlerts) NewInspectCmd() *cobra.Command { return cmd } -func (cli *cliAlerts) NewFlushCmd() *cobra.Command { +func (cli *cliAlerts) newFlushCmd() *cobra.Command { var ( maxItems int maxAge string diff --git a/cmd/crowdsec-cli/clialert/table.go b/cmd/crowdsec-cli/clialert/table.go index 5dec63ec152..1416e1e435c 100644 --- a/cmd/crowdsec-cli/clialert/table.go +++ b/cmd/crowdsec-cli/clialert/table.go @@ -38,7 +38,7 @@ func alertsTable(out io.Writer, wantColor string, alerts *models.GetAlertsRespon *alertItem.Scenario, alertItem.Source.Cn, alertItem.Source.GetAsNumberName(), - DecisionsFromAlert(alertItem), + 
decisionsFromAlert(alertItem), *alertItem.StartAt, } diff --git a/cmd/crowdsec-cli/clihub/item_metrics.go b/cmd/crowdsec-cli/clihub/item_metrics.go index aaee63d1d38..f4af8f635db 100644 --- a/cmd/crowdsec-cli/clihub/item_metrics.go +++ b/cmd/crowdsec-cli/clihub/item_metrics.go @@ -16,22 +16,22 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -func ShowMetrics(prometheusURL string, hubItem *cwhub.Item, wantColor string) error { +func showMetrics(prometheusURL string, hubItem *cwhub.Item, wantColor string) error { switch hubItem.Type { case cwhub.PARSERS: - metrics := GetParserMetric(prometheusURL, hubItem.Name) + metrics := getParserMetric(prometheusURL, hubItem.Name) parserMetricsTable(color.Output, wantColor, hubItem.Name, metrics) case cwhub.SCENARIOS: - metrics := GetScenarioMetric(prometheusURL, hubItem.Name) + metrics := getScenarioMetric(prometheusURL, hubItem.Name) scenarioMetricsTable(color.Output, wantColor, hubItem.Name, metrics) case cwhub.COLLECTIONS: for _, sub := range hubItem.SubItems() { - if err := ShowMetrics(prometheusURL, sub, wantColor); err != nil { + if err := showMetrics(prometheusURL, sub, wantColor); err != nil { return err } } case cwhub.APPSEC_RULES: - metrics := GetAppsecRuleMetric(prometheusURL, hubItem.Name) + metrics := getAppsecRuleMetric(prometheusURL, hubItem.Name) appsecMetricsTable(color.Output, wantColor, hubItem.Name, metrics) default: // no metrics for this item type } @@ -39,11 +39,11 @@ func ShowMetrics(prometheusURL string, hubItem *cwhub.Item, wantColor string) er return nil } -// GetParserMetric is a complete rip from prom2json -func GetParserMetric(url string, itemName string) map[string]map[string]int { +// getParserMetric is a complete rip from prom2json +func getParserMetric(url string, itemName string) map[string]map[string]int { stats := make(map[string]map[string]int) - result := GetPrometheusMetric(url) + result := getPrometheusMetric(url) for idx, fam := range result { if !strings.HasPrefix(fam.Name, 
"cs_") { continue @@ -131,7 +131,7 @@ func GetParserMetric(url string, itemName string) map[string]map[string]int { return stats } -func GetScenarioMetric(url string, itemName string) map[string]int { +func getScenarioMetric(url string, itemName string) map[string]int { stats := make(map[string]int) stats["instantiation"] = 0 @@ -140,7 +140,7 @@ func GetScenarioMetric(url string, itemName string) map[string]int { stats["pour"] = 0 stats["underflow"] = 0 - result := GetPrometheusMetric(url) + result := getPrometheusMetric(url) for idx, fam := range result { if !strings.HasPrefix(fam.Name, "cs_") { continue @@ -195,13 +195,13 @@ func GetScenarioMetric(url string, itemName string) map[string]int { return stats } -func GetAppsecRuleMetric(url string, itemName string) map[string]int { +func getAppsecRuleMetric(url string, itemName string) map[string]int { stats := make(map[string]int) stats["inband_hits"] = 0 stats["outband_hits"] = 0 - results := GetPrometheusMetric(url) + results := getPrometheusMetric(url) for idx, fam := range results { if !strings.HasPrefix(fam.Name, "cs_") { continue @@ -260,7 +260,7 @@ func GetAppsecRuleMetric(url string, itemName string) map[string]int { return stats } -func GetPrometheusMetric(url string) []*prom2json.Family { +func getPrometheusMetric(url string) []*prom2json.Family { mfChan := make(chan *dto.MetricFamily, 1024) // Start with the DefaultTransport for sane defaults. 
diff --git a/cmd/crowdsec-cli/clihub/items.go b/cmd/crowdsec-cli/clihub/items.go index 4dd3c46e0f9..0ab89654dac 100644 --- a/cmd/crowdsec-cli/clihub/items.go +++ b/cmd/crowdsec-cli/clihub/items.go @@ -146,7 +146,7 @@ func ListItems(out io.Writer, wantColor string, itemTypes []string, items map[st return nil } -func InspectItem(item *cwhub.Item, showMetrics bool, output string, prometheusURL string, wantColor string) error { +func InspectItem(item *cwhub.Item, wantMetrics bool, output string, prometheusURL string, wantColor string) error { switch output { case "human", "raw": enc := yaml.NewEncoder(os.Stdout) @@ -174,10 +174,10 @@ func InspectItem(item *cwhub.Item, showMetrics bool, output string, prometheusUR fmt.Println() } - if showMetrics { + if wantMetrics { fmt.Printf("\nCurrent metrics: \n") - if err := ShowMetrics(prometheusURL, item, wantColor); err != nil { + if err := showMetrics(prometheusURL, item, wantColor); err != nil { return err } } diff --git a/cmd/crowdsec-cli/clihubtest/clean.go b/cmd/crowdsec-cli/clihubtest/clean.go index 075d7961d84..e3b40b6bd57 100644 --- a/cmd/crowdsec-cli/clihubtest/clean.go +++ b/cmd/crowdsec-cli/clihubtest/clean.go @@ -6,7 +6,7 @@ import ( "github.com/spf13/cobra" ) -func (cli *cliHubTest) NewCleanCmd() *cobra.Command { +func (cli *cliHubTest) newCleanCmd() *cobra.Command { cmd := &cobra.Command{ Use: "clean", Short: "clean [test_name]", diff --git a/cmd/crowdsec-cli/clihubtest/coverage.go b/cmd/crowdsec-cli/clihubtest/coverage.go index b6e5b1e9c01..5a4f231caf5 100644 --- a/cmd/crowdsec-cli/clihubtest/coverage.go +++ b/cmd/crowdsec-cli/clihubtest/coverage.go @@ -140,7 +140,7 @@ func (cli *cliHubTest) coverage(showScenarioCov bool, showParserCov bool, showAp return nil } -func (cli *cliHubTest) NewCoverageCmd() *cobra.Command { +func (cli *cliHubTest) newCoverageCmd() *cobra.Command { var ( showParserCov bool showScenarioCov bool diff --git a/cmd/crowdsec-cli/clihubtest/create.go b/cmd/crowdsec-cli/clihubtest/create.go 
index 523c9740cf7..e0834f7e569 100644 --- a/cmd/crowdsec-cli/clihubtest/create.go +++ b/cmd/crowdsec-cli/clihubtest/create.go @@ -13,7 +13,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/hubtest" ) -func (cli *cliHubTest) NewCreateCmd() *cobra.Command { +func (cli *cliHubTest) newCreateCmd() *cobra.Command { var ( ignoreParsers bool labels map[string]string diff --git a/cmd/crowdsec-cli/clihubtest/eval.go b/cmd/crowdsec-cli/clihubtest/eval.go index 204a0bc420e..83e9eae9c15 100644 --- a/cmd/crowdsec-cli/clihubtest/eval.go +++ b/cmd/crowdsec-cli/clihubtest/eval.go @@ -6,7 +6,7 @@ import ( "github.com/spf13/cobra" ) -func (cli *cliHubTest) NewEvalCmd() *cobra.Command { +func (cli *cliHubTest) newEvalCmd() *cobra.Command { var evalExpression string cmd := &cobra.Command{ diff --git a/cmd/crowdsec-cli/clihubtest/explain.go b/cmd/crowdsec-cli/clihubtest/explain.go index 4183b6a515d..dbe10fa7ec0 100644 --- a/cmd/crowdsec-cli/clihubtest/explain.go +++ b/cmd/crowdsec-cli/clihubtest/explain.go @@ -8,7 +8,6 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/dumps" ) - func (cli *cliHubTest) explain(testName string, details bool, skipOk bool) error { test, err := HubTest.LoadTestItem(testName) if err != nil { @@ -39,7 +38,7 @@ func (cli *cliHubTest) explain(testName string, details bool, skipOk bool) error opts := dumps.DumpOpts{ Details: details, - SkipOk: skipOk, + SkipOk: skipOk, } dumps.DumpTree(*test.ParserAssert.TestData, *test.ScenarioAssert.PourData, opts) @@ -47,8 +46,7 @@ func (cli *cliHubTest) explain(testName string, details bool, skipOk bool) error return nil } - -func (cli *cliHubTest) NewExplainCmd() *cobra.Command { +func (cli *cliHubTest) newExplainCmd() *cobra.Command { var ( details bool skipOk bool diff --git a/cmd/crowdsec-cli/clihubtest/hubtest.go b/cmd/crowdsec-cli/clihubtest/hubtest.go index 22607336177..3420e21e1e2 100644 --- a/cmd/crowdsec-cli/clihubtest/hubtest.go +++ b/cmd/crowdsec-cli/clihubtest/hubtest.go @@ -68,14 +68,14 @@ func (cli 
*cliHubTest) NewCommand() *cobra.Command { cmd.PersistentFlags().StringVar(&cscliPath, "cscli", "cscli", "Path to cscli") cmd.PersistentFlags().BoolVar(&isAppsecTest, "appsec", false, "Command relates to appsec tests") - cmd.AddCommand(cli.NewCreateCmd()) - cmd.AddCommand(cli.NewRunCmd()) - cmd.AddCommand(cli.NewCleanCmd()) - cmd.AddCommand(cli.NewInfoCmd()) - cmd.AddCommand(cli.NewListCmd()) - cmd.AddCommand(cli.NewCoverageCmd()) - cmd.AddCommand(cli.NewEvalCmd()) - cmd.AddCommand(cli.NewExplainCmd()) + cmd.AddCommand(cli.newCreateCmd()) + cmd.AddCommand(cli.newRunCmd()) + cmd.AddCommand(cli.newCleanCmd()) + cmd.AddCommand(cli.newInfoCmd()) + cmd.AddCommand(cli.newListCmd()) + cmd.AddCommand(cli.newCoverageCmd()) + cmd.AddCommand(cli.newEvalCmd()) + cmd.AddCommand(cli.newExplainCmd()) return cmd } diff --git a/cmd/crowdsec-cli/clihubtest/info.go b/cmd/crowdsec-cli/clihubtest/info.go index 2e3fd132340..a5d760eea01 100644 --- a/cmd/crowdsec-cli/clihubtest/info.go +++ b/cmd/crowdsec-cli/clihubtest/info.go @@ -10,7 +10,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/hubtest" ) -func (cli *cliHubTest) NewInfoCmd() *cobra.Command { +func (cli *cliHubTest) newInfoCmd() *cobra.Command { cmd := &cobra.Command{ Use: "info", Short: "info [test_name]", diff --git a/cmd/crowdsec-cli/clihubtest/list.go b/cmd/crowdsec-cli/clihubtest/list.go index 76c51927897..3e76824a18e 100644 --- a/cmd/crowdsec-cli/clihubtest/list.go +++ b/cmd/crowdsec-cli/clihubtest/list.go @@ -9,7 +9,7 @@ import ( "github.com/spf13/cobra" ) -func (cli *cliHubTest) NewListCmd() *cobra.Command { +func (cli *cliHubTest) newListCmd() *cobra.Command { cmd := &cobra.Command{ Use: "list", Short: "list", diff --git a/cmd/crowdsec-cli/clihubtest/run.go b/cmd/crowdsec-cli/clihubtest/run.go index 552ee87c16e..57956ce67dc 100644 --- a/cmd/crowdsec-cli/clihubtest/run.go +++ b/cmd/crowdsec-cli/clihubtest/run.go @@ -16,15 +16,15 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/hubtest" ) -func (cli *cliHubTest) 
run(runAll bool, NucleiTargetHost string, AppSecHost string, args []string) error { +func (cli *cliHubTest) run(runAll bool, nucleiTargetHost string, appSecHost string, args []string) error { cfg := cli.cfg() if !runAll && len(args) == 0 { return errors.New("please provide test to run or --all flag") } - hubPtr.NucleiTargetHost = NucleiTargetHost - hubPtr.AppSecHost = AppSecHost + hubPtr.NucleiTargetHost = nucleiTargetHost + hubPtr.AppSecHost = appSecHost if runAll { if err := hubPtr.LoadAllTests(); err != nil { @@ -56,13 +56,13 @@ func (cli *cliHubTest) run(runAll bool, NucleiTargetHost string, AppSecHost stri return nil } -func (cli *cliHubTest) NewRunCmd() *cobra.Command { +func (cli *cliHubTest) newRunCmd() *cobra.Command { var ( noClean bool runAll bool forceClean bool - NucleiTargetHost string - AppSecHost string + nucleiTargetHost string + appSecHost string ) cmd := &cobra.Command{ @@ -70,7 +70,7 @@ func (cli *cliHubTest) NewRunCmd() *cobra.Command { Short: "run [test_name]", DisableAutoGenTag: true, RunE: func(_ *cobra.Command, args []string) error { - return cli.run(runAll, NucleiTargetHost, AppSecHost, args) + return cli.run(runAll, nucleiTargetHost, appSecHost, args) }, PersistentPostRunE: func(_ *cobra.Command, _ []string) error { cfg := cli.cfg() @@ -187,8 +187,8 @@ func (cli *cliHubTest) NewRunCmd() *cobra.Command { cmd.Flags().BoolVar(&noClean, "no-clean", false, "Don't clean runtime environment if test succeed") cmd.Flags().BoolVar(&forceClean, "clean", false, "Clean runtime environment if test fail") - cmd.Flags().StringVar(&NucleiTargetHost, "target", hubtest.DefaultNucleiTarget, "Target for AppSec Test") - cmd.Flags().StringVar(&AppSecHost, "host", hubtest.DefaultAppsecHost, "Address to expose AppSec for hubtest") + cmd.Flags().StringVar(&nucleiTargetHost, "target", hubtest.DefaultNucleiTarget, "Target for AppSec Test") + cmd.Flags().StringVar(&appSecHost, "host", hubtest.DefaultAppsecHost, "Address to expose AppSec for hubtest") 
cmd.Flags().BoolVar(&runAll, "all", false, "Run all tests") return cmd diff --git a/cmd/crowdsec-cli/clinotifications/notifications.go b/cmd/crowdsec-cli/clinotifications/notifications.go index 87a94243c5d..eb568ca5fa6 100644 --- a/cmd/crowdsec-cli/clinotifications/notifications.go +++ b/cmd/crowdsec-cli/clinotifications/notifications.go @@ -72,10 +72,10 @@ func (cli *cliNotifications) NewCommand() *cobra.Command { }, } - cmd.AddCommand(cli.NewListCmd()) - cmd.AddCommand(cli.NewInspectCmd()) - cmd.AddCommand(cli.NewReinjectCmd()) - cmd.AddCommand(cli.NewTestCmd()) + cmd.AddCommand(cli.newListCmd()) + cmd.AddCommand(cli.newInspectCmd()) + cmd.AddCommand(cli.newReinjectCmd()) + cmd.AddCommand(cli.newTestCmd()) return cmd } @@ -152,7 +152,7 @@ func (cli *cliNotifications) getProfilesConfigs() (map[string]NotificationsCfg, return ncfgs, nil } -func (cli *cliNotifications) NewListCmd() *cobra.Command { +func (cli *cliNotifications) newListCmd() *cobra.Command { cmd := &cobra.Command{ Use: "list", Short: "list notifications plugins", @@ -201,7 +201,7 @@ func (cli *cliNotifications) NewListCmd() *cobra.Command { return cmd } -func (cli *cliNotifications) NewInspectCmd() *cobra.Command { +func (cli *cliNotifications) newInspectCmd() *cobra.Command { cmd := &cobra.Command{ Use: "inspect", Short: "Inspect notifications plugin", @@ -260,7 +260,7 @@ func (cli *cliNotifications) notificationConfigFilter(cmd *cobra.Command, args [ return ret, cobra.ShellCompDirectiveNoFileComp } -func (cli cliNotifications) NewTestCmd() *cobra.Command { +func (cli cliNotifications) newTestCmd() *cobra.Command { var ( pluginBroker csplugin.PluginBroker pluginTomb tomb.Tomb @@ -351,7 +351,7 @@ func (cli cliNotifications) NewTestCmd() *cobra.Command { return cmd } -func (cli *cliNotifications) NewReinjectCmd() *cobra.Command { +func (cli *cliNotifications) newReinjectCmd() *cobra.Command { var ( alertOverride string alert *models.Alert diff --git a/cmd/crowdsec-cli/clisetup/setup.go 
b/cmd/crowdsec-cli/clisetup/setup.go index 2a1a2bd0560..8aee45b4287 100644 --- a/cmd/crowdsec-cli/clisetup/setup.go +++ b/cmd/crowdsec-cli/clisetup/setup.go @@ -39,10 +39,10 @@ func (cli *cliSetup) NewCommand() *cobra.Command { DisableAutoGenTag: true, } - cmd.AddCommand(cli.NewDetectCmd()) - cmd.AddCommand(cli.NewInstallHubCmd()) - cmd.AddCommand(cli.NewDataSourcesCmd()) - cmd.AddCommand(cli.NewValidateCmd()) + cmd.AddCommand(cli.newDetectCmd()) + cmd.AddCommand(cli.newInstallHubCmd()) + cmd.AddCommand(cli.newDataSourcesCmd()) + cmd.AddCommand(cli.newValidateCmd()) return cmd } @@ -76,23 +76,24 @@ func (f *detectFlags) bind(cmd *cobra.Command) { flags.BoolVar(&f.outYaml, "yaml", false, "output yaml, not json") } -func (cli *cliSetup) NewDetectCmd() *cobra.Command { +func (cli *cliSetup) newDetectCmd() *cobra.Command { f := detectFlags{} cmd := &cobra.Command{ Use: "detect", Short: "detect running services, generate a setup file", DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, args []string) error { + RunE: func(_ *cobra.Command, args []string) error { return cli.detect(f) }, } f.bind(cmd) + return cmd } -func (cli *cliSetup) NewInstallHubCmd() *cobra.Command { +func (cli *cliSetup) newInstallHubCmd() *cobra.Command { var dryRun bool cmd := &cobra.Command{ @@ -100,7 +101,7 @@ func (cli *cliSetup) NewInstallHubCmd() *cobra.Command { Short: "install items from a setup file", Args: cobra.ExactArgs(1), DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(cmd *cobra.Command, args []string) error { return cli.install(cmd.Context(), dryRun, args[0]) }, } @@ -111,7 +112,7 @@ func (cli *cliSetup) NewInstallHubCmd() *cobra.Command { return cmd } -func (cli *cliSetup) NewDataSourcesCmd() *cobra.Command { +func (cli *cliSetup) newDataSourcesCmd() *cobra.Command { var toDir string cmd := &cobra.Command{ @@ -119,7 +120,7 @@ func (cli *cliSetup) NewDataSourcesCmd() *cobra.Command { Short: "generate datasource (acquisition) 
configuration from a setup file", Args: cobra.ExactArgs(1), DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(cmd *cobra.Command, args []string) error { return cli.dataSources(args[0], toDir) }, } @@ -130,13 +131,13 @@ func (cli *cliSetup) NewDataSourcesCmd() *cobra.Command { return cmd } -func (cli *cliSetup) NewValidateCmd() *cobra.Command { +func (cli *cliSetup) newValidateCmd() *cobra.Command { cmd := &cobra.Command{ Use: "validate [setup_file]", Short: "validate a setup file", Args: cobra.ExactArgs(1), DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(cmd *cobra.Command, args []string) error { return cli.validate(args[0]) }, } @@ -165,7 +166,7 @@ func (cli *cliSetup) detect(f detectFlags) error { } if !f.snubSystemd { - _, err := exec.LookPath("systemctl") + _, err = exec.LookPath("systemctl") if err != nil { log.Debug("systemctl not available: snubbing systemd") diff --git a/cmd/crowdsec-cli/clisimulation/simulation.go b/cmd/crowdsec-cli/clisimulation/simulation.go index bf986f82c06..9d9defd78e7 100644 --- a/cmd/crowdsec-cli/clisimulation/simulation.go +++ b/cmd/crowdsec-cli/clisimulation/simulation.go @@ -55,14 +55,14 @@ cscli simulation disable crowdsecurity/ssh-bf`, cmd.Flags().SortFlags = false cmd.PersistentFlags().SortFlags = false - cmd.AddCommand(cli.NewEnableCmd()) - cmd.AddCommand(cli.NewDisableCmd()) - cmd.AddCommand(cli.NewStatusCmd()) + cmd.AddCommand(cli.newEnableCmd()) + cmd.AddCommand(cli.newDisableCmd()) + cmd.AddCommand(cli.newStatusCmd()) return cmd } -func (cli *cliSimulation) NewEnableCmd() *cobra.Command { +func (cli *cliSimulation) newEnableCmd() *cobra.Command { var forceGlobalSimulation bool cmd := &cobra.Command{ @@ -122,7 +122,7 @@ func (cli *cliSimulation) NewEnableCmd() *cobra.Command { return cmd } -func (cli *cliSimulation) NewDisableCmd() *cobra.Command { +func (cli *cliSimulation) newDisableCmd() *cobra.Command { var forceGlobalSimulation 
bool cmd := &cobra.Command{ @@ -169,7 +169,7 @@ func (cli *cliSimulation) NewDisableCmd() *cobra.Command { return cmd } -func (cli *cliSimulation) NewStatusCmd() *cobra.Command { +func (cli *cliSimulation) newStatusCmd() *cobra.Command { cmd := &cobra.Command{ Use: "status", Short: "Show simulation mode status", From 584a19f3f18763b4a1ccba0f2c2249754bdc154a Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 12 Sep 2024 17:07:30 +0200 Subject: [PATCH 280/318] fix #3225: info->debug for trigger bucket overflow (#3227) * fix #3225: info->debug for trigger bucket overflow * lint --- pkg/leakybucket/trigger.go | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/pkg/leakybucket/trigger.go b/pkg/leakybucket/trigger.go index b6af1431888..7558f696dc7 100644 --- a/pkg/leakybucket/trigger.go +++ b/pkg/leakybucket/trigger.go @@ -16,25 +16,31 @@ func (t *Trigger) OnBucketPour(b *BucketFactory) func(types.Event, *Leaky) *type // Pour makes the bucket overflow all the time // TriggerPour unconditionally overflows return func(msg types.Event, l *Leaky) *types.Event { + now := time.Now().UTC() + if l.Mode == types.TIMEMACHINE { var d time.Time + err := d.UnmarshalText([]byte(msg.MarshaledTime)) if err != nil { log.Warningf("Failed unmarshaling event time (%s) : %v", msg.MarshaledTime, err) - d = time.Now().UTC() + + d = now } + l.logger.Debugf("yay timemachine overflow time : %s --> %s", d, msg.MarshaledTime) l.Last_ts = d l.First_ts = d l.Ovflw_ts = d } else { - l.Last_ts = time.Now().UTC() - l.First_ts = time.Now().UTC() - l.Ovflw_ts = time.Now().UTC() + l.Last_ts = now + l.First_ts = now + l.Ovflw_ts = now } + l.Total_count = 1 - l.logger.Infof("Bucket overflow") + l.logger.Debug("Bucket overflow") l.Queue.Add(msg) l.Out <- l.Queue From d5c587cf2b7e4ee5ae74b2249016814fc6769e91 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 12 Sep 2024 17:08:22 +0200 Subject: [PATCH 
281/318] refact / split APIServer.Run() method (#3215) --- pkg/apiserver/apiserver.go | 124 ++++++++++++++++++++----------------- 1 file changed, 67 insertions(+), 57 deletions(-) diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go index 31b31bcb82d..42dcb219379 100644 --- a/pkg/apiserver/apiserver.go +++ b/pkg/apiserver/apiserver.go @@ -301,6 +301,72 @@ func (s *APIServer) Router() (*gin.Engine, error) { return s.router, nil } +func (s *APIServer) apicPush() error { + if err := s.apic.Push(); err != nil { + log.Errorf("capi push: %s", err) + return err + } + + return nil +} + +func (s *APIServer) apicPull() error { + if err := s.apic.Pull(); err != nil { + log.Errorf("capi pull: %s", err) + return err + } + + return nil +} + +func (s *APIServer) papiPull() error { + if err := s.papi.Pull(); err != nil { + log.Errorf("papi pull: %s", err) + return err + } + + return nil +} + +func (s *APIServer) papiSync() error { + if err := s.papi.SyncDecisions(); err != nil { + log.Errorf("capi decisions sync: %s", err) + return err + } + + return nil +} + +func (s *APIServer) initAPIC() { + s.apic.pushTomb.Go(s.apicPush) + s.apic.pullTomb.Go(s.apicPull) + + // csConfig.API.Server.ConsoleConfig.ShareCustomScenarios + if s.apic.apiClient.IsEnrolled() { + if s.consoleConfig.IsPAPIEnabled() { + if s.papi.URL != "" { + log.Info("Starting PAPI decision receiver") + s.papi.pullTomb.Go(s.papiPull) + s.papi.syncTomb.Go(s.papiSync) + } else { + log.Warnf("papi_url is not set in online_api_credentials.yaml, can't synchronize with the console. 
Run cscli console enable console_management to add it.") + } + } else { + log.Warningf("Machine is not allowed to synchronize decisions, you can enable it with `cscli console enable console_management`") + } + } + + s.apic.metricsTomb.Go(func() error { + s.apic.SendMetrics(make(chan bool)) + return nil + }) + + s.apic.metricsTomb.Go(func() error { + s.apic.SendUsageMetrics() + return nil + }) +} + func (s *APIServer) Run(apiReady chan bool) error { defer trace.CatchPanic("lapi/runServer") @@ -316,63 +382,7 @@ func (s *APIServer) Run(apiReady chan bool) error { } if s.apic != nil { - s.apic.pushTomb.Go(func() error { - if err := s.apic.Push(); err != nil { - log.Errorf("capi push: %s", err) - return err - } - - return nil - }) - - s.apic.pullTomb.Go(func() error { - if err := s.apic.Pull(); err != nil { - log.Errorf("capi pull: %s", err) - return err - } - - return nil - }) - - // csConfig.API.Server.ConsoleConfig.ShareCustomScenarios - if s.apic.apiClient.IsEnrolled() { - if s.consoleConfig.IsPAPIEnabled() { - if s.papi.URL != "" { - log.Info("Starting PAPI decision receiver") - s.papi.pullTomb.Go(func() error { - if err := s.papi.Pull(); err != nil { - log.Errorf("papi pull: %s", err) - return err - } - - return nil - }) - - s.papi.syncTomb.Go(func() error { - if err := s.papi.SyncDecisions(); err != nil { - log.Errorf("capi decisions sync: %s", err) - return err - } - - return nil - }) - } else { - log.Warnf("papi_url is not set in online_api_credentials.yaml, can't synchronize with the console. 
Run cscli console enable console_management to add it.") - } - } else { - log.Warningf("Machine is not allowed to synchronize decisions, you can enable it with `cscli console enable console_management`") - } - } - - s.apic.metricsTomb.Go(func() error { - s.apic.SendMetrics(make(chan bool)) - return nil - }) - - s.apic.metricsTomb.Go(func() error { - s.apic.SendUsageMetrics() - return nil - }) + s.initAPIC() } s.httpServerTomb.Go(func() error { From cae76baa3a3af7b6279155c2a95a814de95848b4 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 12 Sep 2024 17:26:39 +0200 Subject: [PATCH 282/318] refact acquisition: build profiles (optionally exclude datasources from final binary) (#3217) example $ make BUILD_PROFILE=minimal or $ make EXCLUDE=datasource_s3,datasource_kinesis --- .github/workflows/go-tests.yml | 5 + Makefile | 63 ++++++ cmd/crowdsec/appsec.go | 18 ++ cmd/crowdsec/appsec_stub.go | 11 ++ cmd/crowdsec/crowdsec.go | 11 +- pkg/acquisition/acquisition.go | 185 +++++++++++------- pkg/acquisition/acquisition_test.go | 27 ++- pkg/acquisition/appsec.go | 12 ++ pkg/acquisition/cloudwatch.go | 12 ++ pkg/acquisition/docker.go | 12 ++ pkg/acquisition/file.go | 12 ++ pkg/acquisition/journalctl.go | 12 ++ pkg/acquisition/k8s.go | 12 ++ pkg/acquisition/kafka.go | 12 ++ pkg/acquisition/kinesis.go | 12 ++ pkg/acquisition/loki.go | 12 ++ .../modules/appsec/appsec_lnx_test.go | 2 +- .../modules/appsec/appsec_win_test.go | 1 - pkg/acquisition/s3.go | 12 ++ pkg/acquisition/syslog.go | 12 ++ pkg/acquisition/wineventlog.go | 12 ++ pkg/cwversion/constraint/constraint.go | 32 +++ pkg/cwversion/version.go | 58 +++--- pkg/leakybucket/manager_load.go | 6 +- pkg/parser/stage.go | 6 +- pkg/setup/detect.go | 6 +- pkg/setup/detect_test.go | 2 +- test/bats.mk | 6 + test/bats/01_crowdsec.bats | 37 +++- 29 files changed, 485 insertions(+), 135 deletions(-) create mode 100644 cmd/crowdsec/appsec.go create mode 100644 cmd/crowdsec/appsec_stub.go create 
mode 100644 pkg/acquisition/appsec.go create mode 100644 pkg/acquisition/cloudwatch.go create mode 100644 pkg/acquisition/docker.go create mode 100644 pkg/acquisition/file.go create mode 100644 pkg/acquisition/journalctl.go create mode 100644 pkg/acquisition/k8s.go create mode 100644 pkg/acquisition/kafka.go create mode 100644 pkg/acquisition/kinesis.go create mode 100644 pkg/acquisition/loki.go create mode 100644 pkg/acquisition/s3.go create mode 100644 pkg/acquisition/syslog.go create mode 100644 pkg/acquisition/wineventlog.go create mode 100644 pkg/cwversion/constraint/constraint.go diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index 3709c695231..df5c0b4fb88 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -142,6 +142,11 @@ jobs: make build BUILD_STATIC=1 make go-acc | sed 's/ *coverage:.*of statements in.*//' | richgo testfilter + # check if some component stubs are missing + - name: "Build profile: minimal" + run: | + make build BUILD_PROFILE=minimal + - name: Run tests again, dynamic run: | make clean build diff --git a/Makefile b/Makefile index 207b5d610f0..6bd3cbb7980 100644 --- a/Makefile +++ b/Makefile @@ -115,6 +115,68 @@ STRIP_SYMBOLS := -s -w DISABLE_OPTIMIZATION := endif +#-------------------------------------- + +# Handle optional components and build profiles, to save space on the final binaries. + +# Keep it safe for now until we decide how to expand on the idea. Either choose a profile or exclude components manually. +# For example if we want to disable some component by default, or have opt-in components (INCLUDE?). 
+ +ifeq ($(and $(BUILD_PROFILE),$(EXCLUDE)),1) +$(error "Cannot specify both BUILD_PROFILE and EXCLUDE") +endif + +COMPONENTS := \ + datasource_appsec \ + datasource_cloudwatch \ + datasource_docker \ + datasource_file \ + datasource_k8saudit \ + datasource_kafka \ + datasource_journalctl \ + datasource_kinesis \ + datasource_loki \ + datasource_s3 \ + datasource_syslog \ + datasource_wineventlog + +comma := , +space := $(empty) $(empty) + +# Predefined profiles + +# keep only datasource-file +EXCLUDE_MINIMAL := $(subst $(space),$(comma),$(filter-out datasource_file,,$(COMPONENTS))) + +# example +# EXCLUDE_MEDIUM := datasource_kafka,datasource_kinesis,datasource_s3 + +BUILD_PROFILE ?= default + +# Set the EXCLUDE_LIST based on the chosen profile, unless EXCLUDE is already set +ifeq ($(BUILD_PROFILE),minimal) +EXCLUDE ?= $(EXCLUDE_MINIMAL) +else ifneq ($(BUILD_PROFILE),default) +$(error Invalid build profile specified: $(BUILD_PROFILE). Valid profiles are: minimal, default) +endif + +# Create list of excluded components from the EXCLUDE variable +EXCLUDE_LIST := $(subst $(comma),$(space),$(EXCLUDE)) + +INVALID_COMPONENTS := $(filter-out $(COMPONENTS),$(EXCLUDE_LIST)) +ifneq ($(INVALID_COMPONENTS),) +$(error Invalid optional components specified in EXCLUDE: $(INVALID_COMPONENTS). 
Valid components are: $(COMPONENTS)) +endif + +# Convert the excluded components to "no_" form +COMPONENT_TAGS := $(foreach component,$(EXCLUDE_LIST),no_$(component)) + +ifneq ($(COMPONENT_TAGS),) +GO_TAGS := $(GO_TAGS),$(subst $(space),$(comma),$(COMPONENT_TAGS)) +endif + +#-------------------------------------- + export LD_OPTS=-ldflags "$(STRIP_SYMBOLS) $(EXTLDFLAGS) $(LD_OPTS_VARS)" \ -trimpath -tags $(GO_TAGS) $(DISABLE_OPTIMIZATION) @@ -130,6 +192,7 @@ build: build-info crowdsec cscli plugins ## Build crowdsec, cscli and plugins .PHONY: build-info build-info: ## Print build information $(info Building $(BUILD_VERSION) ($(BUILD_TAG)) $(BUILD_TYPE) for $(GOOS)/$(GOARCH)) + $(info Excluded components: $(EXCLUDE_LIST)) ifneq (,$(RE2_FAIL)) $(error $(RE2_FAIL)) diff --git a/cmd/crowdsec/appsec.go b/cmd/crowdsec/appsec.go new file mode 100644 index 00000000000..cb02b137dcd --- /dev/null +++ b/cmd/crowdsec/appsec.go @@ -0,0 +1,18 @@ +// +build !no_datasource_appsec + +package main + +import ( + "fmt" + + "github.com/crowdsecurity/crowdsec/pkg/appsec" + "github.com/crowdsecurity/crowdsec/pkg/cwhub" +) + +func LoadAppsecRules(hub *cwhub.Hub) error { + if err := appsec.LoadAppsecRules(hub); err != nil { + return fmt.Errorf("while loading appsec rules: %w", err) + } + + return nil +} diff --git a/cmd/crowdsec/appsec_stub.go b/cmd/crowdsec/appsec_stub.go new file mode 100644 index 00000000000..4a65b32a9ad --- /dev/null +++ b/cmd/crowdsec/appsec_stub.go @@ -0,0 +1,11 @@ +//go:build no_datasource_appsec + +package main + +import ( + "github.com/crowdsecurity/crowdsec/pkg/cwhub" +) + +func LoadAppsecRules(hub *cwhub.Hub) error { + return nil +} diff --git a/cmd/crowdsec/crowdsec.go b/cmd/crowdsec/crowdsec.go index 5aafc6b0dfe..460e8ab4328 100644 --- a/cmd/crowdsec/crowdsec.go +++ b/cmd/crowdsec/crowdsec.go @@ -14,7 +14,6 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/acquisition" "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" 
"github.com/crowdsecurity/crowdsec/pkg/alertcontext" - "github.com/crowdsecurity/crowdsec/pkg/appsec" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" @@ -43,12 +42,13 @@ func initCrowdsec(cConfig *csconfig.Config, hub *cwhub.Hub) (*parser.Parsers, [] return nil, nil, fmt.Errorf("while loading parsers: %w", err) } - if err := LoadBuckets(cConfig, hub); err != nil { + if err = LoadBuckets(cConfig, hub); err != nil { return nil, nil, fmt.Errorf("while loading scenarios: %w", err) } - if err := appsec.LoadAppsecRules(hub); err != nil { - return nil, nil, fmt.Errorf("while loading appsec rules: %w", err) + // can be nerfed by a build flag + if err = LoadAppsecRules(hub); err != nil { + return nil, nil, err } datasources, err := LoadAcquisition(cConfig) @@ -82,6 +82,7 @@ func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.H return nil }) } + parserWg.Done() return nil @@ -108,6 +109,7 @@ func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.H return runPour(inputEventChan, holders, buckets, cConfig) }) } + bucketWg.Done() return nil @@ -134,6 +136,7 @@ func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.H return runOutput(inputEventChan, outputEventChan, buckets, *parsers.Povfwctx, parsers.Povfwnodes, apiClient) }) } + outputWg.Done() return nil diff --git a/pkg/acquisition/acquisition.go b/pkg/acquisition/acquisition.go index 634557021f1..38bf228abbc 100644 --- a/pkg/acquisition/acquisition.go +++ b/pkg/acquisition/acquisition.go @@ -18,18 +18,6 @@ import ( "github.com/crowdsecurity/go-cs-lib/trace" "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" - appsecacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/appsec" - cloudwatchacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/cloudwatch" - dockeracquisition 
"github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/docker" - fileacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/file" - journalctlacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/journalctl" - kafkaacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/kafka" - kinesisacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/kinesis" - k8sauditacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/kubernetesaudit" - lokiacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/loki" - s3acquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/s3" - syslogacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/syslog" - wineventlogacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/wineventlog" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" "github.com/crowdsecurity/crowdsec/pkg/types" @@ -64,29 +52,72 @@ type DataSource interface { Dump() interface{} } -var AcquisitionSources = map[string]func() DataSource{ - "file": func() DataSource { return &fileacquisition.FileSource{} }, - "journalctl": func() DataSource { return &journalctlacquisition.JournalCtlSource{} }, - "cloudwatch": func() DataSource { return &cloudwatchacquisition.CloudwatchSource{} }, - "syslog": func() DataSource { return &syslogacquisition.SyslogSource{} }, - "docker": func() DataSource { return &dockeracquisition.DockerSource{} }, - "kinesis": func() DataSource { return &kinesisacquisition.KinesisSource{} }, - "wineventlog": func() DataSource { return &wineventlogacquisition.WinEventLogSource{} }, - "kafka": func() DataSource { return &kafkaacquisition.KafkaSource{} }, - "k8s-audit": func() DataSource { return &k8sauditacquisition.KubernetesAuditSource{} }, - "loki": func() DataSource { return &lokiacquisition.LokiSource{} }, - "s3": func() DataSource { return &s3acquisition.S3Source{} }, 
- "appsec": func() DataSource { return &appsecacquisition.AppsecSource{} }, -} +var ( + // We declare everything here so we can tell if they are unsupported, or excluded from the build + AcquisitionSources = map[string]func() DataSource{ + "appsec": nil, + "cloudwatch": nil, + "docker": nil, + "file": nil, + "journalctl": nil, + "k8s-audit": nil, + "kafka": nil, + "kinesis": nil, + "loki": nil, + "s3": nil, + "syslog": nil, + "wineventlog": nil, + } + transformRuntimes = map[string]*vm.Program{} +) -var transformRuntimes = map[string]*vm.Program{} +func GetDataSourceIface(dataSourceType string) (DataSource, error) { + source, ok := AcquisitionSources[dataSourceType] + if !ok { + return nil, fmt.Errorf("unknown data source %s", dataSourceType) + } -func GetDataSourceIface(dataSourceType string) DataSource { - source := AcquisitionSources[dataSourceType] if source == nil { - return nil + return nil, fmt.Errorf("data source %s is not built in this version of crowdsec", dataSourceType) + } + + return source(), nil +} + +// registerDataSource registers a datasource in the AcquisitionSources map. +// It must be called in the init() function of the datasource package, and the datasource name +// must be declared with a nil value in the map, to allow for conditional compilation. +func registerDataSource(dataSourceType string, dsGetter func() DataSource) { + _, ok := AcquisitionSources[dataSourceType] + if !ok { + panic("datasource must be declared in the map: " + dataSourceType) + } + + AcquisitionSources[dataSourceType] = dsGetter +} + +// setupLogger creates a logger for the datasource to use at runtime. 
+func setupLogger(source, name string, level *log.Level) (*log.Entry, error) { + clog := log.New() + if err := types.ConfigureLogger(clog); err != nil { + return nil, fmt.Errorf("while configuring datasource logger: %w", err) + } + + if level != nil { + clog.SetLevel(*level) + } + + fields := log.Fields{ + "type": source, + } + + if name != "" { + fields["name"] = name } - return source() + + subLogger := clog.WithFields(fields) + + return subLogger, nil } // DataSourceConfigure creates and returns a DataSource object from a configuration, @@ -100,33 +131,27 @@ func DataSourceConfigure(commonConfig configuration.DataSourceCommonCfg, metrics if err != nil { return nil, fmt.Errorf("unable to marshal back interface: %w", err) } - if dataSrc := GetDataSourceIface(commonConfig.Source); dataSrc != nil { - /* this logger will then be used by the datasource at runtime */ - clog := log.New() - if err := types.ConfigureLogger(clog); err != nil { - return nil, fmt.Errorf("while configuring datasource logger: %w", err) - } - if commonConfig.LogLevel != nil { - clog.SetLevel(*commonConfig.LogLevel) - } - customLog := log.Fields{ - "type": commonConfig.Source, - } - if commonConfig.Name != "" { - customLog["name"] = commonConfig.Name - } - subLogger := clog.WithFields(customLog) - /* check eventual dependencies are satisfied (ie. 
journald will check journalctl availability) */ - if err := dataSrc.CanRun(); err != nil { - return nil, &DataSourceUnavailableError{Name: commonConfig.Source, Err: err} - } - /* configure the actual datasource */ - if err := dataSrc.Configure(yamlConfig, subLogger, metricsLevel); err != nil { - return nil, fmt.Errorf("failed to configure datasource %s: %w", commonConfig.Source, err) - } - return &dataSrc, nil + + dataSrc, err := GetDataSourceIface(commonConfig.Source) + if err != nil { + return nil, err + } + + subLogger, err := setupLogger(commonConfig.Source, commonConfig.Name, commonConfig.LogLevel) + if err != nil { + return nil, err + } + + /* check eventual dependencies are satisfied (ie. journald will check journalctl availability) */ + if err := dataSrc.CanRun(); err != nil { + return nil, &DataSourceUnavailableError{Name: commonConfig.Source, Err: err} + } + /* configure the actual datasource */ + if err := dataSrc.Configure(yamlConfig, subLogger, metricsLevel); err != nil { + return nil, fmt.Errorf("failed to configure datasource %s: %w", commonConfig.Source, err) } - return nil, fmt.Errorf("cannot find source %s", commonConfig.Source) + + return &dataSrc, nil } // detectBackwardCompatAcquis: try to magically detect the type for backward compat (type was not mandatory then) @@ -134,12 +159,15 @@ func detectBackwardCompatAcquis(sub configuration.DataSourceCommonCfg) string { if _, ok := sub.Config["filename"]; ok { return "file" } + if _, ok := sub.Config["filenames"]; ok { return "file" } + if _, ok := sub.Config["journalctl_filter"]; ok { return "journalctl" } + return "" } @@ -150,29 +178,35 @@ func LoadAcquisitionFromDSN(dsn string, labels map[string]string, transformExpr if len(frags) == 1 { return nil, fmt.Errorf("%s isn't valid dsn (no protocol)", dsn) } - dataSrc := GetDataSourceIface(frags[0]) - if dataSrc == nil { - return nil, fmt.Errorf("no acquisition for protocol %s://", frags[0]) + + dataSrc, err := GetDataSourceIface(frags[0]) + if err != 
nil { + return nil, fmt.Errorf("no acquisition for protocol %s:// - %w", frags[0], err) } - /* this logger will then be used by the datasource at runtime */ - clog := log.New() - if err := types.ConfigureLogger(clog); err != nil { - return nil, fmt.Errorf("while configuring datasource logger: %w", err) + + subLogger, err := setupLogger(dsn, "", nil) + if err != nil { + return nil, err } - subLogger := clog.WithField("type", dsn) + uniqueId := uuid.NewString() + if transformExpr != "" { vm, err := expr.Compile(transformExpr, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...) if err != nil { return nil, fmt.Errorf("while compiling transform expression '%s': %w", transformExpr, err) } + transformRuntimes[uniqueId] = vm } - err := dataSrc.ConfigureByDSN(dsn, labels, subLogger, uniqueId) + + err = dataSrc.ConfigureByDSN(dsn, labels, subLogger, uniqueId) if err != nil { return nil, fmt.Errorf("while configuration datasource for %s: %w", dsn, err) } + sources = append(sources, dataSrc) + return sources, nil } @@ -219,27 +253,31 @@ func LoadAcquisitionFromFile(config *csconfig.CrowdsecServiceCfg, prom *csconfig break } - //for backward compat ('type' was not mandatory, detect it) + // for backward compat ('type' was not mandatory, detect it) if guessType := detectBackwardCompatAcquis(sub); guessType != "" { sub.Source = guessType } - //it's an empty item, skip it + // it's an empty item, skip it if len(sub.Labels) == 0 { if sub.Source == "" { log.Debugf("skipping empty item in %s", acquisFile) continue } if sub.Source != "docker" { - //docker is the only source that can be empty + // docker is the only source that can be empty return nil, fmt.Errorf("missing labels in %s (position: %d)", acquisFile, idx) } } if sub.Source == "" { return nil, fmt.Errorf("data source type is empty ('source') in %s (position: %d)", acquisFile, idx) } - if GetDataSourceIface(sub.Source) == nil { - return nil, fmt.Errorf("unknown data source %s in %s (position: %d)", 
sub.Source, acquisFile, idx) + + // pre-check that the source is valid + _, err := GetDataSourceIface(sub.Source) + if err != nil { + return nil, fmt.Errorf("in file %s (position: %d) - %w", acquisFile, idx, err) } + uniqueId := uuid.NewString() sub.UniqueId = uniqueId src, err := DataSourceConfigure(sub, metrics_level) @@ -341,7 +379,7 @@ func StartAcquisition(sources []DataSource, output chan types.Event, AcquisTomb } for i := range len(sources) { - subsrc := sources[i] //ensure its a copy + subsrc := sources[i] // ensure its a copy log.Debugf("starting one source %d/%d ->> %T", i, len(sources), subsrc) AcquisTomb.Go(func() error { @@ -369,7 +407,7 @@ func StartAcquisition(sources []DataSource, output chan types.Event, AcquisTomb err = subsrc.OneShotAcquisition(outChan, AcquisTomb) } if err != nil { - //if one of the acqusition returns an error, we kill the others to properly shutdown + // if one of the acqusition returns an error, we kill the others to properly shutdown AcquisTomb.Kill(err) } return nil @@ -378,5 +416,6 @@ func StartAcquisition(sources []DataSource, output chan types.Event, AcquisTomb /*return only when acquisition is over (cat) or never (tail)*/ err := AcquisTomb.Wait() + return err } diff --git a/pkg/acquisition/acquisition_test.go b/pkg/acquisition/acquisition_test.go index a5eecbc20ed..e39199f9cdb 100644 --- a/pkg/acquisition/acquisition_test.go +++ b/pkg/acquisition/acquisition_test.go @@ -79,13 +79,8 @@ func (f *MockSourceCantRun) GetName() string { return "mock_cant_run" } // appendMockSource is only used to add mock source for tests func appendMockSource() { - if GetDataSourceIface("mock") == nil { - AcquisitionSources["mock"] = func() DataSource { return &MockSource{} } - } - - if GetDataSourceIface("mock_cant_run") == nil { - AcquisitionSources["mock_cant_run"] = func() DataSource { return &MockSourceCantRun{} } - } + AcquisitionSources["mock"] = func() DataSource { return &MockSource{} } + AcquisitionSources["mock_cant_run"] = func() 
DataSource { return &MockSourceCantRun{} } } func TestDataSourceConfigure(t *testing.T) { @@ -150,7 +145,7 @@ labels: log_level: debug source: tutu `, - ExpectedError: "cannot find source tutu", + ExpectedError: "unknown data source tutu", }, { TestName: "mismatch_config", @@ -184,6 +179,7 @@ wowo: ajsajasjas yaml.Unmarshal([]byte(tc.String), &common) ds, err := DataSourceConfigure(common, configuration.METRICS_NONE) cstest.RequireErrorContains(t, err, tc.ExpectedError) + if tc.ExpectedError != "" { return } @@ -270,7 +266,7 @@ func TestLoadAcquisitionFromFile(t *testing.T) { Config: csconfig.CrowdsecServiceCfg{ AcquisitionFiles: []string{"test_files/bad_source.yaml"}, }, - ExpectedError: "unknown data source does_not_exist in test_files/bad_source.yaml", + ExpectedError: "in file test_files/bad_source.yaml (position: 0) - unknown data source does_not_exist", }, { TestName: "invalid_filetype_config", @@ -284,6 +280,7 @@ func TestLoadAcquisitionFromFile(t *testing.T) { t.Run(tc.TestName, func(t *testing.T) { dss, err := LoadAcquisitionFromFile(&tc.Config, nil) cstest.RequireErrorContains(t, err, tc.ExpectedError) + if tc.ExpectedError != "" { return } @@ -329,6 +326,7 @@ func (f *MockCat) OneShotAcquisition(out chan types.Event, tomb *tomb.Tomb) erro return nil } + func (f *MockCat) StreamingAcquisition(chan types.Event, *tomb.Tomb) error { return errors.New("can't run in tail") } @@ -367,12 +365,14 @@ func (f *MockTail) GetMode() string { return "tail" } func (f *MockTail) OneShotAcquisition(out chan types.Event, tomb *tomb.Tomb) error { return errors.New("can't run in cat mode") } + func (f *MockTail) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { for range 10 { evt := types.Event{} evt.Line.Src = "test" out <- evt } + <-t.Dying() return nil @@ -386,7 +386,7 @@ func (f *MockTail) ConfigureByDSN(string, map[string]string, *log.Entry, string) } func (f *MockTail) GetUuid() string { return "" } -//func StartAcquisition(sources []DataSource, output 
chan types.Event, AcquisTomb *tomb.Tomb) error { +// func StartAcquisition(sources []DataSource, output chan types.Event, AcquisTomb *tomb.Tomb) error { func TestStartAcquisitionCat(t *testing.T) { sources := []DataSource{ @@ -456,6 +456,7 @@ func (f *MockTailError) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) evt.Line.Src = "test" out <- evt } + t.Kill(errors.New("got error (tomb)")) return errors.New("got error") @@ -485,7 +486,7 @@ READLOOP: } } assert.Equal(t, 10, count) - //acquisTomb.Kill(nil) + // acquisTomb.Kill(nil) time.Sleep(1 * time.Second) cstest.RequireErrorContains(t, acquisTomb.Err(), "got error (tomb)") } @@ -542,9 +543,7 @@ func TestConfigureByDSN(t *testing.T) { }, } - if GetDataSourceIface("mockdsn") == nil { - AcquisitionSources["mockdsn"] = func() DataSource { return &MockSourceByDSN{} } - } + AcquisitionSources["mockdsn"] = func() DataSource { return &MockSourceByDSN{} } for _, tc := range tests { t.Run(tc.dsn, func(t *testing.T) { diff --git a/pkg/acquisition/appsec.go b/pkg/acquisition/appsec.go new file mode 100644 index 00000000000..81616d3d2b8 --- /dev/null +++ b/pkg/acquisition/appsec.go @@ -0,0 +1,12 @@ +//go:build !no_datasource_appsec + +package acquisition + +import ( + appsecacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/appsec" +) + +//nolint:gochecknoinits +func init() { + registerDataSource("appsec", func() DataSource { return &appsecacquisition.AppsecSource{} }) +} diff --git a/pkg/acquisition/cloudwatch.go b/pkg/acquisition/cloudwatch.go new file mode 100644 index 00000000000..e6b3d3e3e53 --- /dev/null +++ b/pkg/acquisition/cloudwatch.go @@ -0,0 +1,12 @@ +//go:build !no_datasource_cloudwatch + +package acquisition + +import ( + cloudwatchacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/cloudwatch" +) + +//nolint:gochecknoinits +func init() { + registerDataSource("cloudwatch", func() DataSource { return &cloudwatchacquisition.CloudwatchSource{} }) +} diff --git 
a/pkg/acquisition/docker.go b/pkg/acquisition/docker.go new file mode 100644 index 00000000000..3bf792a039a --- /dev/null +++ b/pkg/acquisition/docker.go @@ -0,0 +1,12 @@ +//go:build !no_datasource_docker + +package acquisition + +import ( + dockeracquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/docker" +) + +//nolint:gochecknoinits +func init() { + registerDataSource("docker", func() DataSource { return &dockeracquisition.DockerSource{} }) +} diff --git a/pkg/acquisition/file.go b/pkg/acquisition/file.go new file mode 100644 index 00000000000..1ff2e4a3c0e --- /dev/null +++ b/pkg/acquisition/file.go @@ -0,0 +1,12 @@ +//go:build !no_datasource_file + +package acquisition + +import ( + fileacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/file" +) + +//nolint:gochecknoinits +func init() { + registerDataSource("file", func() DataSource { return &fileacquisition.FileSource{} }) +} diff --git a/pkg/acquisition/journalctl.go b/pkg/acquisition/journalctl.go new file mode 100644 index 00000000000..691f961ae77 --- /dev/null +++ b/pkg/acquisition/journalctl.go @@ -0,0 +1,12 @@ +//go:build !no_datasource_journalctl + +package acquisition + +import ( + journalctlacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/journalctl" +) + +//nolint:gochecknoinits +func init() { + registerDataSource("journalctl", func() DataSource { return &journalctlacquisition.JournalCtlSource{} }) +} diff --git a/pkg/acquisition/k8s.go b/pkg/acquisition/k8s.go new file mode 100644 index 00000000000..cb9446be285 --- /dev/null +++ b/pkg/acquisition/k8s.go @@ -0,0 +1,12 @@ +//go:build !no_datasource_k8saudit + +package acquisition + +import ( + k8sauditacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/kubernetesaudit" +) + +//nolint:gochecknoinits +func init() { + registerDataSource("k8s-audit", func() DataSource { return &k8sauditacquisition.KubernetesAuditSource{} }) +} diff --git a/pkg/acquisition/kafka.go 
b/pkg/acquisition/kafka.go new file mode 100644 index 00000000000..7d315d87feb --- /dev/null +++ b/pkg/acquisition/kafka.go @@ -0,0 +1,12 @@ +//go:build !no_datasource_kafka + +package acquisition + +import ( + kafkaacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/kafka" +) + +//nolint:gochecknoinits +func init() { + registerDataSource("kafka", func() DataSource { return &kafkaacquisition.KafkaSource{} }) +} diff --git a/pkg/acquisition/kinesis.go b/pkg/acquisition/kinesis.go new file mode 100644 index 00000000000..b41372e7fb9 --- /dev/null +++ b/pkg/acquisition/kinesis.go @@ -0,0 +1,12 @@ +//go:build !no_datasource_kinesis + +package acquisition + +import ( + kinesisacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/kinesis" +) + +//nolint:gochecknoinits +func init() { + registerDataSource("kinesis", func() DataSource { return &kinesisacquisition.KinesisSource{} }) +} diff --git a/pkg/acquisition/loki.go b/pkg/acquisition/loki.go new file mode 100644 index 00000000000..1eed6686591 --- /dev/null +++ b/pkg/acquisition/loki.go @@ -0,0 +1,12 @@ +//go:build !no_datasource_loki + +package acquisition + +import ( + "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/loki" +) + +//nolint:gochecknoinits +func init() { + registerDataSource("loki", func() DataSource { return &loki.LokiSource{} }) +} diff --git a/pkg/acquisition/modules/appsec/appsec_lnx_test.go b/pkg/acquisition/modules/appsec/appsec_lnx_test.go index 3e40a1f970c..61dfc536f5e 100644 --- a/pkg/acquisition/modules/appsec/appsec_lnx_test.go +++ b/pkg/acquisition/modules/appsec/appsec_lnx_test.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package appsecacquisition @@ -16,6 +15,7 @@ import ( func TestAppsecRuleTransformsOthers(t *testing.T) { log.SetLevel(log.TraceLevel) + tests := []appsecRuleTest{ { name: "normalizepath", diff --git a/pkg/acquisition/modules/appsec/appsec_win_test.go b/pkg/acquisition/modules/appsec/appsec_win_test.go index 
e85d75df251..a6b8f3a0340 100644 --- a/pkg/acquisition/modules/appsec/appsec_win_test.go +++ b/pkg/acquisition/modules/appsec/appsec_win_test.go @@ -1,5 +1,4 @@ //go:build windows -// +build windows package appsecacquisition diff --git a/pkg/acquisition/s3.go b/pkg/acquisition/s3.go new file mode 100644 index 00000000000..73343b0408d --- /dev/null +++ b/pkg/acquisition/s3.go @@ -0,0 +1,12 @@ +//go:build !no_datasource_s3 + +package acquisition + +import ( + s3acquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/s3" +) + +//nolint:gochecknoinits +func init() { + registerDataSource("s3", func() DataSource { return &s3acquisition.S3Source{} }) +} diff --git a/pkg/acquisition/syslog.go b/pkg/acquisition/syslog.go new file mode 100644 index 00000000000..f62cc23b916 --- /dev/null +++ b/pkg/acquisition/syslog.go @@ -0,0 +1,12 @@ +//go:build !no_datasource_syslog + +package acquisition + +import ( + syslogacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/syslog" +) + +//nolint:gochecknoinits +func init() { + registerDataSource("syslog", func() DataSource { return &syslogacquisition.SyslogSource{} }) +} diff --git a/pkg/acquisition/wineventlog.go b/pkg/acquisition/wineventlog.go new file mode 100644 index 00000000000..0c4889a3f5c --- /dev/null +++ b/pkg/acquisition/wineventlog.go @@ -0,0 +1,12 @@ +//go:build !no_datasource_wineventlog + +package acquisition + +import ( + wineventlogacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/wineventlog" +) + +//nolint:gochecknoinits +func init() { + registerDataSource("wineventlog", func() DataSource { return &wineventlogacquisition.WinEventLogSource{} }) +} diff --git a/pkg/cwversion/constraint/constraint.go b/pkg/cwversion/constraint/constraint.go new file mode 100644 index 00000000000..67593f9ebbc --- /dev/null +++ b/pkg/cwversion/constraint/constraint.go @@ -0,0 +1,32 @@ +package constraint + +import ( + "fmt" + + goversion "github.com/hashicorp/go-version" +) + +const ( 
+ Parser = ">= 1.0, <= 3.0" + Scenario = ">= 1.0, <= 3.0" + API = "v1" + Acquis = ">= 1.0, < 2.0" +) + +func Satisfies(strvers string, constraint string) (bool, error) { + vers, err := goversion.NewVersion(strvers) + if err != nil { + return false, fmt.Errorf("failed to parse '%s': %w", strvers, err) + } + + constraints, err := goversion.NewConstraint(constraint) + if err != nil { + return false, fmt.Errorf("failed to parse constraint '%s'", constraint) + } + + if !constraints.Check(vers) { + return false, nil + } + + return true, nil +} diff --git a/pkg/cwversion/version.go b/pkg/cwversion/version.go index b208467aef5..867098e7d5a 100644 --- a/pkg/cwversion/version.go +++ b/pkg/cwversion/version.go @@ -4,11 +4,12 @@ import ( "fmt" "strings" - goversion "github.com/hashicorp/go-version" - + "github.com/crowdsecurity/go-cs-lib/maptools" "github.com/crowdsecurity/go-cs-lib/version" + "github.com/crowdsecurity/crowdsec/pkg/acquisition" "github.com/crowdsecurity/crowdsec/pkg/apiclient/useragent" + "github.com/crowdsecurity/crowdsec/pkg/cwversion/constraint" ) var ( @@ -16,14 +17,19 @@ var ( Libre2 = "WebAssembly" ) -const ( - Constraint_parser = ">= 1.0, <= 3.0" - Constraint_scenario = ">= 1.0, <= 3.0" - Constraint_api = "v1" - Constraint_acquis = ">= 1.0, < 2.0" -) - func FullString() string { + dsBuilt := []string{} + dsExcluded := []string{} + + for _, ds := range maptools.SortedKeys(acquisition.AcquisitionSources) { + if acquisition.AcquisitionSources[ds] != nil { + dsBuilt = append(dsBuilt, ds) + continue + } + + dsExcluded = append(dsExcluded, ds) + } + ret := fmt.Sprintf("version: %s\n", version.String()) ret += fmt.Sprintf("Codename: %s\n", Codename) ret += fmt.Sprintf("BuildDate: %s\n", version.BuildDate) @@ -31,10 +37,18 @@ func FullString() string { ret += fmt.Sprintf("Platform: %s\n", version.System) ret += fmt.Sprintf("libre2: %s\n", Libre2) ret += fmt.Sprintf("User-Agent: %s\n", useragent.Default()) - ret += fmt.Sprintf("Constraint_parser: %s\n", 
Constraint_parser) - ret += fmt.Sprintf("Constraint_scenario: %s\n", Constraint_scenario) - ret += fmt.Sprintf("Constraint_api: %s\n", Constraint_api) - ret += fmt.Sprintf("Constraint_acquis: %s\n", Constraint_acquis) + ret += fmt.Sprintf("Constraint_parser: %s\n", constraint.Parser) + ret += fmt.Sprintf("Constraint_scenario: %s\n", constraint.Scenario) + ret += fmt.Sprintf("Constraint_api: %s\n", constraint.API) + ret += fmt.Sprintf("Constraint_acquis: %s\n", constraint.Acquis) + + if len(dsBuilt) > 0 { + ret += fmt.Sprintf("Built data sources: %s\n", strings.Join(dsBuilt, ", ")) + } + + if len(dsExcluded) > 0 { + ret += fmt.Sprintf("Excluded data sources: %s\n", strings.Join(dsExcluded, ", ")) + } return ret } @@ -46,21 +60,3 @@ func VersionStrip() string { return ret[0] } - -func Satisfies(strvers string, constraint string) (bool, error) { - vers, err := goversion.NewVersion(strvers) - if err != nil { - return false, fmt.Errorf("failed to parse '%s': %w", strvers, err) - } - - constraints, err := goversion.NewConstraint(constraint) - if err != nil { - return false, fmt.Errorf("failed to parse constraint '%s'", constraint) - } - - if !constraints.Check(vers) { - return false, nil - } - - return true, nil -} diff --git a/pkg/leakybucket/manager_load.go b/pkg/leakybucket/manager_load.go index 6055a5308b5..1ae70fbfab3 100644 --- a/pkg/leakybucket/manager_load.go +++ b/pkg/leakybucket/manager_load.go @@ -22,7 +22,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/alertcontext" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" + "github.com/crowdsecurity/crowdsec/pkg/cwversion/constraint" "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -292,13 +292,13 @@ func LoadBuckets(cscfg *csconfig.CrowdsecServiceCfg, hub *cwhub.Hub, files []str bucketFactory.FormatVersion = "1.0" } - ok, err := 
cwversion.Satisfies(bucketFactory.FormatVersion, cwversion.Constraint_scenario) + ok, err := constraint.Satisfies(bucketFactory.FormatVersion, constraint.Scenario) if err != nil { return nil, nil, fmt.Errorf("failed to check version: %w", err) } if !ok { - log.Errorf("can't load %s : %s doesn't satisfy scenario format %s, skip", bucketFactory.Name, bucketFactory.FormatVersion, cwversion.Constraint_scenario) + log.Errorf("can't load %s : %s doesn't satisfy scenario format %s, skip", bucketFactory.Name, bucketFactory.FormatVersion, constraint.Scenario) continue } diff --git a/pkg/parser/stage.go b/pkg/parser/stage.go index fe538023b61..b98db350254 100644 --- a/pkg/parser/stage.go +++ b/pkg/parser/stage.go @@ -21,7 +21,7 @@ import ( log "github.com/sirupsen/logrus" yaml "gopkg.in/yaml.v2" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" + "github.com/crowdsecurity/crowdsec/pkg/cwversion/constraint" "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" ) @@ -85,12 +85,12 @@ func LoadStages(stageFiles []Stagefile, pctx *UnixParserCtx, ectx EnricherCtx) ( log.Tracef("no version in %s, assuming '1.0'", node.Name) node.FormatVersion = "1.0" } - ok, err := cwversion.Satisfies(node.FormatVersion, cwversion.Constraint_parser) + ok, err := constraint.Satisfies(node.FormatVersion, constraint.Parser) if err != nil { return nil, fmt.Errorf("failed to check version : %s", err) } if !ok { - log.Errorf("%s : %s doesn't satisfy parser format %s, skip", node.Name, node.FormatVersion, cwversion.Constraint_parser) + log.Errorf("%s : %s doesn't satisfy parser format %s, skip", node.Name, node.FormatVersion, constraint.Parser) continue } diff --git a/pkg/setup/detect.go b/pkg/setup/detect.go index 55af951bf89..01368091a6b 100644 --- a/pkg/setup/detect.go +++ b/pkg/setup/detect.go @@ -73,9 +73,9 @@ func validateDataSource(opaqueDS DataSourceItem) error { // source must be known - ds := acquisition.GetDataSourceIface(commonDS.Source) - if ds == nil { - return fmt.Errorf("unknown source 
'%s'", commonDS.Source) + ds, err := acquisition.GetDataSourceIface(commonDS.Source) + if err != nil { + return err } // unmarshal and validate the rest with the specific implementation diff --git a/pkg/setup/detect_test.go b/pkg/setup/detect_test.go index c744e7d6796..6f61b5dac78 100644 --- a/pkg/setup/detect_test.go +++ b/pkg/setup/detect_test.go @@ -871,7 +871,7 @@ func TestDetectDatasourceValidation(t *testing.T) { datasource: source: wombat`, expected: setup.Setup{Setup: []setup.ServiceSetup{}}, - expectedErr: "invalid datasource for foobar: unknown source 'wombat'", + expectedErr: "invalid datasource for foobar: unknown data source wombat", }, { name: "source is misplaced", config: ` diff --git a/test/bats.mk b/test/bats.mk index 8f507cb659b..631cc55579b 100644 --- a/test/bats.mk +++ b/test/bats.mk @@ -38,6 +38,7 @@ define ENV := export TEST_DIR="$(TEST_DIR)" export LOCAL_DIR="$(LOCAL_DIR)" export BIN_DIR="$(BIN_DIR)" +# append .min to the binary names to use the minimal profile export CROWDSEC="$(CROWDSEC)" export CSCLI="$(CSCLI)" export CONFIG_YAML="$(CONFIG_DIR)/config.yaml" @@ -75,6 +76,11 @@ bats-update-tools: ## Install/update tools required for functional tests # Build and installs crowdsec in a local directory. Rebuilds if already exists. 
bats-build: bats-environment ## Build binaries for functional tests @$(MKDIR) $(BIN_DIR) $(LOG_DIR) $(PID_DIR) $(BATS_PLUGIN_DIR) + # minimal profile + @$(MAKE) build DEBUG=1 TEST_COVERAGE=$(TEST_COVERAGE) DEFAULT_CONFIGDIR=$(CONFIG_DIR) DEFAULT_DATADIR=$(DATA_DIR) BUILD_PROFILE=minimal + @install -m 0755 cmd/crowdsec/crowdsec $(BIN_DIR)/crowdsec.min + @install -m 0755 cmd/crowdsec-cli/cscli $(BIN_DIR)/cscli.min + # default profile @$(MAKE) build DEBUG=1 TEST_COVERAGE=$(TEST_COVERAGE) DEFAULT_CONFIGDIR=$(CONFIG_DIR) DEFAULT_DATADIR=$(DATA_DIR) @install -m 0755 cmd/crowdsec/crowdsec cmd/crowdsec-cli/cscli $(BIN_DIR)/ @install -m 0755 cmd/notification-*/notification-* $(BATS_PLUGIN_DIR)/ diff --git a/test/bats/01_crowdsec.bats b/test/bats/01_crowdsec.bats index 83072b0f159..aa5830a6bae 100644 --- a/test/bats/01_crowdsec.bats +++ b/test/bats/01_crowdsec.bats @@ -199,7 +199,42 @@ teardown() { assert_stderr --partial "crowdsec init: while loading acquisition config: no datasource enabled" } -@test "crowdsec (disabled datasources)" { +@test "crowdsec (datasource not built)" { + config_set '.common.log_media="stdout"' + + # a datasource cannot run - it's not built in the log processor executable + + ACQUIS_DIR=$(config_get '.crowdsec_service.acquisition_dir') + mkdir -p "$ACQUIS_DIR" + cat >"$ACQUIS_DIR"/foo.yaml <<-EOT + source: journalctl + journalctl_filter: + - "_SYSTEMD_UNIT=ssh.service" + labels: + type: syslog + EOT + + #shellcheck disable=SC2016 + rune -1 wait-for \ + --err "crowdsec init: while loading acquisition config: in file $ACQUIS_DIR/foo.yaml (position: 0) - data source journalctl is not built in this version of crowdsec" \ + env PATH='' "$CROWDSEC".min + + # auto-detection of journalctl_filter still works + cat >"$ACQUIS_DIR"/foo.yaml <<-EOT + source: whatever + journalctl_filter: + - "_SYSTEMD_UNIT=ssh.service" + labels: + type: syslog + EOT + + #shellcheck disable=SC2016 + rune -1 wait-for \ + --err "crowdsec init: while loading acquisition config: in 
file $ACQUIS_DIR/foo.yaml (position: 0) - data source journalctl is not built in this version of crowdsec" \ + env PATH='' "$CROWDSEC".min +} + +@test "crowdsec (disabled datasource)" { if is_package_testing; then # we can't hide journalctl in package testing # because crowdsec is run from systemd From 6810b41dd872670d58e028b1cfa4d12bffc8b19b Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 12 Sep 2024 17:28:16 +0200 Subject: [PATCH 283/318] refact pkg/database: context propagation (start) (#3226) * refact pkg/database: context propagation (part) * more context propagation (usagemetrics) * propagate errors when updating metrics --- cmd/crowdsec/metrics.go | 6 ++++-- pkg/apiserver/controllers/v1/usagemetrics.go | 15 ++++++++------- pkg/database/alerts.go | 4 +--- pkg/database/bouncers.go | 5 +++-- pkg/database/decisions.go | 5 +++-- pkg/database/machines.go | 5 +++-- pkg/database/metrics.go | 5 +++-- 7 files changed, 25 insertions(+), 20 deletions(-) diff --git a/cmd/crowdsec/metrics.go b/cmd/crowdsec/metrics.go index d3c6e172091..ff280fc3512 100644 --- a/cmd/crowdsec/metrics.go +++ b/cmd/crowdsec/metrics.go @@ -118,7 +118,9 @@ func computeDynamicMetrics(next http.Handler, dbClient *database.Client) http.Ha return } - decisions, err := dbClient.QueryDecisionCountByScenario() + ctx := r.Context() + + decisions, err := dbClient.QueryDecisionCountByScenario(ctx) if err != nil { log.Errorf("Error querying decisions for metrics: %v", err) next.ServeHTTP(w, r) @@ -138,7 +140,7 @@ func computeDynamicMetrics(next http.Handler, dbClient *database.Client) http.Ha "include_capi": {"false"}, } - alerts, err := dbClient.AlertsCountPerScenario(alertsFilter) + alerts, err := dbClient.AlertsCountPerScenario(ctx, alertsFilter) if err != nil { log.Errorf("Error querying alerts for metrics: %v", err) next.ServeHTTP(w, r) diff --git a/pkg/apiserver/controllers/v1/usagemetrics.go b/pkg/apiserver/controllers/v1/usagemetrics.go index 
74f27bb6cf4..27b1b819a54 100644 --- a/pkg/apiserver/controllers/v1/usagemetrics.go +++ b/pkg/apiserver/controllers/v1/usagemetrics.go @@ -1,6 +1,7 @@ package v1 import ( + "context" "encoding/json" "errors" "net/http" @@ -18,17 +19,15 @@ import ( ) // updateBaseMetrics updates the base metrics for a machine or bouncer -func (c *Controller) updateBaseMetrics(machineID string, bouncer *ent.Bouncer, baseMetrics models.BaseMetrics, hubItems models.HubItems, datasources map[string]int64) error { +func (c *Controller) updateBaseMetrics(ctx context.Context, machineID string, bouncer *ent.Bouncer, baseMetrics models.BaseMetrics, hubItems models.HubItems, datasources map[string]int64) error { switch { case machineID != "": - c.DBClient.MachineUpdateBaseMetrics(machineID, baseMetrics, hubItems, datasources) + return c.DBClient.MachineUpdateBaseMetrics(ctx, machineID, baseMetrics, hubItems, datasources) case bouncer != nil: - c.DBClient.BouncerUpdateBaseMetrics(bouncer.Name, bouncer.Type, baseMetrics) + return c.DBClient.BouncerUpdateBaseMetrics(ctx, bouncer.Name, bouncer.Type, baseMetrics) default: return errors.New("no machineID or bouncerName set") } - - return nil } // UsageMetrics receives metrics from log processors and remediation components @@ -172,7 +171,9 @@ func (c *Controller) UsageMetrics(gctx *gin.Context) { } } - err := c.updateBaseMetrics(machineID, bouncer, baseMetrics, hubItems, datasources) + ctx := gctx.Request.Context() + + err := c.updateBaseMetrics(ctx, machineID, bouncer, baseMetrics, hubItems, datasources) if err != nil { logger.Errorf("Failed to update base metrics: %s", err) c.HandleDBErrors(gctx, err) @@ -190,7 +191,7 @@ func (c *Controller) UsageMetrics(gctx *gin.Context) { receivedAt := time.Now().UTC() - if _, err := c.DBClient.CreateMetric(generatedType, generatedBy, receivedAt, string(jsonPayload)); err != nil { + if _, err := c.DBClient.CreateMetric(ctx, generatedType, generatedBy, receivedAt, string(jsonPayload)); err != nil { 
logger.Error(err) c.HandleDBErrors(gctx, err) diff --git a/pkg/database/alerts.go b/pkg/database/alerts.go index 0f6d87fb1b6..3e3e480c7d6 100644 --- a/pkg/database/alerts.go +++ b/pkg/database/alerts.go @@ -941,14 +941,12 @@ func BuildAlertRequestFromFilter(alerts *ent.AlertQuery, filter map[string][]str return alerts.Where(preds...), nil } -func (c *Client) AlertsCountPerScenario(filters map[string][]string) (map[string]int, error) { +func (c *Client) AlertsCountPerScenario(ctx context.Context, filters map[string][]string) (map[string]int, error) { var res []struct { Scenario string Count int } - ctx := context.TODO() - query := c.Ent.Alert.Query() query, err := BuildAlertRequestFromFilter(query, filters) diff --git a/pkg/database/bouncers.go b/pkg/database/bouncers.go index f79e9580afe..a7378bbb203 100644 --- a/pkg/database/bouncers.go +++ b/pkg/database/bouncers.go @@ -1,6 +1,7 @@ package database import ( + "context" "fmt" "strings" "time" @@ -20,7 +21,7 @@ func (e *BouncerNotFoundError) Error() string { return fmt.Sprintf("'%s' does not exist", e.BouncerName) } -func (c *Client) BouncerUpdateBaseMetrics(bouncerName string, bouncerType string, baseMetrics models.BaseMetrics) error { +func (c *Client) BouncerUpdateBaseMetrics(ctx context.Context, bouncerName string, bouncerType string, baseMetrics models.BaseMetrics) error { os := baseMetrics.Os features := strings.Join(baseMetrics.FeatureFlags, ",") @@ -32,7 +33,7 @@ func (c *Client) BouncerUpdateBaseMetrics(bouncerName string, bouncerType string SetOsversion(*os.Version). SetFeatureflags(features). SetType(bouncerType). 
- Save(c.CTX) + Save(ctx) if err != nil { return fmt.Errorf("unable to update base bouncer metrics in database: %w", err) } diff --git a/pkg/database/decisions.go b/pkg/database/decisions.go index fc582247e59..5fd4757c883 100644 --- a/pkg/database/decisions.go +++ b/pkg/database/decisions.go @@ -1,6 +1,7 @@ package database import ( + "context" "fmt" "strconv" "strings" @@ -173,7 +174,7 @@ func (c *Client) QueryExpiredDecisionsWithFilters(filters map[string][]string) ( return data, nil } -func (c *Client) QueryDecisionCountByScenario() ([]*DecisionsByScenario, error) { +func (c *Client) QueryDecisionCountByScenario(ctx context.Context) ([]*DecisionsByScenario, error) { query := c.Ent.Decision.Query().Where( decision.UntilGT(time.Now().UTC()), ) @@ -186,7 +187,7 @@ func (c *Client) QueryDecisionCountByScenario() ([]*DecisionsByScenario, error) var r []*DecisionsByScenario - err = query.GroupBy(decision.FieldScenario, decision.FieldOrigin, decision.FieldType).Aggregate(ent.Count()).Scan(c.CTX, &r) + err = query.GroupBy(decision.FieldScenario, decision.FieldOrigin, decision.FieldType).Aggregate(ent.Count()).Scan(ctx, &r) if err != nil { c.Log.Warningf("QueryDecisionCountByScenario : %s", err) return nil, errors.Wrap(QueryFail, "count all decisions with filters") diff --git a/pkg/database/machines.go b/pkg/database/machines.go index 3c8cbabbfa7..27d737e625e 100644 --- a/pkg/database/machines.go +++ b/pkg/database/machines.go @@ -1,6 +1,7 @@ package database import ( + "context" "fmt" "strings" "time" @@ -29,7 +30,7 @@ func (e *MachineNotFoundError) Error() string { return fmt.Sprintf("'%s' does not exist", e.MachineID) } -func (c *Client) MachineUpdateBaseMetrics(machineID string, baseMetrics models.BaseMetrics, hubItems models.HubItems, datasources map[string]int64) error { +func (c *Client) MachineUpdateBaseMetrics(ctx context.Context, machineID string, baseMetrics models.BaseMetrics, hubItems models.HubItems, datasources map[string]int64) error { os := 
baseMetrics.Os features := strings.Join(baseMetrics.FeatureFlags, ",") @@ -63,7 +64,7 @@ func (c *Client) MachineUpdateBaseMetrics(machineID string, baseMetrics models.B SetLastHeartbeat(heartbeat). SetHubstate(hubState). SetDatasources(datasources). - Save(c.CTX) + Save(ctx) if err != nil { return fmt.Errorf("unable to update base machine metrics in database: %w", err) } diff --git a/pkg/database/metrics.go b/pkg/database/metrics.go index 7626c39f6f1..1619fcc923b 100644 --- a/pkg/database/metrics.go +++ b/pkg/database/metrics.go @@ -1,6 +1,7 @@ package database import ( + "context" "fmt" "time" @@ -8,14 +9,14 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/database/ent/metric" ) -func (c *Client) CreateMetric(generatedType metric.GeneratedType, generatedBy string, receivedAt time.Time, payload string) (*ent.Metric, error) { +func (c *Client) CreateMetric(ctx context.Context, generatedType metric.GeneratedType, generatedBy string, receivedAt time.Time, payload string) (*ent.Metric, error) { metric, err := c.Ent.Metric. Create(). SetGeneratedType(generatedType). SetGeneratedBy(generatedBy). SetReceivedAt(receivedAt). SetPayload(payload). 
- Save(c.CTX) + Save(ctx) if err != nil { c.Log.Warningf("CreateMetric: %s", err) return nil, fmt.Errorf("storing metrics snapshot for '%s' at %s: %w", generatedBy, receivedAt, InsertFail) From 8a74faed43103a03d6499d6528b635fc35abe839 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 12 Sep 2024 21:16:55 +0200 Subject: [PATCH 284/318] context propagation: cscli {capi,lapi,papi} (#3228) * context propagation: lapi status, capi status, papi status * context propagation: lapi register, capi register * lint --- cmd/crowdsec-cli/clicapi/capi.go | 20 ++++++++--------- cmd/crowdsec-cli/clilapi/lapi.go | 30 +++++++++++++------------- cmd/crowdsec-cli/clipapi/papi.go | 11 ++++++---- cmd/crowdsec-cli/clisupport/support.go | 18 ++++++++-------- pkg/apiclient/auth_service_test.go | 6 ++++-- pkg/apiclient/client.go | 4 ++-- pkg/apiclient/client_test.go | 16 ++++++++++---- pkg/apiserver/papi.go | 4 ++-- 8 files changed, 61 insertions(+), 48 deletions(-) diff --git a/cmd/crowdsec-cli/clicapi/capi.go b/cmd/crowdsec-cli/clicapi/capi.go index 49f68dd6b9e..fbc50066287 100644 --- a/cmd/crowdsec-cli/clicapi/capi.go +++ b/cmd/crowdsec-cli/clicapi/capi.go @@ -58,7 +58,7 @@ func (cli *cliCapi) NewCommand() *cobra.Command { return cmd } -func (cli *cliCapi) register(capiUserPrefix string, outputFile string) error { +func (cli *cliCapi) register(ctx context.Context, capiUserPrefix string, outputFile string) error { cfg := cli.cfg() capiUser, err := idgen.GenerateMachineID(capiUserPrefix) @@ -73,7 +73,7 @@ func (cli *cliCapi) register(capiUserPrefix string, outputFile string) error { return fmt.Errorf("unable to parse api url %s: %w", types.CAPIBaseURL, err) } - _, err = apiclient.RegisterClient(&apiclient.Config{ + _, err = apiclient.RegisterClient(ctx, &apiclient.Config{ MachineID: capiUser, Password: password, URL: apiurl, @@ -134,8 +134,8 @@ func (cli *cliCapi) newRegisterCmd() *cobra.Command { Short: "Register to Central API (CAPI)", Args: 
cobra.MinimumNArgs(0), DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, _ []string) error { - return cli.register(capiUserPrefix, outputFile) + RunE: func(cmd *cobra.Command, _ []string) error { + return cli.register(cmd.Context(), capiUserPrefix, outputFile) }, } @@ -148,7 +148,7 @@ func (cli *cliCapi) newRegisterCmd() *cobra.Command { } // queryCAPIStatus checks if the Central API is reachable, and if the credentials are correct. It then checks if the instance is enrolle in the console. -func queryCAPIStatus(hub *cwhub.Hub, credURL string, login string, password string) (bool, bool, error) { +func queryCAPIStatus(ctx context.Context, hub *cwhub.Hub, credURL string, login string, password string) (bool, bool, error) { apiURL, err := url.Parse(credURL) if err != nil { return false, false, err @@ -186,7 +186,7 @@ func queryCAPIStatus(hub *cwhub.Hub, credURL string, login string, password stri Scenarios: itemsForAPI, } - authResp, _, err := client.Auth.AuthenticateWatcher(context.Background(), t) + authResp, _, err := client.Auth.AuthenticateWatcher(ctx, t) if err != nil { return false, false, err } @@ -200,7 +200,7 @@ func queryCAPIStatus(hub *cwhub.Hub, credURL string, login string, password stri return true, false, nil } -func (cli *cliCapi) Status(out io.Writer, hub *cwhub.Hub) error { +func (cli *cliCapi) Status(ctx context.Context, out io.Writer, hub *cwhub.Hub) error { cfg := cli.cfg() if err := require.CAPIRegistered(cfg); err != nil { @@ -212,7 +212,7 @@ func (cli *cliCapi) Status(out io.Writer, hub *cwhub.Hub) error { fmt.Fprintf(out, "Loaded credentials from %s\n", cfg.API.Server.OnlineClient.CredentialsFilePath) fmt.Fprintf(out, "Trying to authenticate with username %s on %s\n", cred.Login, cred.URL) - auth, enrolled, err := queryCAPIStatus(hub, cred.URL, cred.Login, cred.Password) + auth, enrolled, err := queryCAPIStatus(ctx, hub, cred.URL, cred.Login, cred.Password) if err != nil { return fmt.Errorf("failed to authenticate to Central API (CAPI): 
%w", err) } @@ -234,13 +234,13 @@ func (cli *cliCapi) newStatusCmd() *cobra.Command { Short: "Check status with the Central API (CAPI)", Args: cobra.MinimumNArgs(0), DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, _ []string) error { + RunE: func(cmd *cobra.Command, _ []string) error { hub, err := require.Hub(cli.cfg(), nil, nil) if err != nil { return err } - return cli.Status(color.Output, hub) + return cli.Status(cmd.Context(), color.Output, hub) }, } diff --git a/cmd/crowdsec-cli/clilapi/lapi.go b/cmd/crowdsec-cli/clilapi/lapi.go index fa229002512..eff7ae64476 100644 --- a/cmd/crowdsec-cli/clilapi/lapi.go +++ b/cmd/crowdsec-cli/clilapi/lapi.go @@ -44,7 +44,7 @@ func New(cfg configGetter) *cliLapi { } // queryLAPIStatus checks if the Local API is reachable, and if the credentials are correct. -func queryLAPIStatus(hub *cwhub.Hub, credURL string, login string, password string) (bool, error) { +func queryLAPIStatus(ctx context.Context, hub *cwhub.Hub, credURL string, login string, password string) (bool, error) { apiURL, err := url.Parse(credURL) if err != nil { return false, err @@ -76,7 +76,7 @@ func queryLAPIStatus(hub *cwhub.Hub, credURL string, login string, password stri return true, nil } -func (cli *cliLapi) Status(out io.Writer, hub *cwhub.Hub) error { +func (cli *cliLapi) Status(ctx context.Context, out io.Writer, hub *cwhub.Hub) error { cfg := cli.cfg() cred := cfg.API.Client.Credentials @@ -84,7 +84,7 @@ func (cli *cliLapi) Status(out io.Writer, hub *cwhub.Hub) error { fmt.Fprintf(out, "Loaded credentials from %s\n", cfg.API.Client.CredentialsFilePath) fmt.Fprintf(out, "Trying to authenticate with username %s on %s\n", cred.Login, cred.URL) - _, err := queryLAPIStatus(hub, cred.URL, cred.Login, cred.Password) + _, err := queryLAPIStatus(ctx, hub, cred.URL, cred.Login, cred.Password) if err != nil { return fmt.Errorf("failed to authenticate to Local API (LAPI): %w", err) } @@ -94,7 +94,7 @@ func (cli *cliLapi) Status(out io.Writer, hub 
*cwhub.Hub) error { return nil } -func (cli *cliLapi) register(apiURL string, outputFile string, machine string, token string) error { +func (cli *cliLapi) register(ctx context.Context, apiURL string, outputFile string, machine string, token string) error { var err error lapiUser := machine @@ -114,7 +114,7 @@ func (cli *cliLapi) register(apiURL string, outputFile string, machine string, t return fmt.Errorf("parsing api url: %w", err) } - _, err = apiclient.RegisterClient(&apiclient.Config{ + _, err = apiclient.RegisterClient(ctx, &apiclient.Config{ MachineID: lapiUser, Password: password, RegistrationToken: token, @@ -195,13 +195,13 @@ func (cli *cliLapi) newStatusCmd() *cobra.Command { Short: "Check authentication to Local API (LAPI)", Args: cobra.MinimumNArgs(0), DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, _ []string) error { + RunE: func(cmd *cobra.Command, _ []string) error { hub, err := require.Hub(cli.cfg(), nil, nil) if err != nil { return err } - return cli.Status(color.Output, hub) + return cli.Status(cmd.Context(), color.Output, hub) }, } @@ -223,8 +223,8 @@ func (cli *cliLapi) newRegisterCmd() *cobra.Command { Keep in mind the machine needs to be validated by an administrator on LAPI side to be effective.`, Args: cobra.MinimumNArgs(0), DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, _ []string) error { - return cli.register(apiURL, outputFile, machine, token) + RunE: func(cmd *cobra.Command, _ []string) error { + return cli.register(cmd.Context(), apiURL, outputFile, machine, token) }, } @@ -513,14 +513,14 @@ func detectStaticField(grokStatics []parser.ExtraField) []string { for _, static := range grokStatics { if static.Parsed != "" { - fieldName := fmt.Sprintf("evt.Parsed.%s", static.Parsed) + fieldName := "evt.Parsed." + static.Parsed if !slices.Contains(ret, fieldName) { ret = append(ret, fieldName) } } if static.Meta != "" { - fieldName := fmt.Sprintf("evt.Meta.%s", static.Meta) + fieldName := "evt.Meta." 
+ static.Meta if !slices.Contains(ret, fieldName) { ret = append(ret, fieldName) } @@ -546,7 +546,7 @@ func detectNode(node parser.Node, parserCTX parser.UnixParserCtx) []string { if node.Grok.RunTimeRegexp != nil { for _, capturedField := range node.Grok.RunTimeRegexp.Names() { - fieldName := fmt.Sprintf("evt.Parsed.%s", capturedField) + fieldName := "evt.Parsed." + capturedField if !slices.Contains(ret, fieldName) { ret = append(ret, fieldName) } @@ -558,7 +558,7 @@ func detectNode(node parser.Node, parserCTX parser.UnixParserCtx) []string { // ignore error (parser does not exist?) if err == nil { for _, capturedField := range grokCompiled.Names() { - fieldName := fmt.Sprintf("evt.Parsed.%s", capturedField) + fieldName := "evt.Parsed." + capturedField if !slices.Contains(ret, fieldName) { ret = append(ret, fieldName) } @@ -593,7 +593,7 @@ func detectSubNode(node parser.Node, parserCTX parser.UnixParserCtx) []string { for _, subnode := range node.LeavesNodes { if subnode.Grok.RunTimeRegexp != nil { for _, capturedField := range subnode.Grok.RunTimeRegexp.Names() { - fieldName := fmt.Sprintf("evt.Parsed.%s", capturedField) + fieldName := "evt.Parsed." + capturedField if !slices.Contains(ret, fieldName) { ret = append(ret, fieldName) } @@ -605,7 +605,7 @@ func detectSubNode(node parser.Node, parserCTX parser.UnixParserCtx) []string { if err == nil { // ignore error (parser does not exist?) for _, capturedField := range grokCompiled.Names() { - fieldName := fmt.Sprintf("evt.Parsed.%s", capturedField) + fieldName := "evt.Parsed." 
+ capturedField if !slices.Contains(ret, fieldName) { ret = append(ret, fieldName) } diff --git a/cmd/crowdsec-cli/clipapi/papi.go b/cmd/crowdsec-cli/clipapi/papi.go index 0752267707b..747b8c01b9b 100644 --- a/cmd/crowdsec-cli/clipapi/papi.go +++ b/cmd/crowdsec-cli/clipapi/papi.go @@ -1,6 +1,7 @@ package clipapi import ( + "context" "fmt" "io" "time" @@ -55,7 +56,7 @@ func (cli *cliPapi) NewCommand() *cobra.Command { return cmd } -func (cli *cliPapi) Status(out io.Writer, db *database.Client) error { +func (cli *cliPapi) Status(ctx context.Context, out io.Writer, db *database.Client) error { cfg := cli.cfg() apic, err := apiserver.NewAPIC(cfg.API.Server.OnlineClient, db, cfg.API.Server.ConsoleConfig, cfg.API.Server.CapiWhitelists) @@ -68,7 +69,7 @@ func (cli *cliPapi) Status(out io.Writer, db *database.Client) error { return fmt.Errorf("unable to initialize PAPI client: %w", err) } - perms, err := papi.GetPermissions() + perms, err := papi.GetPermissions(ctx) if err != nil { return fmt.Errorf("unable to get PAPI permissions: %w", err) } @@ -103,12 +104,14 @@ func (cli *cliPapi) newStatusCmd() *cobra.Command { DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { cfg := cli.cfg() - db, err := require.DBClient(cmd.Context(), cfg.DbConfig) + ctx := cmd.Context() + + db, err := require.DBClient(ctx, cfg.DbConfig) if err != nil { return err } - return cli.Status(color.Output, db) + return cli.Status(ctx, color.Output, db) }, } diff --git a/cmd/crowdsec-cli/clisupport/support.go b/cmd/crowdsec-cli/clisupport/support.go index f161c66c802..e9837b03fe7 100644 --- a/cmd/crowdsec-cli/clisupport/support.go +++ b/cmd/crowdsec-cli/clisupport/support.go @@ -231,13 +231,13 @@ func (cli *cliSupport) dumpAgents(zw *zip.Writer, db *database.Client) error { return nil } -func (cli *cliSupport) dumpLAPIStatus(zw *zip.Writer, hub *cwhub.Hub) error { +func (cli *cliSupport) dumpLAPIStatus(ctx context.Context, zw *zip.Writer, hub *cwhub.Hub) error { 
log.Info("Collecting LAPI status") out := new(bytes.Buffer) cl := clilapi.New(cli.cfg) - err := cl.Status(out, hub) + err := cl.Status(ctx, out, hub) if err != nil { fmt.Fprintf(out, "%s\n", err) } @@ -249,13 +249,13 @@ func (cli *cliSupport) dumpLAPIStatus(zw *zip.Writer, hub *cwhub.Hub) error { return nil } -func (cli *cliSupport) dumpCAPIStatus(zw *zip.Writer, hub *cwhub.Hub) error { +func (cli *cliSupport) dumpCAPIStatus(ctx context.Context, zw *zip.Writer, hub *cwhub.Hub) error { log.Info("Collecting CAPI status") out := new(bytes.Buffer) cc := clicapi.New(cli.cfg) - err := cc.Status(out, hub) + err := cc.Status(ctx, out, hub) if err != nil { fmt.Fprintf(out, "%s\n", err) } @@ -267,13 +267,13 @@ func (cli *cliSupport) dumpCAPIStatus(zw *zip.Writer, hub *cwhub.Hub) error { return nil } -func (cli *cliSupport) dumpPAPIStatus(zw *zip.Writer, db *database.Client) error { +func (cli *cliSupport) dumpPAPIStatus(ctx context.Context, zw *zip.Writer, db *database.Client) error { log.Info("Collecting PAPI status") out := new(bytes.Buffer) cp := clipapi.New(cli.cfg) - err := cp.Status(out, db) + err := cp.Status(ctx, out, db) if err != nil { fmt.Fprintf(out, "%s\n", err) } @@ -534,17 +534,17 @@ func (cli *cliSupport) dump(ctx context.Context, outFile string) error { } if !skipCAPI { - if err = cli.dumpCAPIStatus(zipWriter, hub); err != nil { + if err = cli.dumpCAPIStatus(ctx, zipWriter, hub); err != nil { log.Warnf("could not collect CAPI status: %s", err) } - if err = cli.dumpPAPIStatus(zipWriter, db); err != nil { + if err = cli.dumpPAPIStatus(ctx, zipWriter, db); err != nil { log.Warnf("could not collect PAPI status: %s", err) } } if !skipLAPI { - if err = cli.dumpLAPIStatus(zipWriter, hub); err != nil { + if err = cli.dumpLAPIStatus(ctx, zipWriter, hub); err != nil { log.Warnf("could not collect LAPI status: %s", err) } diff --git a/pkg/apiclient/auth_service_test.go b/pkg/apiclient/auth_service_test.go index 344e377ad0f..d22c9394014 100644 --- 
a/pkg/apiclient/auth_service_test.go +++ b/pkg/apiclient/auth_service_test.go @@ -92,7 +92,9 @@ func TestWatcherRegister(t *testing.T) { VersionPrefix: "v1", } - client, err := RegisterClient(&clientconfig, &http.Client{}) + ctx := context.Background() + + client, err := RegisterClient(ctx, &clientconfig, &http.Client{}) require.NoError(t, err) log.Printf("->%T", client) @@ -102,7 +104,7 @@ func TestWatcherRegister(t *testing.T) { for _, errorCodeToTest := range errorCodesToTest { clientconfig.MachineID = fmt.Sprintf("login_%d", errorCodeToTest) - client, err = RegisterClient(&clientconfig, &http.Client{}) + client, err = RegisterClient(ctx, &clientconfig, &http.Client{}) require.Nil(t, client, "nil expected for the response code %d", errorCodeToTest) require.Error(t, err, "error expected for the response code %d", errorCodeToTest) } diff --git a/pkg/apiclient/client.go b/pkg/apiclient/client.go index 02a99037a04..47d97a28344 100644 --- a/pkg/apiclient/client.go +++ b/pkg/apiclient/client.go @@ -167,7 +167,7 @@ func NewDefaultClient(URL *url.URL, prefix string, userAgent string, client *htt return c, nil } -func RegisterClient(config *Config, client *http.Client) (*ApiClient, error) { +func RegisterClient(ctx context.Context, config *Config, client *http.Client) (*ApiClient, error) { transport, baseURL := createTransport(config.URL) if client == nil { @@ -199,7 +199,7 @@ func RegisterClient(config *Config, client *http.Client) (*ApiClient, error) { c.Alerts = (*AlertsService)(&c.common) c.Auth = (*AuthService)(&c.common) - resp, err := c.Auth.RegisterWatcher(context.Background(), models.WatcherRegistrationRequest{MachineID: &config.MachineID, Password: &config.Password, RegistrationToken: config.RegistrationToken}) + resp, err := c.Auth.RegisterWatcher(ctx, models.WatcherRegistrationRequest{MachineID: &config.MachineID, Password: &config.Password, RegistrationToken: config.RegistrationToken}) if err != nil { /*if we have http status, return it*/ if resp != nil && 
resp.Response != nil { diff --git a/pkg/apiclient/client_test.go b/pkg/apiclient/client_test.go index e632ff428c0..d1f58f33ad2 100644 --- a/pkg/apiclient/client_test.go +++ b/pkg/apiclient/client_test.go @@ -242,7 +242,9 @@ func TestNewClientRegisterKO(t *testing.T) { apiURL, err := url.Parse("http://127.0.0.1:4242/") require.NoError(t, err) - _, err = RegisterClient(&Config{ + ctx := context.Background() + + _, err = RegisterClient(ctx, &Config{ MachineID: "test_login", Password: "test_password", URL: apiURL, @@ -272,7 +274,9 @@ func TestNewClientRegisterOK(t *testing.T) { apiURL, err := url.Parse(urlx + "/") require.NoError(t, err) - client, err := RegisterClient(&Config{ + ctx := context.Background() + + client, err := RegisterClient(ctx, &Config{ MachineID: "test_login", Password: "test_password", URL: apiURL, @@ -304,7 +308,9 @@ func TestNewClientRegisterOK_UnixSocket(t *testing.T) { t.Fatalf("parsing api url: %s", apiURL) } - client, err := RegisterClient(&Config{ + ctx := context.Background() + + client, err := RegisterClient(ctx, &Config{ MachineID: "test_login", Password: "test_password", URL: apiURL, @@ -333,7 +339,9 @@ func TestNewClientBadAnswer(t *testing.T) { apiURL, err := url.Parse(urlx + "/") require.NoError(t, err) - _, err = RegisterClient(&Config{ + ctx := context.Background() + + _, err = RegisterClient(ctx, &Config{ MachineID: "test_login", Password: "test_password", URL: apiURL, diff --git a/pkg/apiserver/papi.go b/pkg/apiserver/papi.go index 0d0fd0ecd42..0a69f086a7f 100644 --- a/pkg/apiserver/papi.go +++ b/pkg/apiserver/papi.go @@ -156,11 +156,11 @@ func (p *Papi) handleEvent(event longpollclient.Event, sync bool) error { return nil } -func (p *Papi) GetPermissions() (PapiPermCheckSuccess, error) { +func (p *Papi) GetPermissions(ctx context.Context) (PapiPermCheckSuccess, error) { httpClient := p.apiClient.GetClient() papiCheckUrl := fmt.Sprintf("%s%s%s", p.URL, types.PAPIVersion, types.PAPIPermissionsUrl) - req, err := 
http.NewRequest(http.MethodGet, papiCheckUrl, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, papiCheckUrl, nil) if err != nil { return PapiPermCheckSuccess{}, fmt.Errorf("failed to create request: %w", err) } From 1591a0c46ee3c3b1643d7078887f4bc262ed155b Mon Sep 17 00:00:00 2001 From: blotus Date: Fri, 13 Sep 2024 10:57:48 +0200 Subject: [PATCH 285/318] Update go-re2 (#3230) --- go.mod | 5 +++-- go.sum | 12 ++++++++---- 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index bce1ca12316..ec8566db84a 100644 --- a/go.mod +++ b/go.mod @@ -78,9 +78,9 @@ require ( github.com/sirupsen/logrus v1.9.3 github.com/slack-go/slack v0.12.2 github.com/spf13/cobra v1.8.0 - github.com/stretchr/testify v1.8.4 + github.com/stretchr/testify v1.9.0 github.com/umahmood/haversine v0.0.0-20151105152445-808ab04add26 - github.com/wasilibs/go-re2 v1.6.0 + github.com/wasilibs/go-re2 v1.7.0 github.com/xhit/go-simple-mail/v2 v2.16.0 golang.org/x/crypto v0.22.0 golang.org/x/mod v0.15.0 @@ -195,6 +195,7 @@ require ( github.com/twitchyliquid64/golang-asm v0.15.1 // indirect github.com/ugorji/go/codec v1.2.12 // indirect github.com/vmihailenco/msgpack v4.0.4+incompatible // indirect + github.com/wasilibs/wazero-helpers v0.0.0-20240620070341-3dff1577cd52 // indirect github.com/yusufpapurcu/wmi v1.2.3 // indirect github.com/zclconf/go-cty v1.8.0 // indirect go.mongodb.org/mongo-driver v1.9.4 // indirect diff --git a/go.sum b/go.sum index eec85b5b2e9..ff73dc56332 100644 --- a/go.sum +++ b/go.sum @@ -644,8 +644,9 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= 
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v0.0.0-20161117074351-18a02ba4a312/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -659,8 +660,9 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/tetratelabs/wazero v1.8.0 h1:iEKu0d4c2Pd+QSRieYbnQC9yiFlMS9D+Jr0LsRmcF4g= github.com/tetratelabs/wazero v1.8.0/go.mod h1:yAI0XTsMBhREkM/YDAK/zNou3GoiAce1P6+rp/wQhjs= github.com/tidwall/gjson v1.17.0 h1:/Jocvlh98kcTfpN2+JzGQWQcqrPQwDrVEMApx/M5ZwM= @@ -690,10 +692,12 @@ github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaU github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= -github.com/wasilibs/go-re2 v1.6.0 
h1:CLlhDebt38wtl/zz4ww+hkXBMcxjrKFvTDXzFW2VOz8= -github.com/wasilibs/go-re2 v1.6.0/go.mod h1:prArCyErsypRBI/jFAFJEbzyHzjABKqkzlidF0SNA04= +github.com/wasilibs/go-re2 v1.7.0 h1:bYhl8gn+a9h01dxwotNycxkiFPTiSgwUrIz8KZJ90Lc= +github.com/wasilibs/go-re2 v1.7.0/go.mod h1:sUsZMLflgl+LNivDE229omtmvjICmOseT9xOy199VDU= github.com/wasilibs/nottinygc v0.4.0 h1:h1TJMihMC4neN6Zq+WKpLxgd9xCFMw7O9ETLwY2exJQ= github.com/wasilibs/nottinygc v0.4.0/go.mod h1:oDcIotskuYNMpqMF23l7Z8uzD4TC0WXHK8jetlB3HIo= +github.com/wasilibs/wazero-helpers v0.0.0-20240620070341-3dff1577cd52 h1:OvLBa8SqJnZ6P+mjlzc2K7PM22rRUPE1x32G9DTPrC4= +github.com/wasilibs/wazero-helpers v0.0.0-20240620070341-3dff1577cd52/go.mod h1:jMeV4Vpbi8osrE/pKUxRZkVaA0EX7NZN0A9/oRzgpgY= github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= From ce085dc4cd11c2ca650ff373afa599417c77bdad Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 17 Sep 2024 13:19:14 +0200 Subject: [PATCH 286/318] logs and user messages: use "parse" and "serialize" instead of marshal/unmarshal (#3240) --- cmd/crowdsec-cli/clialert/alerts.go | 4 ++-- cmd/crowdsec-cli/clibouncer/bouncers.go | 6 +++--- cmd/crowdsec-cli/clicapi/capi.go | 2 +- cmd/crowdsec-cli/cliconsole/console.go | 6 +++--- cmd/crowdsec-cli/clihub/items.go | 4 ++-- cmd/crowdsec-cli/clihubtest/create.go | 2 +- cmd/crowdsec-cli/cliitem/appsec.go | 2 +- cmd/crowdsec-cli/clilapi/lapi.go | 2 +- cmd/crowdsec-cli/climachine/machines.go | 8 ++++---- cmd/crowdsec-cli/climetrics/list.go | 2 +- cmd/crowdsec-cli/climetrics/store.go | 2 +- cmd/crowdsec-cli/clinotifications/notifications.go | 8 ++++---- cmd/crowdsec-cli/clisetup/setup.go | 2 +- cmd/crowdsec-cli/clisimulation/simulation.go | 4 ++-- cmd/crowdsec-cli/config_backup.go | 2 +- cmd/crowdsec-cli/config_restore.go | 
2 +- cmd/crowdsec-cli/config_show.go | 6 +++--- cmd/crowdsec/pour.go | 4 ++-- cmd/notification-file/main.go | 2 +- pkg/acquisition/acquisition.go | 2 +- pkg/acquisition/modules/appsec/appsec.go | 2 +- pkg/acquisition/modules/kafka/kafka.go | 2 +- pkg/acquisition/modules/kubernetesaudit/k8s_audit.go | 2 +- .../modules/wineventlog/wineventlog_windows.go | 2 +- pkg/alertcontext/config.go | 2 +- pkg/apiserver/apic_metrics.go | 4 ++-- pkg/apiserver/controllers/v1/alerts.go | 2 +- pkg/apiserver/controllers/v1/usagemetrics.go | 2 +- pkg/apiserver/papi.go | 6 +++--- pkg/appsec/loader.go | 2 +- pkg/csconfig/api.go | 4 ++-- pkg/csconfig/api_test.go | 2 +- pkg/csconfig/config_test.go | 2 +- pkg/csconfig/console.go | 2 +- pkg/csconfig/crowdsec_service.go | 2 +- pkg/csconfig/simulation.go | 2 +- pkg/csconfig/simulation_test.go | 4 ++-- pkg/csplugin/broker_test.go | 4 ++-- pkg/cwhub/hub.go | 2 +- pkg/cwhub/sync.go | 2 +- pkg/database/ent/machine.go | 4 ++-- pkg/database/errors.go | 4 ++-- pkg/hubtest/coverage.go | 4 ++-- pkg/hubtest/hubtest_item.go | 4 ++-- pkg/leakybucket/buckets_test.go | 4 ++-- pkg/leakybucket/manager_load.go | 2 +- pkg/leakybucket/manager_run.go | 4 ++-- pkg/leakybucket/overflows.go | 6 +++--- pkg/leakybucket/timemachine.go | 2 +- pkg/leakybucket/trigger.go | 2 +- pkg/parser/enrich_date.go | 4 ++-- pkg/parser/enrich_unmarshal.go | 2 +- pkg/parser/parsing_test.go | 2 +- pkg/setup/detect.go | 2 +- pkg/setup/install.go | 2 +- test/bats/07_setup.bats | 2 +- 56 files changed, 87 insertions(+), 87 deletions(-) diff --git a/cmd/crowdsec-cli/clialert/alerts.go b/cmd/crowdsec-cli/clialert/alerts.go index 757a84927e5..006d7ac7d8c 100644 --- a/cmd/crowdsec-cli/clialert/alerts.go +++ b/cmd/crowdsec-cli/clialert/alerts.go @@ -521,14 +521,14 @@ func (cli *cliAlerts) inspect(details bool, alertIDs ...string) error { case "json": data, err := json.MarshalIndent(alert, "", " ") if err != nil { - return fmt.Errorf("unable to marshal alert with id %s: %w", alertID, err) + 
return fmt.Errorf("unable to serialize alert with id %s: %w", alertID, err) } fmt.Printf("%s\n", string(data)) case "raw": data, err := yaml.Marshal(alert) if err != nil { - return fmt.Errorf("unable to marshal alert with id %s: %w", alertID, err) + return fmt.Errorf("unable to serialize alert with id %s: %w", alertID, err) } fmt.Println(string(data)) diff --git a/cmd/crowdsec-cli/clibouncer/bouncers.go b/cmd/crowdsec-cli/clibouncer/bouncers.go index 0d1484bcc6b..25c80d16404 100644 --- a/cmd/crowdsec-cli/clibouncer/bouncers.go +++ b/cmd/crowdsec-cli/clibouncer/bouncers.go @@ -181,7 +181,7 @@ func (cli *cliBouncers) List(out io.Writer, db *database.Client) error { enc.SetIndent("", " ") if err := enc.Encode(info); err != nil { - return errors.New("failed to marshal") + return errors.New("failed to serialize") } return nil @@ -234,7 +234,7 @@ func (cli *cliBouncers) add(bouncerName string, key string) error { case "json": j, err := json.Marshal(key) if err != nil { - return errors.New("unable to marshal api key") + return errors.New("unable to serialize api key") } fmt.Print(string(j)) @@ -458,7 +458,7 @@ func (cli *cliBouncers) inspect(bouncer *ent.Bouncer) error { enc.SetIndent("", " ") if err := enc.Encode(newBouncerInfo(bouncer)); err != nil { - return errors.New("failed to marshal") + return errors.New("failed to serialize") } return nil diff --git a/cmd/crowdsec-cli/clicapi/capi.go b/cmd/crowdsec-cli/clicapi/capi.go index fbc50066287..24c3ba054a9 100644 --- a/cmd/crowdsec-cli/clicapi/capi.go +++ b/cmd/crowdsec-cli/clicapi/capi.go @@ -104,7 +104,7 @@ func (cli *cliCapi) register(ctx context.Context, capiUserPrefix string, outputF apiConfigDump, err := yaml.Marshal(apiCfg) if err != nil { - return fmt.Errorf("unable to marshal api credentials: %w", err) + return fmt.Errorf("unable to serialize api credentials: %w", err) } if dumpFile != "" { diff --git a/cmd/crowdsec-cli/cliconsole/console.go b/cmd/crowdsec-cli/cliconsole/console.go index e4b4039bdd2..af1ba316c2d 
100644 --- a/cmd/crowdsec-cli/cliconsole/console.go +++ b/cmd/crowdsec-cli/cliconsole/console.go @@ -280,7 +280,7 @@ func (cli *cliConsole) newStatusCmd() *cobra.Command { } data, err := json.MarshalIndent(out, "", " ") if err != nil { - return fmt.Errorf("failed to marshal configuration: %w", err) + return fmt.Errorf("failed to serialize configuration: %w", err) } fmt.Println(string(data)) case "raw": @@ -318,7 +318,7 @@ func (cli *cliConsole) dumpConfig() error { out, err := yaml.Marshal(serverCfg.ConsoleConfig) if err != nil { - return fmt.Errorf("while marshaling ConsoleConfig (for %s): %w", serverCfg.ConsoleConfigPath, err) + return fmt.Errorf("while serializing ConsoleConfig (for %s): %w", serverCfg.ConsoleConfigPath, err) } if serverCfg.ConsoleConfigPath == "" { @@ -361,7 +361,7 @@ func (cli *cliConsole) setConsoleOpts(args []string, wanted bool) error { if changed { fileContent, err := yaml.Marshal(cfg.API.Server.OnlineClient.Credentials) if err != nil { - return fmt.Errorf("cannot marshal credentials: %w", err) + return fmt.Errorf("cannot serialize credentials: %w", err) } log.Infof("Updating credentials file: %s", cfg.API.Server.OnlineClient.CredentialsFilePath) diff --git a/cmd/crowdsec-cli/clihub/items.go b/cmd/crowdsec-cli/clihub/items.go index 0ab89654dac..f86fe65a2a1 100644 --- a/cmd/crowdsec-cli/clihub/items.go +++ b/cmd/crowdsec-cli/clihub/items.go @@ -106,7 +106,7 @@ func ListItems(out io.Writer, wantColor string, itemTypes []string, items map[st x, err := json.MarshalIndent(hubStatus, "", " ") if err != nil { - return fmt.Errorf("failed to unmarshal: %w", err) + return fmt.Errorf("failed to parse: %w", err) } out.Write(x) @@ -158,7 +158,7 @@ func InspectItem(item *cwhub.Item, wantMetrics bool, output string, prometheusUR case "json": b, err := json.MarshalIndent(*item, "", " ") if err != nil { - return fmt.Errorf("unable to marshal item: %w", err) + return fmt.Errorf("unable to serialize item: %w", err) } fmt.Print(string(b)) diff --git 
a/cmd/crowdsec-cli/clihubtest/create.go b/cmd/crowdsec-cli/clihubtest/create.go index e0834f7e569..3822bed8903 100644 --- a/cmd/crowdsec-cli/clihubtest/create.go +++ b/cmd/crowdsec-cli/clihubtest/create.go @@ -134,7 +134,7 @@ cscli hubtest create my-scenario-test --parsers crowdsecurity/nginx --scenarios } data, err := yaml.Marshal(configFileData) if err != nil { - return fmt.Errorf("marshal: %w", err) + return fmt.Errorf("serialize: %w", err) } _, err = fd.Write(data) if err != nil { diff --git a/cmd/crowdsec-cli/cliitem/appsec.go b/cmd/crowdsec-cli/cliitem/appsec.go index db567f86a32..44afa2133bd 100644 --- a/cmd/crowdsec-cli/cliitem/appsec.go +++ b/cmd/crowdsec-cli/cliitem/appsec.go @@ -62,7 +62,7 @@ func NewAppsecRule(cfg configGetter) *cliItem { } if err := yaml.Unmarshal(yamlContent, &appsecRule); err != nil { - return fmt.Errorf("unable to unmarshal yaml file %s: %w", item.State.LocalPath, err) + return fmt.Errorf("unable to parse yaml file %s: %w", item.State.LocalPath, err) } for _, ruleType := range appsec_rule.SupportedTypes() { diff --git a/cmd/crowdsec-cli/clilapi/lapi.go b/cmd/crowdsec-cli/clilapi/lapi.go index eff7ae64476..75fdc5c239d 100644 --- a/cmd/crowdsec-cli/clilapi/lapi.go +++ b/cmd/crowdsec-cli/clilapi/lapi.go @@ -147,7 +147,7 @@ func (cli *cliLapi) register(ctx context.Context, apiURL string, outputFile stri apiConfigDump, err := yaml.Marshal(apiCfg) if err != nil { - return fmt.Errorf("unable to marshal api credentials: %w", err) + return fmt.Errorf("unable to serialize api credentials: %w", err) } if dumpFile != "" { diff --git a/cmd/crowdsec-cli/climachine/machines.go b/cmd/crowdsec-cli/climachine/machines.go index bf8656105aa..3df176d786d 100644 --- a/cmd/crowdsec-cli/climachine/machines.go +++ b/cmd/crowdsec-cli/climachine/machines.go @@ -232,7 +232,7 @@ func (cli *cliMachines) List(out io.Writer, db *database.Client) error { enc.SetIndent("", " ") if err := enc.Encode(info); err != nil { - return errors.New("failed to marshal") + 
return errors.New("failed to serialize") } return nil @@ -378,7 +378,7 @@ func (cli *cliMachines) add(args []string, machinePassword string, dumpFile stri apiConfigDump, err := yaml.Marshal(apiCfg) if err != nil { - return fmt.Errorf("unable to marshal api credentials: %w", err) + return fmt.Errorf("unable to serialize api credentials: %w", err) } if dumpFile != "" && dumpFile != "-" { @@ -626,7 +626,7 @@ func (cli *cliMachines) inspect(machine *ent.Machine) error { enc.SetIndent("", " ") if err := enc.Encode(newMachineInfo(machine)); err != nil { - return errors.New("failed to marshal") + return errors.New("failed to serialize") } return nil @@ -648,7 +648,7 @@ func (cli *cliMachines) inspectHub(machine *ent.Machine) error { enc.SetIndent("", " ") if err := enc.Encode(machine.Hubstate); err != nil { - return errors.New("failed to marshal") + return errors.New("failed to serialize") } return nil diff --git a/cmd/crowdsec-cli/climetrics/list.go b/cmd/crowdsec-cli/climetrics/list.go index d3afbef0669..ba827634052 100644 --- a/cmd/crowdsec-cli/climetrics/list.go +++ b/cmd/crowdsec-cli/climetrics/list.go @@ -68,7 +68,7 @@ func (cli *cliMetrics) list() error { case "json": x, err := json.MarshalIndent(allMetrics, "", " ") if err != nil { - return fmt.Errorf("failed to marshal metric types: %w", err) + return fmt.Errorf("failed to serialize metric types: %w", err) } fmt.Println(string(x)) diff --git a/cmd/crowdsec-cli/climetrics/store.go b/cmd/crowdsec-cli/climetrics/store.go index 5de50558e89..55fab5dbd7f 100644 --- a/cmd/crowdsec-cli/climetrics/store.go +++ b/cmd/crowdsec-cli/climetrics/store.go @@ -260,7 +260,7 @@ func (ms metricStore) Format(out io.Writer, wantColor string, sections []string, case "json": x, err := json.MarshalIndent(want, "", " ") if err != nil { - return fmt.Errorf("failed to marshal metrics: %w", err) + return fmt.Errorf("failed to serialize metrics: %w", err) } out.Write(x) default: diff --git a/cmd/crowdsec-cli/clinotifications/notifications.go 
b/cmd/crowdsec-cli/clinotifications/notifications.go index eb568ca5fa6..314f97db23e 100644 --- a/cmd/crowdsec-cli/clinotifications/notifications.go +++ b/cmd/crowdsec-cli/clinotifications/notifications.go @@ -172,7 +172,7 @@ func (cli *cliNotifications) newListCmd() *cobra.Command { } else if cfg.Cscli.Output == "json" { x, err := json.MarshalIndent(ncfgs, "", " ") if err != nil { - return fmt.Errorf("failed to marshal notification configuration: %w", err) + return fmt.Errorf("failed to serialize notification configuration: %w", err) } fmt.Printf("%s", string(x)) } else if cfg.Cscli.Output == "raw" { @@ -231,7 +231,7 @@ func (cli *cliNotifications) newInspectCmd() *cobra.Command { } else if cfg.Cscli.Output == "json" { x, err := json.MarshalIndent(cfg, "", " ") if err != nil { - return fmt.Errorf("failed to marshal notification configuration: %w", err) + return fmt.Errorf("failed to serialize notification configuration: %w", err) } fmt.Printf("%s", string(x)) } @@ -331,7 +331,7 @@ func (cli cliNotifications) newTestCmd() *cobra.Command { CreatedAt: time.Now().UTC().Format(time.RFC3339), } if err := yaml.Unmarshal([]byte(alertOverride), alert); err != nil { - return fmt.Errorf("failed to unmarshal alert override: %w", err) + return fmt.Errorf("failed to parse alert override: %w", err) } pluginBroker.PluginChannel <- csplugin.ProfileAlert{ @@ -387,7 +387,7 @@ cscli notifications reinject -a '{"remediation": true,"scenario":"not if alertOverride != "" { if err := json.Unmarshal([]byte(alertOverride), alert); err != nil { - return fmt.Errorf("can't unmarshal data in the alert flag: %w", err) + return fmt.Errorf("can't parse data in the alert flag: %w", err) } } diff --git a/cmd/crowdsec-cli/clisetup/setup.go b/cmd/crowdsec-cli/clisetup/setup.go index 8aee45b4287..269cdfb78e9 100644 --- a/cmd/crowdsec-cli/clisetup/setup.go +++ b/cmd/crowdsec-cli/clisetup/setup.go @@ -227,7 +227,7 @@ func setupAsString(cs setup.Setup, outYaml bool) (string, error) { ) wrap := func(err 
error) error { - return fmt.Errorf("while marshaling setup: %w", err) + return fmt.Errorf("while serializing setup: %w", err) } indentLevel := 2 diff --git a/cmd/crowdsec-cli/clisimulation/simulation.go b/cmd/crowdsec-cli/clisimulation/simulation.go index 9d9defd78e7..8136aa213c3 100644 --- a/cmd/crowdsec-cli/clisimulation/simulation.go +++ b/cmd/crowdsec-cli/clisimulation/simulation.go @@ -220,7 +220,7 @@ func (cli *cliSimulation) dumpSimulationFile() error { newConfigSim, err := yaml.Marshal(cfg.Cscli.SimulationConfig) if err != nil { - return fmt.Errorf("unable to marshal simulation configuration: %w", err) + return fmt.Errorf("unable to serialize simulation configuration: %w", err) } err = os.WriteFile(cfg.ConfigPaths.SimulationFilePath, newConfigSim, 0o644) @@ -242,7 +242,7 @@ func (cli *cliSimulation) disableGlobalSimulation() error { newConfigSim, err := yaml.Marshal(cfg.Cscli.SimulationConfig) if err != nil { - return fmt.Errorf("unable to marshal new simulation configuration: %w", err) + return fmt.Errorf("unable to serialize new simulation configuration: %w", err) } err = os.WriteFile(cfg.ConfigPaths.SimulationFilePath, newConfigSim, 0o644) diff --git a/cmd/crowdsec-cli/config_backup.go b/cmd/crowdsec-cli/config_backup.go index e8ac6213530..d23aff80a78 100644 --- a/cmd/crowdsec-cli/config_backup.go +++ b/cmd/crowdsec-cli/config_backup.go @@ -74,7 +74,7 @@ func (cli *cliConfig) backupHub(dirPath string) error { upstreamParsersContent, err := json.MarshalIndent(upstreamParsers, "", " ") if err != nil { - return fmt.Errorf("failed marshaling upstream parsers: %w", err) + return fmt.Errorf("failed to serialize upstream parsers: %w", err) } err = os.WriteFile(upstreamParsersFname, upstreamParsersContent, 0o644) diff --git a/cmd/crowdsec-cli/config_restore.go b/cmd/crowdsec-cli/config_restore.go index fc3670165f8..c32328485ec 100644 --- a/cmd/crowdsec-cli/config_restore.go +++ b/cmd/crowdsec-cli/config_restore.go @@ -40,7 +40,7 @@ func (cli *cliConfig) 
restoreHub(ctx context.Context, dirPath string) error { err = json.Unmarshal(file, &upstreamList) if err != nil { - return fmt.Errorf("error unmarshaling %s: %w", upstreamListFN, err) + return fmt.Errorf("error parsing %s: %w", upstreamListFN, err) } for _, toinstall := range upstreamList { diff --git a/cmd/crowdsec-cli/config_show.go b/cmd/crowdsec-cli/config_show.go index e411f5a322b..2d3ac488ba2 100644 --- a/cmd/crowdsec-cli/config_show.go +++ b/cmd/crowdsec-cli/config_show.go @@ -50,7 +50,7 @@ func (cli *cliConfig) showKey(key string) error { case "json": data, err := json.MarshalIndent(output, "", " ") if err != nil { - return fmt.Errorf("failed to marshal configuration: %w", err) + return fmt.Errorf("failed to serialize configuration: %w", err) } fmt.Println(string(data)) @@ -212,14 +212,14 @@ func (cli *cliConfig) show() error { case "json": data, err := json.MarshalIndent(cfg, "", " ") if err != nil { - return fmt.Errorf("failed to marshal configuration: %w", err) + return fmt.Errorf("failed to serialize configuration: %w", err) } fmt.Println(string(data)) case "raw": data, err := yaml.Marshal(cfg) if err != nil { - return fmt.Errorf("failed to marshal configuration: %w", err) + return fmt.Errorf("failed to serialize configuration: %w", err) } fmt.Println(string(data)) diff --git a/cmd/crowdsec/pour.go b/cmd/crowdsec/pour.go index 388c7a6c1b3..1382a909ab3 100644 --- a/cmd/crowdsec/pour.go +++ b/cmd/crowdsec/pour.go @@ -32,7 +32,7 @@ func runPour(input chan types.Event, holders []leaky.BucketFactory, buckets *lea if parsed.MarshaledTime != "" { z := &time.Time{} if err := z.UnmarshalText([]byte(parsed.MarshaledTime)); err != nil { - log.Warningf("Failed to unmarshal time from event '%s' : %s", parsed.MarshaledTime, err) + log.Warningf("Failed to parse time from event '%s' : %s", parsed.MarshaledTime, err) } else { log.Warning("Starting buckets garbage collection ...") @@ -61,7 +61,7 @@ func runPour(input chan types.Event, holders []leaky.BucketFactory, 
buckets *lea if len(parsed.MarshaledTime) != 0 { if err := lastProcessedItem.UnmarshalText([]byte(parsed.MarshaledTime)); err != nil { - log.Warningf("failed to unmarshal time from event : %s", err) + log.Warningf("failed to parse time from event : %s", err) } } } diff --git a/cmd/notification-file/main.go b/cmd/notification-file/main.go index 7fc529cff41..f6649b1f395 100644 --- a/cmd/notification-file/main.go +++ b/cmd/notification-file/main.go @@ -210,7 +210,7 @@ func (s *FilePlugin) Configure(ctx context.Context, config *protobufs.Config) (* d := PluginConfig{} err := yaml.Unmarshal(config.Config, &d) if err != nil { - logger.Error("Failed to unmarshal config", "error", err) + logger.Error("Failed to parse config", "error", err) return &protobufs.Empty{}, err } FileWriteMutex = &sync.Mutex{} diff --git a/pkg/acquisition/acquisition.go b/pkg/acquisition/acquisition.go index 38bf228abbc..a737881dd4d 100644 --- a/pkg/acquisition/acquisition.go +++ b/pkg/acquisition/acquisition.go @@ -129,7 +129,7 @@ func DataSourceConfigure(commonConfig configuration.DataSourceCommonCfg, metrics // once to DataSourceCommonCfg, and then later to the dedicated type of the datasource yamlConfig, err := yaml.Marshal(commonConfig) if err != nil { - return nil, fmt.Errorf("unable to marshal back interface: %w", err) + return nil, fmt.Errorf("unable to serialize back interface: %w", err) } dataSrc, err := GetDataSourceIface(commonConfig.Source) diff --git a/pkg/acquisition/modules/appsec/appsec.go b/pkg/acquisition/modules/appsec/appsec.go index 5b0661a21b7..8a93326c7e3 100644 --- a/pkg/acquisition/modules/appsec/appsec.go +++ b/pkg/acquisition/modules/appsec/appsec.go @@ -393,7 +393,7 @@ func (w *AppsecSource) appsecHandler(rw http.ResponseWriter, r *http.Request) { rw.WriteHeader(statusCode) body, err := json.Marshal(appsecResponse) if err != nil { - logger.Errorf("unable to marshal response: %s", err) + logger.Errorf("unable to serialize response: %s", err) 
rw.WriteHeader(http.StatusInternalServerError) } else { rw.Write(body) diff --git a/pkg/acquisition/modules/kafka/kafka.go b/pkg/acquisition/modules/kafka/kafka.go index ca0a7556fca..a0d7fc39bcc 100644 --- a/pkg/acquisition/modules/kafka/kafka.go +++ b/pkg/acquisition/modules/kafka/kafka.go @@ -82,7 +82,7 @@ func (k *KafkaSource) UnmarshalConfig(yamlConfig []byte) error { k.Config.Mode = configuration.TAIL_MODE } - k.logger.Debugf("successfully unmarshaled kafka configuration : %+v", k.Config) + k.logger.Debugf("successfully parsed kafka configuration : %+v", k.Config) return err } diff --git a/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go b/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go index e48a074b764..8ba5b2d06e0 100644 --- a/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go +++ b/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go @@ -196,7 +196,7 @@ func (ka *KubernetesAuditSource) webhookHandler(w http.ResponseWriter, r *http.R } bytesEvent, err := json.Marshal(auditEvent) if err != nil { - ka.logger.Errorf("Error marshaling audit event: %s", err) + ka.logger.Errorf("Error serializing audit event: %s", err) continue } ka.logger.Tracef("Got audit event: %s", string(bytesEvent)) diff --git a/pkg/acquisition/modules/wineventlog/wineventlog_windows.go b/pkg/acquisition/modules/wineventlog/wineventlog_windows.go index c6b10b7c38c..4f2384d71db 100644 --- a/pkg/acquisition/modules/wineventlog/wineventlog_windows.go +++ b/pkg/acquisition/modules/wineventlog/wineventlog_windows.go @@ -149,7 +149,7 @@ func (w *WinEventLogSource) buildXpathQuery() (string, error) { queryList := QueryList{Select: Select{Path: w.config.EventChannel, Query: query}} xpathQuery, err := xml.Marshal(queryList) if err != nil { - w.logger.Errorf("Marshal failed: %v", err) + w.logger.Errorf("Serialize failed: %v", err) return "", err } w.logger.Debugf("xpathQuery: %s", xpathQuery) diff --git a/pkg/alertcontext/config.go b/pkg/alertcontext/config.go index 
da05c937b18..6ef877619e4 100644 --- a/pkg/alertcontext/config.go +++ b/pkg/alertcontext/config.go @@ -133,7 +133,7 @@ func LoadConsoleContext(c *csconfig.Config, hub *cwhub.Hub) error { feedback, err := json.Marshal(c.Crowdsec.ContextToSend) if err != nil { - return fmt.Errorf("marshaling console context: %s", err) + return fmt.Errorf("serializing console context: %s", err) } log.Debugf("console context to send: %s", feedback) diff --git a/pkg/apiserver/apic_metrics.go b/pkg/apiserver/apic_metrics.go index 176984f1ad6..5c6a550a6a0 100644 --- a/pkg/apiserver/apic_metrics.go +++ b/pkg/apiserver/apic_metrics.go @@ -70,7 +70,7 @@ func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { err := json.Unmarshal([]byte(dbMetric.Payload), dbPayload) if err != nil { - log.Errorf("unable to unmarshal bouncer metric (%s)", err) + log.Errorf("unable to parse bouncer metric (%s)", err) continue } @@ -132,7 +132,7 @@ func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { err := json.Unmarshal([]byte(dbMetric.Payload), dbPayload) if err != nil { - log.Errorf("unable to unmarshal log processor metric (%s)", err) + log.Errorf("unable to parse log processor metric (%s)", err) continue } diff --git a/pkg/apiserver/controllers/v1/alerts.go b/pkg/apiserver/controllers/v1/alerts.go index 3d4309b1347..84b3094865c 100644 --- a/pkg/apiserver/controllers/v1/alerts.go +++ b/pkg/apiserver/controllers/v1/alerts.go @@ -63,7 +63,7 @@ func FormatOneAlert(alert *ent.Alert) *models.Alert { var Metas models.Meta if err := json.Unmarshal([]byte(eventItem.Serialized), &Metas); err != nil { - log.Errorf("unable to unmarshall events meta '%s' : %s", eventItem.Serialized, err) + log.Errorf("unable to parse events meta '%s' : %s", eventItem.Serialized, err) } outputAlert.Events = append(outputAlert.Events, &models.Event{ diff --git a/pkg/apiserver/controllers/v1/usagemetrics.go b/pkg/apiserver/controllers/v1/usagemetrics.go index 27b1b819a54..5b2c3e3b1a9 100644 --- 
a/pkg/apiserver/controllers/v1/usagemetrics.go +++ b/pkg/apiserver/controllers/v1/usagemetrics.go @@ -183,7 +183,7 @@ func (c *Controller) UsageMetrics(gctx *gin.Context) { jsonPayload, err := json.Marshal(payload) if err != nil { - logger.Errorf("Failed to marshal usage metrics: %s", err) + logger.Errorf("Failed to serialize usage metrics: %s", err) c.HandleDBErrors(gctx, err) return diff --git a/pkg/apiserver/papi.go b/pkg/apiserver/papi.go index 0a69f086a7f..89ad93930a1 100644 --- a/pkg/apiserver/papi.go +++ b/pkg/apiserver/papi.go @@ -245,7 +245,7 @@ func (p *Papi) Pull() error { if lastTimestampStr == nil { binTime, err := lastTimestamp.MarshalText() if err != nil { - return fmt.Errorf("failed to marshal last timestamp: %w", err) + return fmt.Errorf("failed to serialize last timestamp: %w", err) } if err := p.DBClient.SetConfigItem(PapiPullKey, string(binTime)); err != nil { @@ -255,7 +255,7 @@ func (p *Papi) Pull() error { } } else { if err := lastTimestamp.UnmarshalText([]byte(*lastTimestampStr)); err != nil { - return fmt.Errorf("failed to unmarshal last timestamp: %w", err) + return fmt.Errorf("failed to parse last timestamp: %w", err) } } @@ -268,7 +268,7 @@ func (p *Papi) Pull() error { binTime, err := newTime.MarshalText() if err != nil { - return fmt.Errorf("failed to marshal last timestamp: %w", err) + return fmt.Errorf("failed to serialize last timestamp: %w", err) } err = p.handleEvent(event, false) diff --git a/pkg/appsec/loader.go b/pkg/appsec/loader.go index 9a3bfb6b668..c724010cec2 100644 --- a/pkg/appsec/loader.go +++ b/pkg/appsec/loader.go @@ -28,7 +28,7 @@ func LoadAppsecRules(hubInstance *cwhub.Hub) error { err = yaml.UnmarshalStrict(content, &rule) if err != nil { - log.Warnf("unable to unmarshal file %s : %s", hubAppsecRuleItem.State.LocalPath, err) + log.Warnf("unable to parse file %s : %s", hubAppsecRuleItem.State.LocalPath, err) continue } diff --git a/pkg/csconfig/api.go b/pkg/csconfig/api.go index 4a28b590e80..3014b729a9e 100644 --- 
a/pkg/csconfig/api.go +++ b/pkg/csconfig/api.go @@ -99,7 +99,7 @@ func (o *OnlineApiClientCfg) Load() error { err = dec.Decode(o.Credentials) if err != nil { if !errors.Is(err, io.EOF) { - return fmt.Errorf("failed unmarshaling api server credentials configuration file '%s': %w", o.CredentialsFilePath, err) + return fmt.Errorf("failed to parse api server credentials configuration file '%s': %w", o.CredentialsFilePath, err) } } @@ -134,7 +134,7 @@ func (l *LocalApiClientCfg) Load() error { err = dec.Decode(&l.Credentials) if err != nil { if !errors.Is(err, io.EOF) { - return fmt.Errorf("failed unmarshaling api client credential configuration file '%s': %w", l.CredentialsFilePath, err) + return fmt.Errorf("failed to parse api client credential configuration file '%s': %w", l.CredentialsFilePath, err) } } diff --git a/pkg/csconfig/api_test.go b/pkg/csconfig/api_test.go index 96945202aa8..dff3c3afc8c 100644 --- a/pkg/csconfig/api_test.go +++ b/pkg/csconfig/api_test.go @@ -101,7 +101,7 @@ func TestLoadOnlineApiClientCfg(t *testing.T) { CredentialsFilePath: "./testdata/bad_lapi-secrets.yaml", }, expected: &ApiCredentialsCfg{}, - expectedErr: "failed unmarshaling api server credentials", + expectedErr: "failed to parse api server credentials", }, { name: "missing field configuration", diff --git a/pkg/csconfig/config_test.go b/pkg/csconfig/config_test.go index 11f1f0cf68d..b69954de178 100644 --- a/pkg/csconfig/config_test.go +++ b/pkg/csconfig/config_test.go @@ -42,5 +42,5 @@ func TestNewCrowdSecConfig(t *testing.T) { func TestDefaultConfig(t *testing.T) { x := NewDefaultConfig() _, err := yaml.Marshal(x) - require.NoError(t, err, "failed marshaling config: %s", err) + require.NoError(t, err, "failed to serialize config: %s", err) } diff --git a/pkg/csconfig/console.go b/pkg/csconfig/console.go index 4c14f5f7d49..21ecbf3d736 100644 --- a/pkg/csconfig/console.go +++ b/pkg/csconfig/console.go @@ -95,7 +95,7 @@ func (c *LocalApiServerCfg) LoadConsoleConfig() error { err = 
yaml.Unmarshal(yamlFile, c.ConsoleConfig) if err != nil { - return fmt.Errorf("unmarshaling console config file '%s': %w", c.ConsoleConfigPath, err) + return fmt.Errorf("parsing console config file '%s': %w", c.ConsoleConfigPath, err) } if c.ConsoleConfig.ShareCustomScenarios == nil { diff --git a/pkg/csconfig/crowdsec_service.go b/pkg/csconfig/crowdsec_service.go index 7820595b46f..7a611a856ee 100644 --- a/pkg/csconfig/crowdsec_service.go +++ b/pkg/csconfig/crowdsec_service.go @@ -143,7 +143,7 @@ func (c *CrowdsecServiceCfg) DumpContextConfigFile() error { // XXX: MakeDirs out, err := yaml.Marshal(c.ContextToSend) if err != nil { - return fmt.Errorf("while marshaling ConsoleConfig (for %s): %w", c.ConsoleContextPath, err) + return fmt.Errorf("while serializing ConsoleConfig (for %s): %w", c.ConsoleContextPath, err) } if err = os.MkdirAll(filepath.Dir(c.ConsoleContextPath), 0700); err != nil { diff --git a/pkg/csconfig/simulation.go b/pkg/csconfig/simulation.go index 947b47e3c1e..afc4ea4f044 100644 --- a/pkg/csconfig/simulation.go +++ b/pkg/csconfig/simulation.go @@ -52,7 +52,7 @@ func (c *Config) LoadSimulation() error { if err := dec.Decode(&simCfg); err != nil { if !errors.Is(err, io.EOF) { - return fmt.Errorf("while unmarshaling simulation file '%s': %w", c.ConfigPaths.SimulationFilePath, err) + return fmt.Errorf("while parsing simulation file '%s': %w", c.ConfigPaths.SimulationFilePath, err) } } diff --git a/pkg/csconfig/simulation_test.go b/pkg/csconfig/simulation_test.go index a678d7edd49..a1e5f0a5b02 100644 --- a/pkg/csconfig/simulation_test.go +++ b/pkg/csconfig/simulation_test.go @@ -60,7 +60,7 @@ func TestSimulationLoading(t *testing.T) { }, Crowdsec: &CrowdsecServiceCfg{}, }, - expectedErr: "while unmarshaling simulation file './testdata/config.yaml': yaml: unmarshal errors", + expectedErr: "while parsing simulation file './testdata/config.yaml': yaml: unmarshal errors", }, { name: "basic bad file content", @@ -71,7 +71,7 @@ func TestSimulationLoading(t 
*testing.T) { }, Crowdsec: &CrowdsecServiceCfg{}, }, - expectedErr: "while unmarshaling simulation file './testdata/config.yaml': yaml: unmarshal errors", + expectedErr: "while parsing simulation file './testdata/config.yaml': yaml: unmarshal errors", }, } diff --git a/pkg/csplugin/broker_test.go b/pkg/csplugin/broker_test.go index f2179acb2c1..48f5a71f773 100644 --- a/pkg/csplugin/broker_test.go +++ b/pkg/csplugin/broker_test.go @@ -38,7 +38,7 @@ func (s *PluginSuite) readconfig() PluginConfig { require.NoError(t, err, "unable to read config file %s", s.pluginConfig) err = yaml.Unmarshal(orig, &config) - require.NoError(t, err, "unable to unmarshal config file") + require.NoError(t, err, "unable to parse config file") return config } @@ -46,7 +46,7 @@ func (s *PluginSuite) readconfig() PluginConfig { func (s *PluginSuite) writeconfig(config PluginConfig) { t := s.T() data, err := yaml.Marshal(&config) - require.NoError(t, err, "unable to marshal config file") + require.NoError(t, err, "unable to serialize config file") err = os.WriteFile(s.pluginConfig, data, 0o644) require.NoError(t, err, "unable to write config file %s", s.pluginConfig) diff --git a/pkg/cwhub/hub.go b/pkg/cwhub/hub.go index a4e81e2c3e2..f74a794a512 100644 --- a/pkg/cwhub/hub.go +++ b/pkg/cwhub/hub.go @@ -79,7 +79,7 @@ func (h *Hub) parseIndex() error { } if err := json.Unmarshal(bidx, &h.items); err != nil { - return fmt.Errorf("failed to unmarshal index: %w", err) + return fmt.Errorf("failed to parse index: %w", err) } h.logger.Debugf("%d item types in hub index", len(ItemTypes)) diff --git a/pkg/cwhub/sync.go b/pkg/cwhub/sync.go index 81d41d55971..7ed14086adf 100644 --- a/pkg/cwhub/sync.go +++ b/pkg/cwhub/sync.go @@ -210,7 +210,7 @@ func newLocalItem(h *Hub, path string, info *itemFileInfo) (*Item, error) { err = yaml.Unmarshal(itemContent, &itemName) if err != nil { - return nil, fmt.Errorf("failed to unmarshal %s: %w", path, err) + return nil, fmt.Errorf("failed to parse %s: %w", path, err) 
} if itemName.Name != "" { diff --git a/pkg/database/ent/machine.go b/pkg/database/ent/machine.go index 76127065791..1b8122060d1 100644 --- a/pkg/database/ent/machine.go +++ b/pkg/database/ent/machine.go @@ -202,7 +202,7 @@ func (m *Machine) assignValues(columns []string, values []any) error { return fmt.Errorf("unexpected type %T for field hubstate", values[i]) } else if value != nil && len(*value) > 0 { if err := json.Unmarshal(*value, &m.Hubstate); err != nil { - return fmt.Errorf("unmarshal field hubstate: %w", err) + return fmt.Errorf("parsing field hubstate: %w", err) } } case machine.FieldDatasources: @@ -210,7 +210,7 @@ func (m *Machine) assignValues(columns []string, values []any) error { return fmt.Errorf("unexpected type %T for field datasources", values[i]) } else if value != nil && len(*value) > 0 { if err := json.Unmarshal(*value, &m.Datasources); err != nil { - return fmt.Errorf("unmarshal field datasources: %w", err) + return fmt.Errorf("parsing field datasources: %w", err) } } default: diff --git a/pkg/database/errors.go b/pkg/database/errors.go index 8e96f52d7ce..77f92707e51 100644 --- a/pkg/database/errors.go +++ b/pkg/database/errors.go @@ -13,8 +13,8 @@ var ( ItemNotFound = errors.New("object not found") ParseTimeFail = errors.New("unable to parse time") ParseDurationFail = errors.New("unable to parse duration") - MarshalFail = errors.New("unable to marshal") - UnmarshalFail = errors.New("unable to unmarshal") + MarshalFail = errors.New("unable to serialize") + UnmarshalFail = errors.New("unable to parse") BulkError = errors.New("unable to insert bulk") ParseType = errors.New("unable to parse type") InvalidIPOrRange = errors.New("invalid ip address / range") diff --git a/pkg/hubtest/coverage.go b/pkg/hubtest/coverage.go index 4156def06d7..e42c1e23455 100644 --- a/pkg/hubtest/coverage.go +++ b/pkg/hubtest/coverage.go @@ -57,7 +57,7 @@ func (h *HubTest) GetAppsecCoverage() ([]Coverage, error) { err = yaml.Unmarshal(yamlFile, configFileData) if 
err != nil { - return nil, fmt.Errorf("unmarshal: %v", err) + return nil, fmt.Errorf("parsing: %v", err) } for _, appsecRulesFile := range configFileData.AppsecRules { @@ -70,7 +70,7 @@ func (h *HubTest) GetAppsecCoverage() ([]Coverage, error) { err = yaml.Unmarshal(yamlFile, appsecRuleData) if err != nil { - return nil, fmt.Errorf("unmarshal: %v", err) + return nil, fmt.Errorf("parsing: %v", err) } appsecRuleName := appsecRuleData.Name diff --git a/pkg/hubtest/hubtest_item.go b/pkg/hubtest/hubtest_item.go index 42792413b5d..bc9c8955d0d 100644 --- a/pkg/hubtest/hubtest_item.go +++ b/pkg/hubtest/hubtest_item.go @@ -111,7 +111,7 @@ func NewTest(name string, hubTest *HubTest) (*HubTestItem, error) { err = yaml.Unmarshal(yamlFile, configFileData) if err != nil { - return nil, fmt.Errorf("unmarshal: %w", err) + return nil, fmt.Errorf("parsing: %w", err) } parserAssertFilePath := filepath.Join(testPath, ParserAssertFileName) @@ -201,7 +201,7 @@ func (t *HubTestItem) InstallHub() error { b, err := yaml.Marshal(n) if err != nil { - return fmt.Errorf("unable to marshal overrides: %w", err) + return fmt.Errorf("unable to serialize overrides: %w", err) } tgtFilename := fmt.Sprintf("%s/parsers/s00-raw/00_overrides.yaml", t.RuntimePath) diff --git a/pkg/leakybucket/buckets_test.go b/pkg/leakybucket/buckets_test.go index 989e03944c3..1da906cb555 100644 --- a/pkg/leakybucket/buckets_test.go +++ b/pkg/leakybucket/buckets_test.go @@ -136,7 +136,7 @@ func testOneBucket(t *testing.T, hub *cwhub.Hub, dir string, tomb *tomb.Tomb) er } if err := yaml.UnmarshalStrict(out.Bytes(), &stages); err != nil { - t.Fatalf("failed unmarshaling %s : %s", stagecfg, err) + t.Fatalf("failed to parse %s : %s", stagecfg, err) } files := []string{} @@ -201,7 +201,7 @@ func testFile(t *testing.T, file string, bs string, holders []BucketFactory, res var ts time.Time if err := ts.UnmarshalText([]byte(in.MarshaledTime)); err != nil { - t.Fatalf("Failed to unmarshal time from input event : %s", err) + 
t.Fatalf("Failed to parse time from input event : %s", err) } if latest_ts.IsZero() { diff --git a/pkg/leakybucket/manager_load.go b/pkg/leakybucket/manager_load.go index 1ae70fbfab3..1b62b29dc3c 100644 --- a/pkg/leakybucket/manager_load.go +++ b/pkg/leakybucket/manager_load.go @@ -493,7 +493,7 @@ func LoadBucketsState(file string, buckets *Buckets, bucketFactories []BucketFac } if err := json.Unmarshal(body, &state); err != nil { - return fmt.Errorf("can't unmarshal state file %s: %w", file, err) + return fmt.Errorf("can't parse state file %s: %w", file, err) } for k, v := range state { diff --git a/pkg/leakybucket/manager_run.go b/pkg/leakybucket/manager_run.go index 673b372d81e..053f9be05da 100644 --- a/pkg/leakybucket/manager_run.go +++ b/pkg/leakybucket/manager_run.go @@ -132,7 +132,7 @@ func DumpBucketsStateAt(deadline time.Time, outputdir string, buckets *Buckets) }) bbuckets, err := json.MarshalIndent(serialized, "", " ") if err != nil { - return "", fmt.Errorf("failed to unmarshal buckets: %s", err) + return "", fmt.Errorf("failed to parse buckets: %s", err) } size, err := tmpFd.Write(bbuckets) if err != nil { @@ -203,7 +203,7 @@ func PourItemToBucket(bucket *Leaky, holder BucketFactory, buckets *Buckets, par var d time.Time err = d.UnmarshalText([]byte(parsed.MarshaledTime)) if err != nil { - holder.logger.Warningf("Failed unmarshaling event time (%s) : %v", parsed.MarshaledTime, err) + holder.logger.Warningf("Failed to parse event time (%s) : %v", parsed.MarshaledTime, err) } if d.After(lastTs.Add(bucket.Duration)) { bucket.logger.Tracef("bucket is expired (curr event: %s, bucket deadline: %s), kill", d, lastTs.Add(bucket.Duration)) diff --git a/pkg/leakybucket/overflows.go b/pkg/leakybucket/overflows.go index e67698e8473..39b0e6a0ec4 100644 --- a/pkg/leakybucket/overflows.go +++ b/pkg/leakybucket/overflows.go @@ -231,7 +231,7 @@ func EventsFromQueue(queue *types.Queue) []*models.Event { raw, err := evt.Time.MarshalText() if err != nil { - 
log.Warningf("while marshaling time '%s' : %s", evt.Time.String(), err) + log.Warningf("while serializing time '%s' : %s", evt.Time.String(), err) } else { *ovflwEvent.Timestamp = string(raw) } @@ -286,12 +286,12 @@ func NewAlert(leaky *Leaky, queue *types.Queue) (types.RuntimeAlert, error) { */ start_at, err := leaky.First_ts.MarshalText() if err != nil { - log.Warningf("failed to marshal start ts %s : %s", leaky.First_ts.String(), err) + log.Warningf("failed to serialize start ts %s : %s", leaky.First_ts.String(), err) } stop_at, err := leaky.Ovflw_ts.MarshalText() if err != nil { - log.Warningf("failed to marshal ovflw ts %s : %s", leaky.First_ts.String(), err) + log.Warningf("failed to serialize ovflw ts %s : %s", leaky.First_ts.String(), err) } capacity := int32(leaky.Capacity) diff --git a/pkg/leakybucket/timemachine.go b/pkg/leakybucket/timemachine.go index e72bb1a464c..34073d1cc5c 100644 --- a/pkg/leakybucket/timemachine.go +++ b/pkg/leakybucket/timemachine.go @@ -24,7 +24,7 @@ func TimeMachinePour(l *Leaky, msg types.Event) { err = d.UnmarshalText([]byte(msg.MarshaledTime)) if err != nil { - log.Warningf("Failed unmarshaling event time (%s) : %v", msg.MarshaledTime, err) + log.Warningf("Failed to parse event time (%s) : %v", msg.MarshaledTime, err) return } diff --git a/pkg/leakybucket/trigger.go b/pkg/leakybucket/trigger.go index 7558f696dc7..d13e57856f9 100644 --- a/pkg/leakybucket/trigger.go +++ b/pkg/leakybucket/trigger.go @@ -23,7 +23,7 @@ func (t *Trigger) OnBucketPour(b *BucketFactory) func(types.Event, *Leaky) *type err := d.UnmarshalText([]byte(msg.MarshaledTime)) if err != nil { - log.Warningf("Failed unmarshaling event time (%s) : %v", msg.MarshaledTime, err) + log.Warningf("Failed to parse event time (%s) : %v", msg.MarshaledTime, err) d = now } diff --git a/pkg/parser/enrich_date.go b/pkg/parser/enrich_date.go index 748a466d7c3..40c8de39da5 100644 --- a/pkg/parser/enrich_date.go +++ b/pkg/parser/enrich_date.go @@ -18,7 +18,7 @@ func 
parseDateWithFormat(date, format string) (string, time.Time) { } retstr, err := t.MarshalText() if err != nil { - log.Warningf("Failed marshaling '%v'", t) + log.Warningf("Failed to serialize '%v'", t) return "", time.Time{} } return string(retstr), t @@ -98,7 +98,7 @@ func ParseDate(in string, p *types.Event, plog *log.Entry) (map[string]string, e now := time.Now().UTC() retstr, err := now.MarshalText() if err != nil { - plog.Warning("Failed marshaling current time") + plog.Warning("Failed to serialize current time") return ret, err } ret["MarshaledTime"] = string(retstr) diff --git a/pkg/parser/enrich_unmarshal.go b/pkg/parser/enrich_unmarshal.go index 7ff91b70aea..dbdd9d3f583 100644 --- a/pkg/parser/enrich_unmarshal.go +++ b/pkg/parser/enrich_unmarshal.go @@ -11,7 +11,7 @@ import ( func unmarshalJSON(field string, p *types.Event, plog *log.Entry) (map[string]string, error) { err := json.Unmarshal([]byte(p.Line.Raw), &p.Unmarshaled) if err != nil { - plog.Errorf("could not unmarshal JSON: %s", err) + plog.Errorf("could not parse JSON: %s", err) return nil, err } plog.Tracef("unmarshaled JSON: %+v", p.Unmarshaled) diff --git a/pkg/parser/parsing_test.go b/pkg/parser/parsing_test.go index 0542c69c049..269d51a1ba2 100644 --- a/pkg/parser/parsing_test.go +++ b/pkg/parser/parsing_test.go @@ -132,7 +132,7 @@ func testOneParser(pctx *UnixParserCtx, ectx EnricherCtx, dir string, b *testing } if err = yaml.UnmarshalStrict(out.Bytes(), &parser_configs); err != nil { - return fmt.Errorf("failed unmarshaling %s: %w", parser_cfg_file, err) + return fmt.Errorf("failed to parse %s: %w", parser_cfg_file, err) } pnodes, err = LoadStages(parser_configs, pctx, ectx) diff --git a/pkg/setup/detect.go b/pkg/setup/detect.go index 01368091a6b..073b221b10c 100644 --- a/pkg/setup/detect.go +++ b/pkg/setup/detect.go @@ -545,7 +545,7 @@ func Detect(detectReader io.Reader, opts DetectOptions) (Setup, error) { // } // err = yaml.Unmarshal(svc.AcquisYAML, svc.DataSource) // if err != nil { - 
// return Setup{}, fmt.Errorf("while unmarshaling datasource for service %s: %w", name, err) + // return Setup{}, fmt.Errorf("while parsing datasource for service %s: %w", name, err) // } // } diff --git a/pkg/setup/install.go b/pkg/setup/install.go index fc5bd380fd9..d63a1ee1775 100644 --- a/pkg/setup/install.go +++ b/pkg/setup/install.go @@ -40,7 +40,7 @@ func decodeSetup(input []byte, fancyErrors bool) (Setup, error) { dec2.KnownFields(true) if err := dec2.Decode(&ret); err != nil { - return ret, fmt.Errorf("while unmarshaling setup file: %w", err) + return ret, fmt.Errorf("while parsing setup file: %w", err) } return ret, nil diff --git a/test/bats/07_setup.bats b/test/bats/07_setup.bats index 2106d3ab6b2..f832ac572d2 100644 --- a/test/bats/07_setup.bats +++ b/test/bats/07_setup.bats @@ -819,6 +819,6 @@ update-notifier-motd.timer enabled enabled setup: alsdk al; sdf EOT - assert_output "while unmarshaling setup file: yaml: line 2: could not find expected ':'" + assert_output "while parsing setup file: yaml: line 2: could not find expected ':'" assert_stderr --partial "invalid setup file" } From f97b9c84da473ac60249bd2d6d7a865a4bdf7310 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 17 Sep 2024 15:19:30 +0200 Subject: [PATCH 287/318] CI: update golangci-lint to v1.61, yq to 4.44.3 (#3241) --- .github/workflows/go-tests-windows.yml | 2 +- .github/workflows/go-tests.yml | 2 +- .golangci.yml | 2 +- Dockerfile | 2 +- Dockerfile.debian | 2 +- test/bats.mk | 4 ++-- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/go-tests-windows.yml b/.github/workflows/go-tests-windows.yml index e70d6e352f1..a31e42cf702 100644 --- a/.github/workflows/go-tests-windows.yml +++ b/.github/workflows/go-tests-windows.yml @@ -57,6 +57,6 @@ jobs: - name: golangci-lint uses: golangci/golangci-lint-action@v6 with: - version: v1.60 + version: v1.61 args: --issues-exit-code=1 --timeout 10m only-new-issues: false diff --git 
a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index df5c0b4fb88..d4e3a3d843a 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -163,6 +163,6 @@ jobs: - name: golangci-lint uses: golangci/golangci-lint-action@v6 with: - version: v1.60 + version: v1.61 args: --issues-exit-code=1 --timeout 10m only-new-issues: false diff --git a/.golangci.yml b/.golangci.yml index 78b666d25b4..e90f3841ffd 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -185,6 +185,7 @@ linters: # DEPRECATED by golangi-lint # - execinquery + - exportloopref # # Redundant @@ -217,7 +218,6 @@ linters: # - durationcheck # check for two durations multiplied together # - errcheck # errcheck is a program for checking for unchecked errors in Go code. These unchecked errors can be critical bugs in some cases # - errorlint # errorlint is a linter for that can be used to find code that will cause problems with the error wrapping scheme introduced in Go 1.13. - # - exportloopref # checks for pointers to enclosing loop variables # - ginkgolinter # enforces standards of using ginkgo and gomega # - gocheckcompilerdirectives # Checks that go compiler directive comments (//go:) are valid. # - gochecknoinits # Checks that no init functions are present in Go code diff --git a/Dockerfile b/Dockerfile index 93ba1010fc2..450ea69017f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -16,7 +16,7 @@ RUN apk add --no-cache git g++ gcc libc-dev make bash gettext binutils-gold core cd re2-${RE2_VERSION} && \ make install && \ echo "githubciXXXXXXXXXXXXXXXXXXXXXXXX" > /etc/machine-id && \ - go install github.com/mikefarah/yq/v4@v4.43.1 + go install github.com/mikefarah/yq/v4@v4.44.3 COPY . . 
diff --git a/Dockerfile.debian b/Dockerfile.debian index dd55d2c3e48..8bf2698c786 100644 --- a/Dockerfile.debian +++ b/Dockerfile.debian @@ -21,7 +21,7 @@ RUN apt-get update && \ make && \ make install && \ echo "githubciXXXXXXXXXXXXXXXXXXXXXXXX" > /etc/machine-id && \ - go install github.com/mikefarah/yq/v4@v4.43.1 + go install github.com/mikefarah/yq/v4@v4.44.3 COPY . . diff --git a/test/bats.mk b/test/bats.mk index 631cc55579b..72ac8863f72 100644 --- a/test/bats.mk +++ b/test/bats.mk @@ -67,8 +67,8 @@ bats-check-requirements: ## Check dependencies for functional tests @$(TEST_DIR)/bin/check-requirements bats-update-tools: ## Install/update tools required for functional tests - # yq v4.43.1 - GOBIN=$(TEST_DIR)/tools go install github.com/mikefarah/yq/v4@c35ec752e38ea0c096d3c44e13cfc0797ac394d8 + # yq v4.44.3 + GOBIN=$(TEST_DIR)/tools go install github.com/mikefarah/yq/v4@bbdd97482f2d439126582a59689eb1c855944955 # cfssl v1.6.5 GOBIN=$(TEST_DIR)/tools go install github.com/cloudflare/cfssl/cmd/cfssl@96259aa29c9cc9b2f4e04bad7d4bc152e5405dda GOBIN=$(TEST_DIR)/tools go install github.com/cloudflare/cfssl/cmd/cfssljson@96259aa29c9cc9b2f4e04bad7d4bc152e5405dda From 5f22c78fcf9d3f70c89cde702974bd42236f55dd Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 17 Sep 2024 16:10:56 +0200 Subject: [PATCH 288/318] enable linters: copyloopvar, intrange (#3184) * enable linters: copyloopvar, intrange * lint --- .golangci.yml | 13 +++---------- pkg/acquisition/acquisition.go | 4 ++-- pkg/acquisition/modules/docker/utils.go | 2 +- pkg/apiclient/decisions_service.go | 2 +- pkg/apiserver/apic_test.go | 5 +++-- pkg/csplugin/broker_win_test.go | 1 - pkg/csplugin/utils_windows.go | 2 +- pkg/csplugin/utils_windows_test.go | 1 - pkg/cwhub/sync.go | 2 +- pkg/leakybucket/manager_run.go | 2 +- pkg/setup/detect_test.go | 1 - 11 files changed, 13 insertions(+), 22 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index e90f3841ffd..1787f0487a4 
100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -197,12 +197,9 @@ linters: - funlen # revive - gocognit # revive - # - # Disabled until fixed for go 1.22 - # + # Disabled atm - - copyloopvar # copyloopvar is a linter detects places where loop variables are copied - - intrange # intrange is a linter to find places where for loops could make use of an integer range. + - intrange # intrange is a linter to find places where for loops could make use of an integer range. # # Enabled @@ -212,6 +209,7 @@ linters: # - asciicheck # checks that all code identifiers does not have non-ASCII symbols in the name # - bidichk # Checks for dangerous unicode character sequences # - bodyclose # checks whether HTTP response body is closed successfully + # - copyloopvar # copyloopvar is a linter detects places where loop variables are copied # - decorder # check declaration order and count of types, constants, variables and functions # - depguard # Go linter that checks if package imports are in a list of acceptable packages # - dupword # checks for duplicate words in the source code @@ -490,11 +488,6 @@ issues: path: "cmd/crowdsec-cli/idgen/password.go" text: "deep-exit: .*" - - linters: - - revive - path: "cmd/crowdsec-cli/utils.go" - text: "deep-exit: .*" - - linters: - revive path: "pkg/leakybucket/overflows.go" diff --git a/pkg/acquisition/acquisition.go b/pkg/acquisition/acquisition.go index a737881dd4d..b2493bbb9b7 100644 --- a/pkg/acquisition/acquisition.go +++ b/pkg/acquisition/acquisition.go @@ -304,7 +304,7 @@ func LoadAcquisitionFromFile(config *csconfig.CrowdsecServiceCfg, prom *csconfig func GetMetrics(sources []DataSource, aggregated bool) error { var metrics []prometheus.Collector - for i := range len(sources) { + for i := range sources { if aggregated { metrics = sources[i].GetMetrics() } else { @@ -378,7 +378,7 @@ func StartAcquisition(sources []DataSource, output chan types.Event, AcquisTomb return nil } - for i := range len(sources) { + for i := range sources { 
subsrc := sources[i] // ensure its a copy log.Debugf("starting one source %d/%d ->> %T", i, len(sources), subsrc) diff --git a/pkg/acquisition/modules/docker/utils.go b/pkg/acquisition/modules/docker/utils.go index c724f581194..6a0d494097f 100644 --- a/pkg/acquisition/modules/docker/utils.go +++ b/pkg/acquisition/modules/docker/utils.go @@ -22,7 +22,7 @@ func parseKeyToMap(m map[string]interface{}, key string, value string) { return } - for i := range len(parts) { + for i := range parts { if parts[i] == "" { return } diff --git a/pkg/apiclient/decisions_service.go b/pkg/apiclient/decisions_service.go index 388a870f999..98f26cad9ae 100644 --- a/pkg/apiclient/decisions_service.go +++ b/pkg/apiclient/decisions_service.go @@ -144,7 +144,7 @@ func (s *DecisionsService) FetchV3Decisions(ctx context.Context, url string) (*m partialDecisions := make([]*models.Decision, len(decisionsGroup.Decisions)) for idx, decision := range decisionsGroup.Decisions { - decision := decision // fix exportloopref linter message + decision := decision //nolint:copyloopvar // fix exportloopref linter message partialDecisions[idx] = &models.Decision{ Scenario: &scenarioDeleted, Scope: decisionsGroup.Scope, diff --git a/pkg/apiserver/apic_test.go b/pkg/apiserver/apic_test.go index 51887006ad4..058e25079e0 100644 --- a/pkg/apiserver/apic_test.go +++ b/pkg/apiserver/apic_test.go @@ -1091,7 +1091,6 @@ func TestAPICPush(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { api := getAPIC(t) api.pushInterval = time.Millisecond @@ -1114,8 +1113,10 @@ func TestAPICPush(t *testing.T) { httpmock.RegisterResponder("POST", "http://api.crowdsec.net/api/signals", httpmock.NewBytesResponder(200, []byte{})) + // capture the alerts to avoid datarace + alerts := tc.alerts go func() { - api.AlertsAddChan <- tc.alerts + api.AlertsAddChan <- alerts time.Sleep(time.Second) api.Shutdown() diff --git a/pkg/csplugin/broker_win_test.go b/pkg/csplugin/broker_win_test.go index 
97a3ad33deb..b7956bdcc0a 100644 --- a/pkg/csplugin/broker_win_test.go +++ b/pkg/csplugin/broker_win_test.go @@ -54,7 +54,6 @@ func (s *PluginSuite) TestBrokerInit() { } for _, tc := range tests { - tc := tc s.Run(tc.name, func() { t := s.T() if tc.action != nil { diff --git a/pkg/csplugin/utils_windows.go b/pkg/csplugin/utils_windows.go index 8d4956ceeeb..91002079398 100644 --- a/pkg/csplugin/utils_windows.go +++ b/pkg/csplugin/utils_windows.go @@ -116,7 +116,7 @@ func CheckPerms(path string) error { */ aceCount := rs.Field(3).Uint() - for i := uint64(0); i < aceCount; i++ { + for i := range aceCount { ace := &AccessAllowedAce{} ret, _, _ := procGetAce.Call(uintptr(unsafe.Pointer(dacl)), uintptr(i), uintptr(unsafe.Pointer(&ace))) if ret == 0 { diff --git a/pkg/csplugin/utils_windows_test.go b/pkg/csplugin/utils_windows_test.go index 6a76e1215e5..1eb4dfb9033 100644 --- a/pkg/csplugin/utils_windows_test.go +++ b/pkg/csplugin/utils_windows_test.go @@ -37,7 +37,6 @@ func TestGetPluginNameAndTypeFromPath(t *testing.T) { }, } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { got, got1, err := getPluginTypeAndSubtypeFromPath(tc.path) cstest.RequireErrorContains(t, err, tc.expectedErr) diff --git a/pkg/cwhub/sync.go b/pkg/cwhub/sync.go index 7ed14086adf..c82822e64ef 100644 --- a/pkg/cwhub/sync.go +++ b/pkg/cwhub/sync.go @@ -24,7 +24,7 @@ func isYAMLFileName(path string) bool { // returns error if the symlink is dangling or too many symlinks are followed func resolveSymlink(path string) (string, error) { const maxSymlinks = 10 // Prevent infinite loops - for i := 0; i < maxSymlinks; i++ { + for range maxSymlinks { fi, err := os.Lstat(path) if err != nil { return "", err // dangling link diff --git a/pkg/leakybucket/manager_run.go b/pkg/leakybucket/manager_run.go index 053f9be05da..2858d8b5635 100644 --- a/pkg/leakybucket/manager_run.go +++ b/pkg/leakybucket/manager_run.go @@ -298,7 +298,7 @@ func PourItemToHolders(parsed types.Event, holders 
[]BucketFactory, buckets *Buc BucketPourCache["OK"] = append(BucketPourCache["OK"], evt.(types.Event)) } //find the relevant holders (scenarios) - for idx := range len(holders) { + for idx := range holders { //for idx, holder := range holders { //evaluate bucket's condition diff --git a/pkg/setup/detect_test.go b/pkg/setup/detect_test.go index 6f61b5dac78..588e74dab54 100644 --- a/pkg/setup/detect_test.go +++ b/pkg/setup/detect_test.go @@ -184,7 +184,6 @@ func TestNormalizeVersion(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.version, func(t *testing.T) { t.Parallel() actual := setup.NormalizeVersion(tc.version) From b93b240bd4a7f094f1143db19553dc442028cb79 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 17 Sep 2024 17:07:30 +0200 Subject: [PATCH 289/318] lint: enable (some) gocritic checks (#3238) * lint: gocritic configuration * lint: octal literals * lint: gocritic (filepath.Join) * ling: gocritic (badRegexp, regexpSimplify) * lint: gocritic (preferStringWriter) * lint: gocritic (emtpyStringTest) * filepath.Clean(a+b) -> filepath.Join(a, b) * gofumpt --- .golangci.yml | 47 ++++++++++++------- cmd/crowdsec-cli/clialert/alerts.go | 2 +- cmd/crowdsec-cli/clibouncer/bouncers.go | 4 +- cmd/crowdsec-cli/clihub/utils_table.go | 8 ++-- cmd/crowdsec-cli/climachine/machines.go | 6 +-- cmd/crowdsec-cli/climetrics/list.go | 2 +- cmd/crowdsec-cli/climetrics/statacquis.go | 4 +- cmd/crowdsec-cli/climetrics/statalert.go | 4 +- .../climetrics/statappsecengine.go | 4 +- cmd/crowdsec-cli/climetrics/statappsecrule.go | 2 +- cmd/crowdsec-cli/climetrics/statbucket.go | 4 +- cmd/crowdsec-cli/climetrics/statdecision.go | 4 +- cmd/crowdsec-cli/climetrics/statlapi.go | 4 +- .../climetrics/statlapibouncer.go | 4 +- .../climetrics/statlapidecision.go | 4 +- .../climetrics/statlapimachine.go | 4 +- cmd/crowdsec-cli/climetrics/statparser.go | 4 +- cmd/crowdsec-cli/climetrics/statstash.go | 4 +- 
cmd/crowdsec-cli/climetrics/statwhitelist.go | 4 +- cmd/crowdsec-cli/idgen/password.go | 2 +- cmd/crowdsec/pour.go | 2 +- .../modules/cloudwatch/cloudwatch.go | 21 ++++----- pkg/acquisition/modules/docker/docker.go | 4 +- pkg/acquisition/modules/file/file.go | 6 +-- .../modules/journalctl/journalctl.go | 7 +-- pkg/acquisition/modules/s3/s3.go | 15 +++--- pkg/apiclient/resperr.go | 4 +- pkg/apiserver/controllers/v1/decisions.go | 23 +++++---- pkg/csconfig/config_paths.go | 10 ++-- pkg/csconfig/crowdsec_service.go | 4 +- pkg/csconfig/simulation.go | 2 +- pkg/csplugin/listfiles_test.go | 2 +- pkg/csplugin/utils.go | 4 +- pkg/cwhub/cwhub_test.go | 2 +- pkg/database/utils.go | 5 +- pkg/dumps/parser_dump.go | 6 +-- pkg/exprhelpers/debugger.go | 5 +- pkg/exprhelpers/helpers.go | 5 +- pkg/fflag/features.go | 2 +- pkg/hubtest/hubtest.go | 4 +- pkg/hubtest/nucleirunner.go | 6 +-- pkg/hubtest/regexp.go | 4 +- pkg/hubtest/utils.go | 2 +- pkg/longpollclient/client.go | 6 +-- pkg/setup/units.go | 2 +- 45 files changed, 143 insertions(+), 131 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 1787f0487a4..4918fb99fe3 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -178,6 +178,37 @@ linters-settings: # Allow blocks to end with comments allow-trailing-comment: true + gocritic: + enable-all: true + disabled-checks: + - typeDefFirst + - paramTypeCombine + - httpNoBody + - ifElseChain + - importShadow + - hugeParam + - rangeValCopy + - commentedOutCode + - commentedOutImport + - unnamedResult + - sloppyReassign + - appendCombine + - captLocal + - typeUnparen + - commentFormatting + - deferInLoop # + - sprintfQuotedString # + - whyNoLint + - equalFold # + - unnecessaryBlock # + - ptrToRefParam # + - stringXbytes # + - appendAssign # + - tooManyResultsChecker + - unnecessaryDefer + - docStub + - preferFprint + linters: enable-all: true disable: @@ -363,22 +394,6 @@ issues: - errcheck text: "Error return value of `.*` is not checked" - - linters: - - gocritic - text: 
"ifElseChain: rewrite if-else to switch statement" - - - linters: - - gocritic - text: "captLocal: `.*' should not be capitalized" - - - linters: - - gocritic - text: "appendAssign: append result not assigned to the same slice" - - - linters: - - gocritic - text: "commentFormatting: put a space between `//` and comment text" - # Will fix, trivial - just beware of merge conflicts - linters: diff --git a/cmd/crowdsec-cli/clialert/alerts.go b/cmd/crowdsec-cli/clialert/alerts.go index 006d7ac7d8c..0965e1e13d0 100644 --- a/cmd/crowdsec-cli/clialert/alerts.go +++ b/cmd/crowdsec-cli/clialert/alerts.go @@ -44,7 +44,7 @@ func decisionsFromAlert(alert *models.Alert) string { } for _, key := range maptools.SortedKeys(decMap) { - if len(ret) > 0 { + if ret != "" { ret += " " } diff --git a/cmd/crowdsec-cli/clibouncer/bouncers.go b/cmd/crowdsec-cli/clibouncer/bouncers.go index 25c80d16404..0c0fc8851c9 100644 --- a/cmd/crowdsec-cli/clibouncer/bouncers.go +++ b/cmd/crowdsec-cli/clibouncer/bouncers.go @@ -97,7 +97,7 @@ func (cli *cliBouncers) listHuman(out io.Writer, bouncers ent.Bouncers) { t.AppendRow(table.Row{b.Name, b.IPAddress, revoked, lastPull, b.Type, b.Version, b.AuthType}) } - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, t.Render()+"\n") } // bouncerInfo contains only the data we want for inspect/list @@ -443,7 +443,7 @@ func (cli *cliBouncers) inspectHuman(out io.Writer, bouncer *ent.Bouncer) { t.AppendRow(table.Row{"Feature Flags", ff}) } - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, t.Render()+"\n") } func (cli *cliBouncers) inspect(bouncer *ent.Bouncer) error { diff --git a/cmd/crowdsec-cli/clihub/utils_table.go b/cmd/crowdsec-cli/clihub/utils_table.go index 018071d91bb..98f14341b10 100644 --- a/cmd/crowdsec-cli/clihub/utils_table.go +++ b/cmd/crowdsec-cli/clihub/utils_table.go @@ -22,7 +22,7 @@ func listHubItemTable(out io.Writer, wantColor string, title string, items []*cw } io.WriteString(out, title+"\n") - io.WriteString(out, 
t.Render() + "\n") + io.WriteString(out, t.Render()+"\n") } func appsecMetricsTable(out io.Writer, wantColor string, itemName string, metrics map[string]int) { @@ -35,7 +35,7 @@ func appsecMetricsTable(out io.Writer, wantColor string, itemName string, metric }) io.WriteString(out, fmt.Sprintf("\n - (AppSec Rule) %s:\n", itemName)) - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, t.Render()+"\n") } func scenarioMetricsTable(out io.Writer, wantColor string, itemName string, metrics map[string]int) { @@ -55,7 +55,7 @@ func scenarioMetricsTable(out io.Writer, wantColor string, itemName string, metr }) io.WriteString(out, fmt.Sprintf("\n - (Scenario) %s:\n", itemName)) - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, t.Render()+"\n") } func parserMetricsTable(out io.Writer, wantColor string, itemName string, metrics map[string]map[string]int) { @@ -80,6 +80,6 @@ func parserMetricsTable(out io.Writer, wantColor string, itemName string, metric if showTable { io.WriteString(out, fmt.Sprintf("\n - (Parser) %s:\n", itemName)) - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, t.Render()+"\n") } } diff --git a/cmd/crowdsec-cli/climachine/machines.go b/cmd/crowdsec-cli/climachine/machines.go index 3df176d786d..30948f43056 100644 --- a/cmd/crowdsec-cli/climachine/machines.go +++ b/cmd/crowdsec-cli/climachine/machines.go @@ -123,7 +123,7 @@ func (cli *cliMachines) inspectHubHuman(out io.Writer, machine *ent.Machine) { t.AppendHeader(table.Row{"Name", "Status", "Version"}) t.SetTitle(itemType) t.AppendRows(rows) - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, t.Render()+"\n") } } @@ -145,7 +145,7 @@ func (cli *cliMachines) listHuman(out io.Writer, machines ent.Machines) { t.AppendRow(table.Row{m.MachineId, m.IpAddress, m.UpdatedAt.Format(time.RFC3339), validated, m.Version, clientinfo.GetOSNameAndVersion(m), m.AuthType, hb}) } - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, t.Render()+"\n") } // machineInfo 
contains only the data we want for inspect/list: no hub status, scenarios, edges, etc. @@ -611,7 +611,7 @@ func (cli *cliMachines) inspectHuman(out io.Writer, machine *ent.Machine) { t.AppendRow(table.Row{"Collections", coll.Name}) } - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, t.Render()+"\n") } func (cli *cliMachines) inspect(machine *ent.Machine) error { diff --git a/cmd/crowdsec-cli/climetrics/list.go b/cmd/crowdsec-cli/climetrics/list.go index ba827634052..ddb2baac14d 100644 --- a/cmd/crowdsec-cli/climetrics/list.go +++ b/cmd/crowdsec-cli/climetrics/list.go @@ -64,7 +64,7 @@ func (cli *cliMetrics) list() error { t.AppendRow(table.Row{metric.Type, metric.Title, metric.Description}) } - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, t.Render()+"\n") case "json": x, err := json.MarshalIndent(allMetrics, "", " ") if err != nil { diff --git a/cmd/crowdsec-cli/climetrics/statacquis.go b/cmd/crowdsec-cli/climetrics/statacquis.go index 827dcf036c3..0af2e796f40 100644 --- a/cmd/crowdsec-cli/climetrics/statacquis.go +++ b/cmd/crowdsec-cli/climetrics/statacquis.go @@ -37,8 +37,8 @@ func (s statAcquis) Table(out io.Writer, wantColor string, noUnit bool, showEmpt log.Warningf("while collecting acquis stats: %s", err) } else if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title + ":\n") - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, title+":\n") + io.WriteString(out, t.Render()+"\n") io.WriteString(out, "\n") } } diff --git a/cmd/crowdsec-cli/climetrics/statalert.go b/cmd/crowdsec-cli/climetrics/statalert.go index e48dd6c924f..942eceaa75c 100644 --- a/cmd/crowdsec-cli/climetrics/statalert.go +++ b/cmd/crowdsec-cli/climetrics/statalert.go @@ -38,8 +38,8 @@ func (s statAlert) Table(out io.Writer, wantColor string, noUnit bool, showEmpty if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title + ":\n") - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, 
title+":\n") + io.WriteString(out, t.Render()+"\n") io.WriteString(out, "\n") } } diff --git a/cmd/crowdsec-cli/climetrics/statappsecengine.go b/cmd/crowdsec-cli/climetrics/statappsecengine.go index 4a249e11687..d924375247f 100644 --- a/cmd/crowdsec-cli/climetrics/statappsecengine.go +++ b/cmd/crowdsec-cli/climetrics/statappsecengine.go @@ -34,8 +34,8 @@ func (s statAppsecEngine) Table(out io.Writer, wantColor string, noUnit bool, sh log.Warningf("while collecting appsec stats: %s", err) } else if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title + ":\n") - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, title+":\n") + io.WriteString(out, t.Render()+"\n") io.WriteString(out, "\n") } } diff --git a/cmd/crowdsec-cli/climetrics/statappsecrule.go b/cmd/crowdsec-cli/climetrics/statappsecrule.go index 2f859d70cfb..e06a7c2e2b3 100644 --- a/cmd/crowdsec-cli/climetrics/statappsecrule.go +++ b/cmd/crowdsec-cli/climetrics/statappsecrule.go @@ -41,7 +41,7 @@ func (s statAppsecRule) Table(out io.Writer, wantColor string, noUnit bool, show log.Warningf("while collecting appsec rules stats: %s", err) } else if numRows > 0 || showEmpty { io.WriteString(out, fmt.Sprintf("Appsec '%s' Rules Metrics:\n", appsecEngine)) - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, t.Render()+"\n") io.WriteString(out, "\n") } } diff --git a/cmd/crowdsec-cli/climetrics/statbucket.go b/cmd/crowdsec-cli/climetrics/statbucket.go index 507d9f3a476..1882fe21df1 100644 --- a/cmd/crowdsec-cli/climetrics/statbucket.go +++ b/cmd/crowdsec-cli/climetrics/statbucket.go @@ -35,8 +35,8 @@ func (s statBucket) Table(out io.Writer, wantColor string, noUnit bool, showEmpt log.Warningf("while collecting scenario stats: %s", err) } else if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title + ":\n") - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, title+":\n") + io.WriteString(out, t.Render()+"\n") 
io.WriteString(out, "\n") } } diff --git a/cmd/crowdsec-cli/climetrics/statdecision.go b/cmd/crowdsec-cli/climetrics/statdecision.go index 145665cfba2..b862f49ff12 100644 --- a/cmd/crowdsec-cli/climetrics/statdecision.go +++ b/cmd/crowdsec-cli/climetrics/statdecision.go @@ -53,8 +53,8 @@ func (s statDecision) Table(out io.Writer, wantColor string, noUnit bool, showEm if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title + ":\n") - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, title+":\n") + io.WriteString(out, t.Render()+"\n") io.WriteString(out, "\n") } } diff --git a/cmd/crowdsec-cli/climetrics/statlapi.go b/cmd/crowdsec-cli/climetrics/statlapi.go index 45b384708bf..9559eacf0f4 100644 --- a/cmd/crowdsec-cli/climetrics/statlapi.go +++ b/cmd/crowdsec-cli/climetrics/statlapi.go @@ -49,8 +49,8 @@ func (s statLapi) Table(out io.Writer, wantColor string, noUnit bool, showEmpty if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title + ":\n") - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, title+":\n") + io.WriteString(out, t.Render()+"\n") io.WriteString(out, "\n") } } diff --git a/cmd/crowdsec-cli/climetrics/statlapibouncer.go b/cmd/crowdsec-cli/climetrics/statlapibouncer.go index 828ccb33413..5e5f63a79d3 100644 --- a/cmd/crowdsec-cli/climetrics/statlapibouncer.go +++ b/cmd/crowdsec-cli/climetrics/statlapibouncer.go @@ -35,8 +35,8 @@ func (s statLapiBouncer) Table(out io.Writer, wantColor string, noUnit bool, sho if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title + ":\n") - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, title+":\n") + io.WriteString(out, t.Render()+"\n") io.WriteString(out, "\n") } } diff --git a/cmd/crowdsec-cli/climetrics/statlapidecision.go b/cmd/crowdsec-cli/climetrics/statlapidecision.go index ffc999555c1..44f0e8f4b87 100644 --- a/cmd/crowdsec-cli/climetrics/statlapidecision.go +++ 
b/cmd/crowdsec-cli/climetrics/statlapidecision.go @@ -57,8 +57,8 @@ func (s statLapiDecision) Table(out io.Writer, wantColor string, noUnit bool, sh if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title + ":\n") - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, title+":\n") + io.WriteString(out, t.Render()+"\n") io.WriteString(out, "\n") } } diff --git a/cmd/crowdsec-cli/climetrics/statlapimachine.go b/cmd/crowdsec-cli/climetrics/statlapimachine.go index 09abe2dd44b..0e6693bea82 100644 --- a/cmd/crowdsec-cli/climetrics/statlapimachine.go +++ b/cmd/crowdsec-cli/climetrics/statlapimachine.go @@ -35,8 +35,8 @@ func (s statLapiMachine) Table(out io.Writer, wantColor string, noUnit bool, sho if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title + ":\n") - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, title+":\n") + io.WriteString(out, t.Render()+"\n") io.WriteString(out, "\n") } } diff --git a/cmd/crowdsec-cli/climetrics/statparser.go b/cmd/crowdsec-cli/climetrics/statparser.go index 0b3512052b9..520e68f9adf 100644 --- a/cmd/crowdsec-cli/climetrics/statparser.go +++ b/cmd/crowdsec-cli/climetrics/statparser.go @@ -36,8 +36,8 @@ func (s statParser) Table(out io.Writer, wantColor string, noUnit bool, showEmpt log.Warningf("while collecting parsers stats: %s", err) } else if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title + ":\n") - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, title+":\n") + io.WriteString(out, t.Render()+"\n") io.WriteString(out, "\n") } } diff --git a/cmd/crowdsec-cli/climetrics/statstash.go b/cmd/crowdsec-cli/climetrics/statstash.go index 5938ac05fc8..2729de931a1 100644 --- a/cmd/crowdsec-cli/climetrics/statstash.go +++ b/cmd/crowdsec-cli/climetrics/statstash.go @@ -52,8 +52,8 @@ func (s statStash) Table(out io.Writer, wantColor string, noUnit bool, showEmpty if numRows > 0 || showEmpty { title, _ := 
s.Description() - io.WriteString(out, title + ":\n") - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, title+":\n") + io.WriteString(out, t.Render()+"\n") io.WriteString(out, "\n") } } diff --git a/cmd/crowdsec-cli/climetrics/statwhitelist.go b/cmd/crowdsec-cli/climetrics/statwhitelist.go index ccb7e52153b..7f533b45b4b 100644 --- a/cmd/crowdsec-cli/climetrics/statwhitelist.go +++ b/cmd/crowdsec-cli/climetrics/statwhitelist.go @@ -36,8 +36,8 @@ func (s statWhitelist) Table(out io.Writer, wantColor string, noUnit bool, showE log.Warningf("while collecting parsers stats: %s", err) } else if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title + ":\n") - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, title+":\n") + io.WriteString(out, t.Render()+"\n") io.WriteString(out, "\n") } } diff --git a/cmd/crowdsec-cli/idgen/password.go b/cmd/crowdsec-cli/idgen/password.go index cd798fdcc48..e0faa4daacc 100644 --- a/cmd/crowdsec-cli/idgen/password.go +++ b/cmd/crowdsec-cli/idgen/password.go @@ -1,8 +1,8 @@ package idgen import ( - "math/big" saferand "crypto/rand" + "math/big" log "github.com/sirupsen/logrus" ) diff --git a/cmd/crowdsec/pour.go b/cmd/crowdsec/pour.go index 1382a909ab3..2fc7d7e42c9 100644 --- a/cmd/crowdsec/pour.go +++ b/cmd/crowdsec/pour.go @@ -59,7 +59,7 @@ func runPour(input chan types.Event, holders []leaky.BucketFactory, buckets *lea globalBucketPourKo.Inc() } - if len(parsed.MarshaledTime) != 0 { + if parsed.MarshaledTime != "" { if err := lastProcessedItem.UnmarshalText([]byte(parsed.MarshaledTime)); err != nil { log.Warningf("failed to parse time from event : %s", err) } diff --git a/pkg/acquisition/modules/cloudwatch/cloudwatch.go b/pkg/acquisition/modules/cloudwatch/cloudwatch.go index 1a78ae6fa7a..d6f33b68050 100644 --- a/pkg/acquisition/modules/cloudwatch/cloudwatch.go +++ b/pkg/acquisition/modules/cloudwatch/cloudwatch.go @@ -57,16 +57,16 @@ type CloudwatchSource struct { // 
CloudwatchSourceConfiguration allows user to define one or more streams to monitor within a cloudwatch log group type CloudwatchSourceConfiguration struct { configuration.DataSourceCommonCfg `yaml:",inline"` - GroupName string `yaml:"group_name"` //the group name to be monitored - StreamRegexp *string `yaml:"stream_regexp,omitempty"` //allow to filter specific streams + GroupName string `yaml:"group_name"` // the group name to be monitored + StreamRegexp *string `yaml:"stream_regexp,omitempty"` // allow to filter specific streams StreamName *string `yaml:"stream_name,omitempty"` StartTime, EndTime *time.Time `yaml:"-"` - DescribeLogStreamsLimit *int64 `yaml:"describelogstreams_limit,omitempty"` //batch size for DescribeLogStreamsPagesWithContext + DescribeLogStreamsLimit *int64 `yaml:"describelogstreams_limit,omitempty"` // batch size for DescribeLogStreamsPagesWithContext GetLogEventsPagesLimit *int64 `yaml:"getlogeventspages_limit,omitempty"` - PollNewStreamInterval *time.Duration `yaml:"poll_new_stream_interval,omitempty"` //frequency at which we poll for new streams within the log group - MaxStreamAge *time.Duration `yaml:"max_stream_age,omitempty"` //monitor only streams that have been updated within $duration - PollStreamInterval *time.Duration `yaml:"poll_stream_interval,omitempty"` //frequency at which we poll each stream - StreamReadTimeout *time.Duration `yaml:"stream_read_timeout,omitempty"` //stop monitoring streams that haven't been updated within $duration, might be reopened later tho + PollNewStreamInterval *time.Duration `yaml:"poll_new_stream_interval,omitempty"` // frequency at which we poll for new streams within the log group + MaxStreamAge *time.Duration `yaml:"max_stream_age,omitempty"` // monitor only streams that have been updated within $duration + PollStreamInterval *time.Duration `yaml:"poll_stream_interval,omitempty"` // frequency at which we poll each stream + StreamReadTimeout *time.Duration `yaml:"stream_read_timeout,omitempty"` // 
stop monitoring streams that haven't been updated within $duration, might be reopened later tho AwsApiCallTimeout *time.Duration `yaml:"aws_api_timeout,omitempty"` AwsProfile *string `yaml:"aws_profile,omitempty"` PrependCloudwatchTimestamp *bool `yaml:"prepend_cloudwatch_timestamp,omitempty"` @@ -86,7 +86,7 @@ type LogStreamTailConfig struct { logger *log.Entry ExpectMode int t tomb.Tomb - StartTime, EndTime time.Time //only used for CatMode + StartTime, EndTime time.Time // only used for CatMode } var ( @@ -111,7 +111,7 @@ func (cw *CloudwatchSource) UnmarshalConfig(yamlConfig []byte) error { return fmt.Errorf("cannot parse CloudwatchSource configuration: %w", err) } - if len(cw.Config.GroupName) == 0 { + if cw.Config.GroupName == "" { return errors.New("group_name is mandatory for CloudwatchSource") } @@ -357,7 +357,6 @@ func (cw *CloudwatchSource) WatchLogGroupForStreams(out chan LogStreamTailConfig // LogStreamManager receives the potential streams to monitor, and starts a go routine when needed func (cw *CloudwatchSource) LogStreamManager(in chan LogStreamTailConfig, outChan chan types.Event) error { - cw.logger.Debugf("starting to monitor streams for %s", cw.Config.GroupName) pollDeadStreamInterval := time.NewTicker(def_PollDeadStreamInterval) @@ -638,7 +637,7 @@ func (cw *CloudwatchSource) OneShotAcquisition(out chan types.Event, t *tomb.Tom func (cw *CloudwatchSource) CatLogStream(cfg *LogStreamTailConfig, outChan chan types.Event) error { var startFrom *string - var head = true + head := true /*convert the times*/ startTime := cfg.StartTime.UTC().Unix() * 1000 endTime := cfg.EndTime.UTC().Unix() * 1000 diff --git a/pkg/acquisition/modules/docker/docker.go b/pkg/acquisition/modules/docker/docker.go index 9a6e13feee4..44fee0a99a2 100644 --- a/pkg/acquisition/modules/docker/docker.go +++ b/pkg/acquisition/modules/docker/docker.go @@ -397,14 +397,13 @@ func (d *DockerSource) EvalContainer(container dockerTypes.Container) *Container for _, containerName := 
range d.Config.ContainerName { for _, name := range container.Names { - if strings.HasPrefix(name, "/") && len(name) > 0 { + if strings.HasPrefix(name, "/") && name != "" { name = name[1:] } if name == containerName { return &ContainerConfig{ID: container.ID, Name: name, Labels: d.Config.Labels, Tty: d.getContainerTTY(container.ID)} } } - } for _, cont := range d.compiledContainerID { @@ -419,7 +418,6 @@ func (d *DockerSource) EvalContainer(container dockerTypes.Container) *Container return &ContainerConfig{ID: container.ID, Name: name, Labels: d.Config.Labels, Tty: d.getContainerTTY(container.ID)} } } - } if d.Config.UseContainerLabels { diff --git a/pkg/acquisition/modules/file/file.go b/pkg/acquisition/modules/file/file.go index 4f7880baa89..85b4c1b5b32 100644 --- a/pkg/acquisition/modules/file/file.go +++ b/pkg/acquisition/modules/file/file.go @@ -73,7 +73,7 @@ func (f *FileSource) UnmarshalConfig(yamlConfig []byte) error { f.logger.Tracef("FileAcquisition configuration: %+v", f.config) } - if len(f.config.Filename) != 0 { + if f.config.Filename != "" { f.config.Filenames = append(f.config.Filenames, f.config.Filename) } @@ -202,11 +202,11 @@ func (f *FileSource) ConfigureByDSN(dsn string, labels map[string]string, logger args := strings.Split(dsn, "?") - if len(args[0]) == 0 { + if args[0] == "" { return errors.New("empty file:// DSN") } - if len(args) == 2 && len(args[1]) != 0 { + if len(args) == 2 && args[1] != "" { params, err := url.ParseQuery(args[1]) if err != nil { return fmt.Errorf("could not parse file args: %w", err) diff --git a/pkg/acquisition/modules/journalctl/journalctl.go b/pkg/acquisition/modules/journalctl/journalctl.go index 762dfe9ba12..1336fac4578 100644 --- a/pkg/acquisition/modules/journalctl/journalctl.go +++ b/pkg/acquisition/modules/journalctl/journalctl.go @@ -223,7 +223,7 @@ func (j *JournalCtlSource) ConfigureByDSN(dsn string, labels map[string]string, } qs := strings.TrimPrefix(dsn, "journalctl://") - if len(qs) == 0 { + if qs == 
"" { return errors.New("empty journalctl:// DSN") } @@ -267,7 +267,6 @@ func (j *JournalCtlSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb err := j.runJournalCtl(out, t) j.logger.Debug("Oneshot journalctl acquisition is done") return err - } func (j *JournalCtlSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { @@ -277,11 +276,13 @@ func (j *JournalCtlSource) StreamingAcquisition(out chan types.Event, t *tomb.To }) return nil } + func (j *JournalCtlSource) CanRun() error { - //TODO: add a more precise check on version or something ? + // TODO: add a more precise check on version or something ? _, err := exec.LookPath(journalctlCmd) return err } + func (j *JournalCtlSource) Dump() interface{} { return j } diff --git a/pkg/acquisition/modules/s3/s3.go b/pkg/acquisition/modules/s3/s3.go index 9ef4d2ba757..a9835ab4974 100644 --- a/pkg/acquisition/modules/s3/s3.go +++ b/pkg/acquisition/modules/s3/s3.go @@ -93,10 +93,12 @@ type S3Event struct { } `json:"detail"` } -const PollMethodList = "list" -const PollMethodSQS = "sqs" -const SQSFormatEventBridge = "eventbridge" -const SQSFormatS3Notification = "s3notification" +const ( + PollMethodList = "list" + PollMethodSQS = "sqs" + SQSFormatEventBridge = "eventbridge" + SQSFormatS3Notification = "s3notification" +) var linesRead = prometheus.NewCounterVec( prometheus.CounterOpts{ @@ -467,6 +469,7 @@ func (s *S3Source) GetUuid() string { func (s *S3Source) GetMetrics() []prometheus.Collector { return []prometheus.Collector{linesRead, objectsRead, sqsMessagesReceived} } + func (s *S3Source) GetAggregMetrics() []prometheus.Collector { return []prometheus.Collector{linesRead, objectsRead, sqsMessagesReceived} } @@ -567,11 +570,11 @@ func (s *S3Source) ConfigureByDSN(dsn string, labels map[string]string, logger * }) dsn = strings.TrimPrefix(dsn, "s3://") args := strings.Split(dsn, "?") - if len(args[0]) == 0 { + if args[0] == "" { return errors.New("empty s3:// DSN") } - if len(args) == 2 && 
len(args[1]) != 0 { + if len(args) == 2 && args[1] != "" { params, err := url.ParseQuery(args[1]) if err != nil { return fmt.Errorf("could not parse s3 args: %w", err) diff --git a/pkg/apiclient/resperr.go b/pkg/apiclient/resperr.go index 00689147332..1b0786f9882 100644 --- a/pkg/apiclient/resperr.go +++ b/pkg/apiclient/resperr.go @@ -19,7 +19,7 @@ func (e *ErrorResponse) Error() string { message := ptr.OrEmpty(e.Message) errors := "" - if len(e.Errors) > 0 { + if e.Errors != "" { errors = fmt.Sprintf(" (%s)", e.Errors) } @@ -51,7 +51,7 @@ func CheckResponse(r *http.Response) error { // try to unmarshal and if there are no 'message' or 'errors' fields, display the body as is, // the API is following a different convention err := json.Unmarshal(data, ret) - if err != nil || (ret.Message == nil && len(ret.Errors) == 0) { + if err != nil || (ret.Message == nil && ret.Errors == "") { ret.Message = ptr.Of(fmt.Sprintf("http code %d, response: %s", r.StatusCode, string(data))) return ret } diff --git a/pkg/apiserver/controllers/v1/decisions.go b/pkg/apiserver/controllers/v1/decisions.go index 3d8e0232224..54e9b0290cc 100644 --- a/pkg/apiserver/controllers/v1/decisions.go +++ b/pkg/apiserver/controllers/v1/decisions.go @@ -160,7 +160,7 @@ func writeStartupDecisions(gctx *gin.Context, filters map[string][]string, dbFun if needComma { //respBuffer.Write([]byte(",")) - gctx.Writer.Write([]byte(",")) + gctx.Writer.WriteString(",") } else { needComma = true } @@ -212,7 +212,7 @@ func writeDeltaDecisions(gctx *gin.Context, filters map[string][]string, lastPul if needComma { //respBuffer.Write([]byte(",")) - gctx.Writer.Write([]byte(",")) + gctx.Writer.WriteString(",") } else { needComma = true } @@ -244,7 +244,7 @@ func (c *Controller) StreamDecisionChunked(gctx *gin.Context, bouncerInfo *ent.B gctx.Writer.Header().Set("Content-Type", "application/json") gctx.Writer.Header().Set("Transfer-Encoding", "chunked") gctx.Writer.WriteHeader(http.StatusOK) - 
gctx.Writer.Write([]byte(`{"new": [`)) //No need to check for errors, the doc says it always returns nil + gctx.Writer.WriteString(`{"new": [`) //No need to check for errors, the doc says it always returns nil // if the blocker just started, return all decisions if val, ok := gctx.Request.URL.Query()["startup"]; ok && val[0] == "true" { @@ -252,48 +252,47 @@ func (c *Controller) StreamDecisionChunked(gctx *gin.Context, bouncerInfo *ent.B err := writeStartupDecisions(gctx, filters, c.DBClient.QueryAllDecisionsWithFilters) if err != nil { log.Errorf("failed sending new decisions for startup: %v", err) - gctx.Writer.Write([]byte(`], "deleted": []}`)) + gctx.Writer.WriteString(`], "deleted": []}`) gctx.Writer.Flush() return err } - gctx.Writer.Write([]byte(`], "deleted": [`)) + gctx.Writer.WriteString(`], "deleted": [`) //Expired decisions err = writeStartupDecisions(gctx, filters, c.DBClient.QueryExpiredDecisionsWithFilters) if err != nil { log.Errorf("failed sending expired decisions for startup: %v", err) - gctx.Writer.Write([]byte(`]}`)) + gctx.Writer.WriteString(`]}`) gctx.Writer.Flush() return err } - gctx.Writer.Write([]byte(`]}`)) + gctx.Writer.WriteString(`]}`) gctx.Writer.Flush() } else { err = writeDeltaDecisions(gctx, filters, bouncerInfo.LastPull, c.DBClient.QueryNewDecisionsSinceWithFilters) if err != nil { log.Errorf("failed sending new decisions for delta: %v", err) - gctx.Writer.Write([]byte(`], "deleted": []}`)) + gctx.Writer.WriteString(`], "deleted": []}`) gctx.Writer.Flush() return err } - gctx.Writer.Write([]byte(`], "deleted": [`)) + gctx.Writer.WriteString(`], "deleted": [`) err = writeDeltaDecisions(gctx, filters, bouncerInfo.LastPull, c.DBClient.QueryExpiredDecisionsSinceWithFilters) - if err != nil { log.Errorf("failed sending expired decisions for delta: %v", err) - gctx.Writer.Write([]byte(`]}`)) + gctx.Writer.WriteString("]}") gctx.Writer.Flush() return err } - gctx.Writer.Write([]byte(`]}`)) + gctx.Writer.WriteString("]}") 
gctx.Writer.Flush() } diff --git a/pkg/csconfig/config_paths.go b/pkg/csconfig/config_paths.go index 7675b90d7dd..a8d39a664f3 100644 --- a/pkg/csconfig/config_paths.go +++ b/pkg/csconfig/config_paths.go @@ -10,7 +10,7 @@ type ConfigurationPaths struct { ConfigDir string `yaml:"config_dir"` DataDir string `yaml:"data_dir,omitempty"` SimulationFilePath string `yaml:"simulation_path,omitempty"` - HubIndexFile string `yaml:"index_path,omitempty"` //path of the .index.json + HubIndexFile string `yaml:"index_path,omitempty"` // path of the .index.json HubDir string `yaml:"hub_dir,omitempty"` PluginDir string `yaml:"plugin_dir,omitempty"` NotificationDir string `yaml:"notification_dir,omitempty"` @@ -28,18 +28,18 @@ func (c *Config) loadConfigurationPaths() error { } if c.ConfigPaths.HubDir == "" { - c.ConfigPaths.HubDir = filepath.Clean(c.ConfigPaths.ConfigDir + "/hub") + c.ConfigPaths.HubDir = filepath.Join(c.ConfigPaths.ConfigDir, "hub") } if c.ConfigPaths.HubIndexFile == "" { - c.ConfigPaths.HubIndexFile = filepath.Clean(c.ConfigPaths.HubDir + "/.index.json") + c.ConfigPaths.HubIndexFile = filepath.Join(c.ConfigPaths.HubDir, ".index.json") } if c.ConfigPaths.PatternDir == "" { - c.ConfigPaths.PatternDir = filepath.Join(c.ConfigPaths.ConfigDir, "patterns/") + c.ConfigPaths.PatternDir = filepath.Join(c.ConfigPaths.ConfigDir, "patterns") } - var configPathsCleanup = []*string{ + configPathsCleanup := []*string{ &c.ConfigPaths.HubDir, &c.ConfigPaths.HubIndexFile, &c.ConfigPaths.ConfigDir, diff --git a/pkg/csconfig/crowdsec_service.go b/pkg/csconfig/crowdsec_service.go index 7a611a856ee..cf796805dee 100644 --- a/pkg/csconfig/crowdsec_service.go +++ b/pkg/csconfig/crowdsec_service.go @@ -146,11 +146,11 @@ func (c *CrowdsecServiceCfg) DumpContextConfigFile() error { return fmt.Errorf("while serializing ConsoleConfig (for %s): %w", c.ConsoleContextPath, err) } - if err = os.MkdirAll(filepath.Dir(c.ConsoleContextPath), 0700); err != nil { + if err = 
os.MkdirAll(filepath.Dir(c.ConsoleContextPath), 0o700); err != nil { return fmt.Errorf("while creating directories for %s: %w", c.ConsoleContextPath, err) } - if err := os.WriteFile(c.ConsoleContextPath, out, 0600); err != nil { + if err := os.WriteFile(c.ConsoleContextPath, out, 0o600); err != nil { return fmt.Errorf("while dumping console config to %s: %w", c.ConsoleContextPath, err) } diff --git a/pkg/csconfig/simulation.go b/pkg/csconfig/simulation.go index afc4ea4f044..c9041df464a 100644 --- a/pkg/csconfig/simulation.go +++ b/pkg/csconfig/simulation.go @@ -37,7 +37,7 @@ func (c *Config) LoadSimulation() error { simCfg := SimulationConfig{} if c.ConfigPaths.SimulationFilePath == "" { - c.ConfigPaths.SimulationFilePath = filepath.Clean(c.ConfigPaths.ConfigDir + "/simulation.yaml") + c.ConfigPaths.SimulationFilePath = filepath.Join(c.ConfigPaths.ConfigDir, "simulation.yaml") } patcher := yamlpatch.NewPatcher(c.ConfigPaths.SimulationFilePath, ".local") diff --git a/pkg/csplugin/listfiles_test.go b/pkg/csplugin/listfiles_test.go index a4188804149..c476d7a4e4a 100644 --- a/pkg/csplugin/listfiles_test.go +++ b/pkg/csplugin/listfiles_test.go @@ -21,7 +21,7 @@ func TestListFilesAtPath(t *testing.T) { require.NoError(t, err) _, err = os.Create(filepath.Join(dir, "slack")) require.NoError(t, err) - err = os.Mkdir(filepath.Join(dir, "somedir"), 0755) + err = os.Mkdir(filepath.Join(dir, "somedir"), 0o755) require.NoError(t, err) _, err = os.Create(filepath.Join(dir, "somedir", "inner")) require.NoError(t, err) diff --git a/pkg/csplugin/utils.go b/pkg/csplugin/utils.go index 2e7f0c80528..571d78add56 100644 --- a/pkg/csplugin/utils.go +++ b/pkg/csplugin/utils.go @@ -123,10 +123,10 @@ func pluginIsValid(path string) error { mode := details.Mode() perm := uint32(mode) - if (perm & 00002) != 0 { + if (perm & 0o0002) != 0 { return fmt.Errorf("plugin at %s is world writable, world writable plugins are invalid", path) } - if (perm & 00020) != 0 { + if (perm & 0o0020) != 0 { return 
fmt.Errorf("plugin at %s is group writable, group writable plugins are invalid", path) } if (mode & os.ModeSetgid) != 0 { diff --git a/pkg/cwhub/cwhub_test.go b/pkg/cwhub/cwhub_test.go index a4641483622..17e7a0dc723 100644 --- a/pkg/cwhub/cwhub_test.go +++ b/pkg/cwhub/cwhub_test.go @@ -146,7 +146,7 @@ func setResponseByPath() { "/crowdsecurity/master/parsers/s01-parse/crowdsecurity/foobar_parser.yaml": fileToStringX("./testdata/foobar_parser.yaml"), "/crowdsecurity/master/parsers/s01-parse/crowdsecurity/foobar_subparser.yaml": fileToStringX("./testdata/foobar_parser.yaml"), "/crowdsecurity/master/collections/crowdsecurity/test_collection.yaml": fileToStringX("./testdata/collection_v1.yaml"), - "/crowdsecurity/master/.index.json": fileToStringX("./testdata/index1.json"), + "/crowdsecurity/master/.index.json": fileToStringX("./testdata/index1.json"), "/crowdsecurity/master/scenarios/crowdsecurity/foobar_scenario.yaml": `filter: true name: crowdsecurity/foobar_scenario`, "/crowdsecurity/master/scenarios/crowdsecurity/barfoo_scenario.yaml": `filter: true diff --git a/pkg/database/utils.go b/pkg/database/utils.go index f1c06565635..8148df56f24 100644 --- a/pkg/database/utils.go +++ b/pkg/database/utils.go @@ -42,7 +42,8 @@ func LastAddress(n *net.IPNet) net.IP { ip[6] | ^n.Mask[6], ip[7] | ^n.Mask[7], ip[8] | ^n.Mask[8], ip[9] | ^n.Mask[9], ip[10] | ^n.Mask[10], ip[11] | ^n.Mask[11], ip[12] | ^n.Mask[12], ip[13] | ^n.Mask[13], ip[14] | ^n.Mask[14], - ip[15] | ^n.Mask[15]} + ip[15] | ^n.Mask[15], + } } return net.IPv4( @@ -74,7 +75,7 @@ func ParseDuration(d string) (time.Duration, error) { if strings.HasSuffix(d, "d") { days := strings.Split(d, "d")[0] - if len(days) == 0 { + if days == "" { return 0, fmt.Errorf("'%s' can't be parsed as duration", d) } diff --git a/pkg/dumps/parser_dump.go b/pkg/dumps/parser_dump.go index d43f3cdc1b9..bc8f78dc203 100644 --- a/pkg/dumps/parser_dump.go +++ b/pkg/dumps/parser_dump.go @@ -259,7 +259,7 @@ func (t *tree) displayResults(opts 
DumpOpts) { } if updated > 0 { - if len(changeStr) > 0 { + if changeStr != "" { changeStr += " " } @@ -267,7 +267,7 @@ func (t *tree) displayResults(opts DumpOpts) { } if deleted > 0 { - if len(changeStr) > 0 { + if changeStr != "" { changeStr += " " } @@ -275,7 +275,7 @@ func (t *tree) displayResults(opts DumpOpts) { } if whitelisted { - if len(changeStr) > 0 { + if changeStr != "" { changeStr += " " } diff --git a/pkg/exprhelpers/debugger.go b/pkg/exprhelpers/debugger.go index 711aa491078..2e47af6d1de 100644 --- a/pkg/exprhelpers/debugger.go +++ b/pkg/exprhelpers/debugger.go @@ -53,9 +53,8 @@ type OpOutput struct { } func (o *OpOutput) String() string { - ret := fmt.Sprintf("%*c", o.CodeDepth, ' ') - if len(o.Code) != 0 { + if o.Code != "" { ret += fmt.Sprintf("[%s]", o.Code) } ret += " " @@ -70,7 +69,7 @@ func (o *OpOutput) String() string { indent = 0 } ret = fmt.Sprintf("%*cBLOCK_END [%s]", indent, ' ', o.Code) - if len(o.StrConditionResult) > 0 { + if o.StrConditionResult != "" { ret += fmt.Sprintf(" -> %s", o.StrConditionResult) } return ret diff --git a/pkg/exprhelpers/helpers.go b/pkg/exprhelpers/helpers.go index 17ce468f623..2ca7d0be79a 100644 --- a/pkg/exprhelpers/helpers.go +++ b/pkg/exprhelpers/helpers.go @@ -213,7 +213,7 @@ func FileInit(fileFolder string, filename string, fileType string) error { if strings.HasPrefix(scanner.Text(), "#") { // allow comments continue } - if len(scanner.Text()) == 0 { //skip empty lines + if scanner.Text() == "" { //skip empty lines continue } @@ -254,7 +254,6 @@ func Distinct(params ...any) (any, error) { } } return ret, nil - } func FlattenDistinct(params ...any) (any, error) { @@ -280,6 +279,7 @@ func flatten(args []interface{}, v reflect.Value) []interface{} { return args } + func existsInFileMaps(filename string, ftype string) (bool, error) { ok := false var err error @@ -765,7 +765,6 @@ func B64Decode(params ...any) (any, error) { } func ParseKV(params ...any) (any, error) { - blob := params[0].(string) target := 
params[1].(map[string]interface{}) prefix := params[2].(string) diff --git a/pkg/fflag/features.go b/pkg/fflag/features.go index 3a106984a66..c8a3d7755ea 100644 --- a/pkg/fflag/features.go +++ b/pkg/fflag/features.go @@ -97,7 +97,7 @@ type FeatureRegister struct { features map[string]*Feature } -var featureNameRexp = regexp.MustCompile(`^[a-z0-9_\.]+$`) +var featureNameRexp = regexp.MustCompile(`^[a-z0-9_.]+$`) func validateFeatureName(featureName string) error { if featureName == "" { diff --git a/pkg/hubtest/hubtest.go b/pkg/hubtest/hubtest.go index a4ca275c310..93f5abaa879 100644 --- a/pkg/hubtest/hubtest.go +++ b/pkg/hubtest/hubtest.go @@ -83,7 +83,7 @@ func NewHubTest(hubPath string, crowdsecPath string, cscliPath string, isAppsecT } if isAppsecTest { - HubTestPath := filepath.Join(hubPath, "./.appsec-tests/") + HubTestPath := filepath.Join(hubPath, ".appsec-tests") hubIndexFile := filepath.Join(hubPath, ".index.json") local := &csconfig.LocalHubCfg{ @@ -119,7 +119,7 @@ func NewHubTest(hubPath string, crowdsecPath string, cscliPath string, isAppsecT }, nil } - HubTestPath := filepath.Join(hubPath, "./.tests/") + HubTestPath := filepath.Join(hubPath, ".tests") hubIndexFile := filepath.Join(hubPath, ".index.json") diff --git a/pkg/hubtest/nucleirunner.go b/pkg/hubtest/nucleirunner.go index 0bf2013dd8d..32c81eb64d8 100644 --- a/pkg/hubtest/nucleirunner.go +++ b/pkg/hubtest/nucleirunner.go @@ -42,11 +42,11 @@ func (nc *NucleiConfig) RunNucleiTemplate(testName string, templatePath string, err := cmd.Run() - if err := os.WriteFile(outputPrefix+"_stdout.txt", out.Bytes(), 0644); err != nil { + if err := os.WriteFile(outputPrefix+"_stdout.txt", out.Bytes(), 0o644); err != nil { log.Warningf("Error writing stdout: %s", err) } - if err := os.WriteFile(outputPrefix+"_stderr.txt", outErr.Bytes(), 0644); err != nil { + if err := os.WriteFile(outputPrefix+"_stderr.txt", outErr.Bytes(), 0o644); err != nil { log.Warningf("Error writing stderr: %s", err) } @@ -56,7 +56,7 @@ 
func (nc *NucleiConfig) RunNucleiTemplate(testName string, templatePath string, log.Warningf("Stderr saved to %s", outputPrefix+"_stderr.txt") log.Warningf("Nuclei generated output saved to %s", outputPrefix+".json") return err - } else if len(out.String()) == 0 { + } else if out.String() == "" { log.Warningf("Stdout saved to %s", outputPrefix+"_stdout.txt") log.Warningf("Stderr saved to %s", outputPrefix+"_stderr.txt") log.Warningf("Nuclei generated output saved to %s", outputPrefix+".json") diff --git a/pkg/hubtest/regexp.go b/pkg/hubtest/regexp.go index f9165eae3d1..8b2fcc928dd 100644 --- a/pkg/hubtest/regexp.go +++ b/pkg/hubtest/regexp.go @@ -5,7 +5,7 @@ import ( ) var ( - variableRE = regexp.MustCompile(`(?P[^ =]+) == .*`) - parserResultRE = regexp.MustCompile(`^results\["[^"]+"\]\["(?P[^"]+)"\]\[[0-9]+\]\.Evt\..*`) + variableRE = regexp.MustCompile(`(?P[^ =]+) == .*`) + parserResultRE = regexp.MustCompile(`^results\["[^"]+"\]\["(?P[^"]+)"\]\[[0-9]+\]\.Evt\..*`) scenarioResultRE = regexp.MustCompile(`^results\[[0-9]+\].Overflow.Alert.GetScenario\(\) == "(?P[^"]+)"`) ) diff --git a/pkg/hubtest/utils.go b/pkg/hubtest/utils.go index a7373fcc0bf..b42a73461f3 100644 --- a/pkg/hubtest/utils.go +++ b/pkg/hubtest/utils.go @@ -91,7 +91,7 @@ func CopyDir(src string, dest string) error { return errors.New("Source " + file.Name() + " is not a directory!") } - err = os.MkdirAll(dest, 0755) + err = os.MkdirAll(dest, 0o755) if err != nil { return err } diff --git a/pkg/longpollclient/client.go b/pkg/longpollclient/client.go index 0603b7a5e80..5a7af0bfa63 100644 --- a/pkg/longpollclient/client.go +++ b/pkg/longpollclient/client.go @@ -74,11 +74,9 @@ func (c *LongPollClient) doQuery() (*http.Response, error) { } func (c *LongPollClient) poll() error { - logger := c.logger.WithField("method", "poll") resp, err := c.doQuery() - if err != nil { return err } @@ -122,7 +120,7 @@ func (c *LongPollClient) poll() error { logger.Tracef("got response: %+v", pollResp) - if 
len(pollResp.ErrorMessage) > 0 { + if pollResp.ErrorMessage != "" { if pollResp.ErrorMessage == timeoutMessage { logger.Debugf("got timeout message") return nil @@ -209,7 +207,7 @@ func (c *LongPollClient) PullOnce(since time.Time) ([]Event, error) { c.logger.Tracef("got response: %+v", pollResp) - if len(pollResp.ErrorMessage) > 0 { + if pollResp.ErrorMessage != "" { if pollResp.ErrorMessage == timeoutMessage { c.logger.Debugf("got timeout message") break diff --git a/pkg/setup/units.go b/pkg/setup/units.go index ab1eec6f33e..861513d3f1d 100644 --- a/pkg/setup/units.go +++ b/pkg/setup/units.go @@ -35,7 +35,7 @@ func systemdUnitList() ([]string, error) { for scanner.Scan() { line := scanner.Text() - if len(line) == 0 { + if line == "" { break // the rest of the output is footer } From 519693270eba7515b0201749d8670503de527d02 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 18 Sep 2024 15:00:19 +0200 Subject: [PATCH 290/318] refact: alerts query (#3216) * refact alerts: log messages * refact: AlertPredicatesFromFilter --- .golangci.yml | 2 +- pkg/database/alerts.go | 312 ++++++++++++++++++++++------------------- 2 files changed, 170 insertions(+), 144 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 4918fb99fe3..34837437355 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -103,7 +103,7 @@ linters-settings: disabled: true - name: cyclomatic # lower this after refactoring - arguments: [41] + arguments: [39] - name: defer disabled: true - name: empty-block diff --git a/pkg/database/alerts.go b/pkg/database/alerts.go index 3e3e480c7d6..3dfb0dc8197 100644 --- a/pkg/database/alerts.go +++ b/pkg/database/alerts.go @@ -456,14 +456,14 @@ func (c *Client) createAlertChunk(machineID string, owner *ent.Machine, alerts [ startAtTime, err := time.Parse(time.RFC3339, *alertItem.StartAt) if err != nil { - c.Log.Errorf("CreateAlertBulk: Failed to parse startAtTime '%s', defaulting to now: %s", *alertItem.StartAt, err) + 
c.Log.Errorf("creating alert: Failed to parse startAtTime '%s', defaulting to now: %s", *alertItem.StartAt, err) startAtTime = time.Now().UTC() } stopAtTime, err := time.Parse(time.RFC3339, *alertItem.StopAt) if err != nil { - c.Log.Errorf("CreateAlertBulk: Failed to parse stopAtTime '%s', defaulting to now: %s", *alertItem.StopAt, err) + c.Log.Errorf("creating alert: Failed to parse stopAtTime '%s', defaulting to now: %s", *alertItem.StopAt, err) stopAtTime = time.Now().UTC() } @@ -483,7 +483,7 @@ func (c *Client) createAlertChunk(machineID string, owner *ent.Machine, alerts [ for i, eventItem := range alertItem.Events { ts, err := time.Parse(time.RFC3339, *eventItem.Timestamp) if err != nil { - c.Log.Errorf("CreateAlertBulk: Failed to parse event timestamp '%s', defaulting to now: %s", *eventItem.Timestamp, err) + c.Log.Errorf("creating alert: Failed to parse event timestamp '%s', defaulting to now: %s", *eventItem.Timestamp, err) ts = time.Now().UTC() } @@ -694,7 +694,7 @@ func (c *Client) CreateAlert(machineID string, alertList []*models.Alert) ([]str return nil, fmt.Errorf("machine '%s': %w", machineID, err) } - c.Log.Debugf("CreateAlertBulk: Machine Id %s doesn't exist", machineID) + c.Log.Debugf("creating alert: machine %s doesn't exist", machineID) owner = nil } @@ -724,6 +724,160 @@ func (c *Client) CreateAlert(machineID string, alertList []*models.Alert) ([]str return alertIDs, nil } +func handleSimulatedFilter(filter map[string][]string, predicates *[]predicate.Alert) { + /* the simulated filter is a bit different : if it's not present *or* set to false, specifically exclude records with simulated to true */ + if v, ok := filter["simulated"]; ok && v[0] == "false" { + *predicates = append(*predicates, alert.SimulatedEQ(false)) + } +} + +func handleOriginFilter(filter map[string][]string, predicates *[]predicate.Alert) { + if _, ok := filter["origin"]; ok { + filter["include_capi"] = []string{"true"} + } +} + +func handleScopeFilter(scope string, 
predicates *[]predicate.Alert) { + if strings.ToLower(scope) == "ip" { + scope = types.Ip + } else if strings.ToLower(scope) == "range" { + scope = types.Range + } + + *predicates = append(*predicates, alert.SourceScopeEQ(scope)) +} + +func handleTimeFilters(param, value string, predicates *[]predicate.Alert) error { + duration, err := ParseDuration(value) + if err != nil { + return fmt.Errorf("while parsing duration: %w", err) + } + + timePoint := time.Now().UTC().Add(-duration) + if timePoint.IsZero() { + return fmt.Errorf("empty time now() - %s", timePoint.String()) + } + + switch param { + case "since": + *predicates = append(*predicates, alert.StartedAtGTE(timePoint)) + case "created_before": + *predicates = append(*predicates, alert.CreatedAtLTE(timePoint)) + case "until": + *predicates = append(*predicates, alert.StartedAtLTE(timePoint)) + } + + return nil +} + +func handleIPv4Predicates(ip_sz int, contains bool, start_ip, start_sfx, end_ip, end_sfx int64, predicates *[]predicate.Alert) { + if contains { // decision contains {start_ip,end_ip} + *predicates = append(*predicates, alert.And( + alert.HasDecisionsWith(decision.StartIPLTE(start_ip)), + alert.HasDecisionsWith(decision.EndIPGTE(end_ip)), + alert.HasDecisionsWith(decision.IPSizeEQ(int64(ip_sz))), + )) + } else { // decision is contained within {start_ip,end_ip} + *predicates = append(*predicates, alert.And( + alert.HasDecisionsWith(decision.StartIPGTE(start_ip)), + alert.HasDecisionsWith(decision.EndIPLTE(end_ip)), + alert.HasDecisionsWith(decision.IPSizeEQ(int64(ip_sz))), + )) + } +} + +func handleIPv6Predicates(ip_sz int, contains bool, start_ip, start_sfx, end_ip, end_sfx int64, predicates *[]predicate.Alert) { + if contains { // decision contains {start_ip,end_ip} + *predicates = append(*predicates, alert.And( + // matching addr size + alert.HasDecisionsWith(decision.IPSizeEQ(int64(ip_sz))), + alert.Or( + // decision.start_ip < query.start_ip + 
alert.HasDecisionsWith(decision.StartIPLT(start_ip)), + alert.And( + // decision.start_ip == query.start_ip + alert.HasDecisionsWith(decision.StartIPEQ(start_ip)), + // decision.start_suffix <= query.start_suffix + alert.HasDecisionsWith(decision.StartSuffixLTE(start_sfx)), + ), + ), + alert.Or( + // decision.end_ip > query.end_ip + alert.HasDecisionsWith(decision.EndIPGT(end_ip)), + alert.And( + // decision.end_ip == query.end_ip + alert.HasDecisionsWith(decision.EndIPEQ(end_ip)), + // decision.end_suffix >= query.end_suffix + alert.HasDecisionsWith(decision.EndSuffixGTE(end_sfx)), + ), + ), + )) + } else { // decision is contained within {start_ip,end_ip} + *predicates = append(*predicates, alert.And( + // matching addr size + alert.HasDecisionsWith(decision.IPSizeEQ(int64(ip_sz))), + alert.Or( + // decision.start_ip > query.start_ip + alert.HasDecisionsWith(decision.StartIPGT(start_ip)), + alert.And( + // decision.start_ip == query.start_ip + alert.HasDecisionsWith(decision.StartIPEQ(start_ip)), + // decision.start_suffix >= query.start_suffix + alert.HasDecisionsWith(decision.StartSuffixGTE(start_sfx)), + ), + ), + alert.Or( + // decision.end_ip < query.end_ip + alert.HasDecisionsWith(decision.EndIPLT(end_ip)), + alert.And( + // decision.end_ip == query.end_ip + alert.HasDecisionsWith(decision.EndIPEQ(end_ip)), + // decision.end_suffix <= query.end_suffix + alert.HasDecisionsWith(decision.EndSuffixLTE(end_sfx)), + ), + ), + )) + } +} + +func handleIPPredicates(ip_sz int, contains bool, start_ip, start_sfx, end_ip, end_sfx int64, predicates *[]predicate.Alert) error { + if ip_sz == 4 { + handleIPv4Predicates(ip_sz, contains, start_ip, start_sfx, end_ip, end_sfx, predicates) + } else if ip_sz == 16 { + handleIPv6Predicates(ip_sz, contains, start_ip, start_sfx, end_ip, end_sfx, predicates) + } else if ip_sz != 0 { + return errors.Wrapf(InvalidFilter, "Unknown ip size %d", ip_sz) + } + + return nil +} + +func handleIncludeCapiFilter(value string, predicates 
*[]predicate.Alert) error { + if value == "false" { + *predicates = append(*predicates, alert.And( + // do not show alerts with active decisions having origin CAPI or lists + alert.And( + alert.Not(alert.HasDecisionsWith(decision.OriginEQ(types.CAPIOrigin))), + alert.Not(alert.HasDecisionsWith(decision.OriginEQ(types.ListOrigin))), + ), + alert.Not( + alert.And( + // do not show neither alerts with no decisions if the Source Scope is lists: or CAPI + alert.Not(alert.HasDecisions()), + alert.Or( + alert.SourceScopeHasPrefix(types.ListOrigin+":"), + alert.SourceScopeEQ(types.CommunityBlocklistPullSourceScope), + ), + ), + ), + )) + } else if value != "true" { + log.Errorf("invalid bool '%s' for include_capi", value) + } + + return nil +} + func AlertPredicatesFromFilter(filter map[string][]string) ([]predicate.Alert, error) { predicates := make([]predicate.Alert, 0) @@ -739,16 +893,8 @@ func AlertPredicatesFromFilter(filter map[string][]string) ([]predicate.Alert, e /*if contains is true, return bans that *contains* the given value (value is the inner) else, return bans that are *contained* by the given value (value is the outer)*/ - /*the simulated filter is a bit different : if it's not present *or* set to false, specifically exclude records with simulated to true */ - if v, ok := filter["simulated"]; ok { - if v[0] == "false" { - predicates = append(predicates, alert.SimulatedEQ(false)) - } - } - - if _, ok := filter["origin"]; ok { - filter["include_capi"] = []string{"true"} - } + handleSimulatedFilter(filter, &predicates) + handleOriginFilter(filter, &predicates) for param, value := range filter { switch param { @@ -758,14 +904,7 @@ func AlertPredicatesFromFilter(filter map[string][]string) ([]predicate.Alert, e return nil, errors.Wrapf(InvalidFilter, "invalid contains value : %s", err) } case "scope": - scope := value[0] - if strings.ToLower(scope) == "ip" { - scope = types.Ip - } else if strings.ToLower(scope) == "range" { - scope = types.Range - } - - 
predicates = append(predicates, alert.SourceScopeEQ(scope)) + handleScopeFilter(value[0], &predicates) case "value": predicates = append(predicates, alert.SourceValueEQ(value[0])) case "scenario": @@ -775,68 +914,18 @@ func AlertPredicatesFromFilter(filter map[string][]string) ([]predicate.Alert, e if err != nil { return nil, errors.Wrapf(InvalidIPOrRange, "unable to convert '%s' to int: %s", value[0], err) } - case "since": - duration, err := ParseDuration(value[0]) - if err != nil { - return nil, fmt.Errorf("while parsing duration: %w", err) - } - - since := time.Now().UTC().Add(-duration) - if since.IsZero() { - return nil, fmt.Errorf("empty time now() - %s", since.String()) - } - - predicates = append(predicates, alert.StartedAtGTE(since)) - case "created_before": - duration, err := ParseDuration(value[0]) - if err != nil { - return nil, fmt.Errorf("while parsing duration: %w", err) - } + case "since", "created_before", "until": + if err := handleTimeFilters(param, value[0], &predicates); err != nil { + return nil, err - since := time.Now().UTC().Add(-duration) - if since.IsZero() { - return nil, fmt.Errorf("empty time now() - %s", since.String()) } - - predicates = append(predicates, alert.CreatedAtLTE(since)) - case "until": - duration, err := ParseDuration(value[0]) - if err != nil { - return nil, fmt.Errorf("while parsing duration: %w", err) - } - - until := time.Now().UTC().Add(-duration) - if until.IsZero() { - return nil, fmt.Errorf("empty time now() - %s", until.String()) - } - - predicates = append(predicates, alert.StartedAtLTE(until)) case "decision_type": predicates = append(predicates, alert.HasDecisionsWith(decision.TypeEQ(value[0]))) case "origin": predicates = append(predicates, alert.HasDecisionsWith(decision.OriginEQ(value[0]))) case "include_capi": // allows to exclude one or more specific origins - if value[0] == "false" { - predicates = append(predicates, alert.And( - // do not show alerts with active decisions having origin CAPI or lists - 
alert.And( - alert.Not(alert.HasDecisionsWith(decision.OriginEQ(types.CAPIOrigin))), - alert.Not(alert.HasDecisionsWith(decision.OriginEQ(types.ListOrigin))), - ), - alert.Not( - alert.And( - // do not show neither alerts with no decisions if the Source Scope is lists: or CAPI - alert.Not(alert.HasDecisions()), - alert.Or( - alert.SourceScopeHasPrefix(types.ListOrigin+":"), - alert.SourceScopeEQ(types.CommunityBlocklistPullSourceScope), - ), - ), - ), - ), - ) - } else if value[0] != "true" { - log.Errorf("Invalid bool '%s' for include_capi", value[0]) + if err = handleIncludeCapiFilter(value[0], &predicates); err != nil { + return nil, err } case "has_active_decision": if hasActiveDecision, err = strconv.ParseBool(value[0]); err != nil { @@ -861,72 +950,9 @@ func AlertPredicatesFromFilter(filter map[string][]string) ([]predicate.Alert, e } } - if ip_sz == 4 { - if contains { /*decision contains {start_ip,end_ip}*/ - predicates = append(predicates, alert.And( - alert.HasDecisionsWith(decision.StartIPLTE(start_ip)), - alert.HasDecisionsWith(decision.EndIPGTE(end_ip)), - alert.HasDecisionsWith(decision.IPSizeEQ(int64(ip_sz))), - )) - } else { /*decision is contained within {start_ip,end_ip}*/ - predicates = append(predicates, alert.And( - alert.HasDecisionsWith(decision.StartIPGTE(start_ip)), - alert.HasDecisionsWith(decision.EndIPLTE(end_ip)), - alert.HasDecisionsWith(decision.IPSizeEQ(int64(ip_sz))), - )) - } - } else if ip_sz == 16 { - if contains { /*decision contains {start_ip,end_ip}*/ - predicates = append(predicates, alert.And( - // matching addr size - alert.HasDecisionsWith(decision.IPSizeEQ(int64(ip_sz))), - alert.Or( - // decision.start_ip < query.start_ip - alert.HasDecisionsWith(decision.StartIPLT(start_ip)), - alert.And( - // decision.start_ip == query.start_ip - alert.HasDecisionsWith(decision.StartIPEQ(start_ip)), - // decision.start_suffix <= query.start_suffix - alert.HasDecisionsWith(decision.StartSuffixLTE(start_sfx)), - )), - alert.Or( - // 
decision.end_ip > query.end_ip - alert.HasDecisionsWith(decision.EndIPGT(end_ip)), - alert.And( - // decision.end_ip == query.end_ip - alert.HasDecisionsWith(decision.EndIPEQ(end_ip)), - // decision.end_suffix >= query.end_suffix - alert.HasDecisionsWith(decision.EndSuffixGTE(end_sfx)), - ), - ), - )) - } else { /*decision is contained within {start_ip,end_ip}*/ - predicates = append(predicates, alert.And( - // matching addr size - alert.HasDecisionsWith(decision.IPSizeEQ(int64(ip_sz))), - alert.Or( - // decision.start_ip > query.start_ip - alert.HasDecisionsWith(decision.StartIPGT(start_ip)), - alert.And( - // decision.start_ip == query.start_ip - alert.HasDecisionsWith(decision.StartIPEQ(start_ip)), - // decision.start_suffix >= query.start_suffix - alert.HasDecisionsWith(decision.StartSuffixGTE(start_sfx)), - )), - alert.Or( - // decision.end_ip < query.end_ip - alert.HasDecisionsWith(decision.EndIPLT(end_ip)), - alert.And( - // decision.end_ip == query.end_ip - alert.HasDecisionsWith(decision.EndIPEQ(end_ip)), - // decision.end_suffix <= query.end_suffix - alert.HasDecisionsWith(decision.EndSuffixLTE(end_sfx)), - ), - ), - )) - } - } else if ip_sz != 0 { - return nil, errors.Wrapf(InvalidFilter, "Unknown ip size %d", ip_sz) + if err := handleIPPredicates(ip_sz, contains, start_ip, start_sfx, end_ip, end_sfx, &predicates); err != nil { + return nil, err + } return predicates, nil From b14201aa840cea1e61eb7a7550e4b301e09adbff Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 19 Sep 2024 09:31:28 +0200 Subject: [PATCH 291/318] CI: delegate pipenv cache management to the setup-python action (#3243) --- .github/workflows/docker-tests.yml | 15 ++------------- 1 file changed, 2 insertions(+), 13 deletions(-) diff --git a/.github/workflows/docker-tests.yml b/.github/workflows/docker-tests.yml index 228a0829984..918f3bcaf1d 100644 --- a/.github/workflows/docker-tests.yml +++ b/.github/workflows/docker-tests.yml @@ -53,23 +53,12 @@ 
jobs: uses: actions/setup-python@v5 with: python-version: "3.x" - - - name: "Install pipenv" - run: | - cd docker/test - python -m pip install --upgrade pipenv wheel - - - name: "Cache virtualenvs" - id: cache-pipenv - uses: actions/cache@v4 - with: - path: ~/.local/share/virtualenvs - key: ${{ runner.os }}-pipenv-${{ hashFiles('**/Pipfile.lock') }} + cache: 'pipenv' - name: "Install dependencies" - if: steps.cache-pipenv.outputs.cache-hit != 'true' run: | cd docker/test + python -m pip install --upgrade pipenv wheel pipenv install --deploy - name: "Create Docker network" From 7c5d4d8b3d3e4fcc9f7383cfd3455a1e4e7f1c14 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 19 Sep 2024 11:06:31 +0200 Subject: [PATCH 292/318] context propagation: explicit ctx parameter in unit tests (#3229) --- pkg/acquisition/modules/loki/loki_test.go | 19 +++-- pkg/apiserver/alerts_test.go | 97 +++++++++++++---------- pkg/apiserver/api_key_test.go | 9 ++- pkg/apiserver/apiserver_test.go | 16 +++- pkg/apiserver/jwt_test.go | 19 +++-- pkg/apiserver/machines_test.go | 58 +++++++++----- 6 files changed, 131 insertions(+), 87 deletions(-) diff --git a/pkg/acquisition/modules/loki/loki_test.go b/pkg/acquisition/modules/loki/loki_test.go index 5f41cd4c62e..2fd2b61e995 100644 --- a/pkg/acquisition/modules/loki/loki_test.go +++ b/pkg/acquisition/modules/loki/loki_test.go @@ -95,7 +95,6 @@ query: > delayFor: 1 * time.Second, }, { - config: ` mode: tail source: loki @@ -111,7 +110,6 @@ query: > testName: "Correct config with password", }, { - config: ` mode: tail source: loki @@ -261,7 +259,7 @@ func TestConfigureDSN(t *testing.T) { } } -func feedLoki(logger *log.Entry, n int, title string) error { +func feedLoki(ctx context.Context, logger *log.Entry, n int, title string) error { streams := LogStreams{ Streams: []LogStream{ { @@ -286,7 +284,7 @@ func feedLoki(logger *log.Entry, n int, title string) error { return err } - req, err := 
http.NewRequest(http.MethodPost, "http://127.0.0.1:3100/loki/api/v1/push", bytes.NewBuffer(buff)) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, "http://127.0.0.1:3100/loki/api/v1/push", bytes.NewBuffer(buff)) if err != nil { return err } @@ -344,12 +342,13 @@ since: 1h subLogger := logger.WithField("type", "loki") lokiSource := loki.LokiSource{} err := lokiSource.Configure([]byte(ts.config), subLogger, configuration.METRICS_NONE) - if err != nil { t.Fatalf("Unexpected error : %s", err) } - err = feedLoki(subLogger, 20, title) + ctx := context.Background() + + err = feedLoki(ctx, subLogger, 20, title) if err != nil { t.Fatalf("Unexpected error : %s", err) } @@ -421,6 +420,8 @@ query: > }, } + ctx := context.Background() + for _, ts := range tests { t.Run(ts.name, func(t *testing.T) { logger := log.New() @@ -472,7 +473,7 @@ query: > } }) - err = feedLoki(subLogger, ts.expectedLines, title) + err = feedLoki(ctx, subLogger, ts.expectedLines, title) if err != nil { t.Fatalf("Unexpected error : %s", err) } @@ -525,7 +526,9 @@ query: > time.Sleep(time.Second * 2) - err = feedLoki(subLogger, 1, title) + ctx := context.Background() + + err = feedLoki(ctx, subLogger, 1, title) if err != nil { t.Fatalf("Unexpected error : %s", err) } diff --git a/pkg/apiserver/alerts_test.go b/pkg/apiserver/alerts_test.go index 891eb3a8f4a..d801ff86f45 100644 --- a/pkg/apiserver/alerts_test.go +++ b/pkg/apiserver/alerts_test.go @@ -1,6 +1,7 @@ package apiserver import ( + "context" "encoding/json" "fmt" "net/http" @@ -45,8 +46,9 @@ func (l *LAPI) InsertAlertFromFile(t *testing.T, path string) *httptest.Response } func (l *LAPI) RecordResponse(t *testing.T, verb string, url string, body *strings.Reader, authType string) *httptest.ResponseRecorder { + ctx := context.Background() w := httptest.NewRecorder() - req, err := http.NewRequest(verb, url, body) + req, err := http.NewRequestWithContext(ctx, verb, url, body) require.NoError(t, err) switch authType { @@ -74,8 +76,9 @@ func 
LoginToTestAPI(t *testing.T, router *gin.Engine, config csconfig.Config) mo body := CreateTestMachine(t, router, "") ValidateMachine(t, "test", config.API.Server.DbConfig) + ctx := context.Background() w := httptest.NewRecorder() - req, _ := http.NewRequest(http.MethodPost, "/v1/watchers/login", strings.NewReader(body)) + req, _ := http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers/login", strings.NewReader(body)) req.Header.Add("User-Agent", UserAgent) router.ServeHTTP(w, req) @@ -95,13 +98,13 @@ func TestSimulatedAlert(t *testing.T) { lapi := SetupLAPITest(t) lapi.InsertAlertFromFile(t, "./tests/alert_minibulk+simul.json") alertContent := GetAlertReaderFromFile(t, "./tests/alert_minibulk+simul.json") - //exclude decision in simulation mode + // exclude decision in simulation mode w := lapi.RecordResponse(t, "GET", "/v1/alerts?simulated=false", alertContent, "password") assert.Equal(t, 200, w.Code) assert.Contains(t, w.Body.String(), `"message":"Ip 91.121.79.178 performed crowdsecurity/ssh-bf (6 events over `) assert.NotContains(t, w.Body.String(), `"message":"Ip 91.121.79.179 performed crowdsecurity/ssh-bf (6 events over `) - //include decision in simulation mode + // include decision in simulation mode w = lapi.RecordResponse(t, "GET", "/v1/alerts?simulated=true", alertContent, "password") assert.Equal(t, 200, w.Code) @@ -122,7 +125,9 @@ func TestCreateAlert(t *testing.T) { w = lapi.RecordResponse(t, http.MethodPost, "/v1/alerts", alertContent, "password") assert.Equal(t, 500, w.Code) - assert.Equal(t, `{"message":"validation failure list:\n0.scenario in body is required\n0.scenario_hash in body is required\n0.scenario_version in body is required\n0.simulated in body is required\n0.source in body is required"}`, w.Body.String()) + assert.Equal(t, + `{"message":"validation failure list:\n0.scenario in body is required\n0.scenario_hash in body is required\n0.scenario_version in body is required\n0.simulated in body is required\n0.source in body is 
required"}`, + w.Body.String()) // Create Valid Alert w = lapi.InsertAlertFromFile(t, "./tests/alert_sample.json") @@ -162,163 +167,163 @@ func TestAlertListFilters(t *testing.T) { lapi.InsertAlertFromFile(t, "./tests/alert_ssh-bf.json") alertContent := GetAlertReaderFromFile(t, "./tests/alert_ssh-bf.json") - //bad filter + // bad filter w := lapi.RecordResponse(t, "GET", "/v1/alerts?test=test", alertContent, "password") assert.Equal(t, 500, w.Code) assert.Equal(t, `{"message":"Filter parameter 'test' is unknown (=test): invalid filter"}`, w.Body.String()) - //get without filters + // get without filters w = lapi.RecordResponse(t, "GET", "/v1/alerts", emptyBody, "password") assert.Equal(t, 200, w.Code) - //check alert and decision + // check alert and decision assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) - //test decision_type filter (ok) + // test decision_type filter (ok) w = lapi.RecordResponse(t, "GET", "/v1/alerts?decision_type=ban", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) - //test decision_type filter (bad value) + // test decision_type filter (bad value) w = lapi.RecordResponse(t, "GET", "/v1/alerts?decision_type=ratata", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Equal(t, "null", w.Body.String()) - //test scope (ok) + // test scope (ok) w = lapi.RecordResponse(t, "GET", "/v1/alerts?scope=Ip", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) - //test 
scope (bad value) + // test scope (bad value) w = lapi.RecordResponse(t, "GET", "/v1/alerts?scope=rarara", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Equal(t, "null", w.Body.String()) - //test scenario (ok) + // test scenario (ok) w = lapi.RecordResponse(t, "GET", "/v1/alerts?scenario=crowdsecurity/ssh-bf", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) - //test scenario (bad value) + // test scenario (bad value) w = lapi.RecordResponse(t, "GET", "/v1/alerts?scenario=crowdsecurity/nope", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Equal(t, "null", w.Body.String()) - //test ip (ok) + // test ip (ok) w = lapi.RecordResponse(t, "GET", "/v1/alerts?ip=91.121.79.195", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) - //test ip (bad value) + // test ip (bad value) w = lapi.RecordResponse(t, "GET", "/v1/alerts?ip=99.122.77.195", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Equal(t, "null", w.Body.String()) - //test ip (invalid value) + // test ip (invalid value) w = lapi.RecordResponse(t, "GET", "/v1/alerts?ip=gruueq", emptyBody, "password") assert.Equal(t, 500, w.Code) assert.Equal(t, `{"message":"unable to convert 'gruueq' to int: invalid address: invalid ip address / range"}`, w.Body.String()) - //test range (ok) + // test range (ok) w = lapi.RecordResponse(t, "GET", "/v1/alerts?range=91.121.79.0/24&contains=false", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, 
w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) - //test range + // test range w = lapi.RecordResponse(t, "GET", "/v1/alerts?range=99.122.77.0/24&contains=false", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Equal(t, "null", w.Body.String()) - //test range (invalid value) + // test range (invalid value) w = lapi.RecordResponse(t, "GET", "/v1/alerts?range=ratata", emptyBody, "password") assert.Equal(t, 500, w.Code) assert.Equal(t, `{"message":"unable to convert 'ratata' to int: invalid address: invalid ip address / range"}`, w.Body.String()) - //test since (ok) + // test since (ok) w = lapi.RecordResponse(t, "GET", "/v1/alerts?since=1h", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) - //test since (ok but yields no results) + // test since (ok but yields no results) w = lapi.RecordResponse(t, "GET", "/v1/alerts?since=1ns", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Equal(t, "null", w.Body.String()) - //test since (invalid value) + // test since (invalid value) w = lapi.RecordResponse(t, "GET", "/v1/alerts?since=1zuzu", emptyBody, "password") assert.Equal(t, 500, w.Code) assert.Contains(t, w.Body.String(), `{"message":"while parsing duration: time: unknown unit`) - //test until (ok) + // test until (ok) w = lapi.RecordResponse(t, "GET", "/v1/alerts?until=1ns", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) - //test until (ok but no return) + // test until (ok but no return) w = lapi.RecordResponse(t, "GET", "/v1/alerts?until=1m", emptyBody, "password") assert.Equal(t, 200, 
w.Code) assert.Equal(t, "null", w.Body.String()) - //test until (invalid value) + // test until (invalid value) w = lapi.RecordResponse(t, "GET", "/v1/alerts?until=1zuzu", emptyBody, "password") assert.Equal(t, 500, w.Code) assert.Contains(t, w.Body.String(), `{"message":"while parsing duration: time: unknown unit`) - //test simulated (ok) + // test simulated (ok) w = lapi.RecordResponse(t, "GET", "/v1/alerts?simulated=true", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) - //test simulated (ok) + // test simulated (ok) w = lapi.RecordResponse(t, "GET", "/v1/alerts?simulated=false", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) - //test has active decision + // test has active decision w = lapi.RecordResponse(t, "GET", "/v1/alerts?has_active_decision=true", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) - //test has active decision + // test has active decision w = lapi.RecordResponse(t, "GET", "/v1/alerts?has_active_decision=false", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Equal(t, "null", w.Body.String()) - //test has active decision (invalid value) + // test has active decision (invalid value) w = lapi.RecordResponse(t, "GET", "/v1/alerts?has_active_decision=ratatqata", emptyBody, "password") assert.Equal(t, 500, w.Code) @@ -327,7 +332,7 @@ func TestAlertListFilters(t *testing.T) { func TestAlertBulkInsert(t 
*testing.T) { lapi := SetupLAPITest(t) - //insert a bulk of 20 alerts to trigger bulk insert + // insert a bulk of 20 alerts to trigger bulk insert lapi.InsertAlertFromFile(t, "./tests/alert_bulk.json") alertContent := GetAlertReaderFromFile(t, "./tests/alert_bulk.json") @@ -355,17 +360,19 @@ func TestCreateAlertErrors(t *testing.T) { lapi := SetupLAPITest(t) alertContent := GetAlertReaderFromFile(t, "./tests/alert_sample.json") - //test invalid bearer + ctx := context.Background() + + // test invalid bearer w := httptest.NewRecorder() - req, _ := http.NewRequest(http.MethodPost, "/v1/alerts", alertContent) + req, _ := http.NewRequestWithContext(ctx, http.MethodPost, "/v1/alerts", alertContent) req.Header.Add("User-Agent", UserAgent) req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", "ratata")) lapi.router.ServeHTTP(w, req) assert.Equal(t, 401, w.Code) - //test invalid bearer + // test invalid bearer w = httptest.NewRecorder() - req, _ = http.NewRequest(http.MethodPost, "/v1/alerts", alertContent) + req, _ = http.NewRequestWithContext(ctx, http.MethodPost, "/v1/alerts", alertContent) req.Header.Add("User-Agent", UserAgent) req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", lapi.loginResp.Token+"s")) lapi.router.ServeHTTP(w, req) @@ -376,9 +383,11 @@ func TestDeleteAlert(t *testing.T) { lapi := SetupLAPITest(t) lapi.InsertAlertFromFile(t, "./tests/alert_sample.json") + ctx := context.Background() + // Fail Delete Alert w := httptest.NewRecorder() - req, _ := http.NewRequest(http.MethodDelete, "/v1/alerts", strings.NewReader("")) + req, _ := http.NewRequestWithContext(ctx, http.MethodDelete, "/v1/alerts", strings.NewReader("")) AddAuthHeaders(req, lapi.loginResp) req.RemoteAddr = "127.0.0.2:4242" lapi.router.ServeHTTP(w, req) @@ -387,7 +396,7 @@ func TestDeleteAlert(t *testing.T) { // Delete Alert w = httptest.NewRecorder() - req, _ = http.NewRequest(http.MethodDelete, "/v1/alerts", strings.NewReader("")) + req, _ = http.NewRequestWithContext(ctx, 
http.MethodDelete, "/v1/alerts", strings.NewReader("")) AddAuthHeaders(req, lapi.loginResp) req.RemoteAddr = "127.0.0.1:4242" lapi.router.ServeHTTP(w, req) @@ -399,9 +408,11 @@ func TestDeleteAlertByID(t *testing.T) { lapi := SetupLAPITest(t) lapi.InsertAlertFromFile(t, "./tests/alert_sample.json") + ctx := context.Background() + // Fail Delete Alert w := httptest.NewRecorder() - req, _ := http.NewRequest(http.MethodDelete, "/v1/alerts/1", strings.NewReader("")) + req, _ := http.NewRequestWithContext(ctx, http.MethodDelete, "/v1/alerts/1", strings.NewReader("")) AddAuthHeaders(req, lapi.loginResp) req.RemoteAddr = "127.0.0.2:4242" lapi.router.ServeHTTP(w, req) @@ -410,7 +421,7 @@ func TestDeleteAlertByID(t *testing.T) { // Delete Alert w = httptest.NewRecorder() - req, _ = http.NewRequest(http.MethodDelete, "/v1/alerts/1", strings.NewReader("")) + req, _ = http.NewRequestWithContext(ctx, http.MethodDelete, "/v1/alerts/1", strings.NewReader("")) AddAuthHeaders(req, lapi.loginResp) req.RemoteAddr = "127.0.0.1:4242" lapi.router.ServeHTTP(w, req) @@ -439,9 +450,11 @@ func TestDeleteAlertTrustedIPS(t *testing.T) { loginResp: loginResp, } + ctx := context.Background() + assertAlertDeleteFailedFromIP := func(ip string) { w := httptest.NewRecorder() - req, _ := http.NewRequest(http.MethodDelete, "/v1/alerts", strings.NewReader("")) + req, _ := http.NewRequestWithContext(ctx, http.MethodDelete, "/v1/alerts", strings.NewReader("")) AddAuthHeaders(req, loginResp) req.RemoteAddr = ip + ":1234" @@ -453,7 +466,7 @@ func TestDeleteAlertTrustedIPS(t *testing.T) { assertAlertDeletedFromIP := func(ip string) { w := httptest.NewRecorder() - req, _ := http.NewRequest(http.MethodDelete, "/v1/alerts", strings.NewReader("")) + req, _ := http.NewRequestWithContext(ctx, http.MethodDelete, "/v1/alerts", strings.NewReader("")) AddAuthHeaders(req, loginResp) req.RemoteAddr = ip + ":1234" diff --git a/pkg/apiserver/api_key_test.go b/pkg/apiserver/api_key_test.go index 883ff21298d..10e75ae47f1 
100644 --- a/pkg/apiserver/api_key_test.go +++ b/pkg/apiserver/api_key_test.go @@ -1,6 +1,7 @@ package apiserver import ( + "context" "net/http" "net/http/httptest" "strings" @@ -12,11 +13,13 @@ import ( func TestAPIKey(t *testing.T) { router, config := NewAPITest(t) + ctx := context.Background() + APIKey := CreateTestBouncer(t, config.API.Server.DbConfig) // Login with empty token w := httptest.NewRecorder() - req, _ := http.NewRequest(http.MethodGet, "/v1/decisions", strings.NewReader("")) + req, _ := http.NewRequestWithContext(ctx, http.MethodGet, "/v1/decisions", strings.NewReader("")) req.Header.Add("User-Agent", UserAgent) router.ServeHTTP(w, req) @@ -25,7 +28,7 @@ func TestAPIKey(t *testing.T) { // Login with invalid token w = httptest.NewRecorder() - req, _ = http.NewRequest(http.MethodGet, "/v1/decisions", strings.NewReader("")) + req, _ = http.NewRequestWithContext(ctx, http.MethodGet, "/v1/decisions", strings.NewReader("")) req.Header.Add("User-Agent", UserAgent) req.Header.Add("X-Api-Key", "a1b2c3d4e5f6") router.ServeHTTP(w, req) @@ -35,7 +38,7 @@ func TestAPIKey(t *testing.T) { // Login with valid token w = httptest.NewRecorder() - req, _ = http.NewRequest(http.MethodGet, "/v1/decisions", strings.NewReader("")) + req, _ = http.NewRequestWithContext(ctx, http.MethodGet, "/v1/decisions", strings.NewReader("")) req.Header.Add("User-Agent", UserAgent) req.Header.Add("X-Api-Key", APIKey) router.ServeHTTP(w, req) diff --git a/pkg/apiserver/apiserver_test.go b/pkg/apiserver/apiserver_test.go index f48791ebcb8..89c75f35d21 100644 --- a/pkg/apiserver/apiserver_test.go +++ b/pkg/apiserver/apiserver_test.go @@ -278,8 +278,10 @@ func CreateTestMachine(t *testing.T, router *gin.Engine, token string) string { body := string(b) + ctx := context.Background() + w := httptest.NewRecorder() - req, _ := http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(body)) + req, _ := http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers", 
strings.NewReader(body)) req.Header.Set("User-Agent", UserAgent) router.ServeHTTP(w, req) @@ -323,8 +325,10 @@ func TestWithWrongFlushConfig(t *testing.T) { func TestUnknownPath(t *testing.T) { router, _ := NewAPITest(t) + ctx := context.Background() + w := httptest.NewRecorder() - req, _ := http.NewRequest(http.MethodGet, "/test", nil) + req, _ := http.NewRequestWithContext(ctx, http.MethodGet, "/test", nil) req.Header.Set("User-Agent", UserAgent) router.ServeHTTP(w, req) @@ -380,8 +384,10 @@ func TestLoggingDebugToFileConfig(t *testing.T) { require.NoError(t, err) require.NotNil(t, api) + ctx := context.Background() + w := httptest.NewRecorder() - req, _ := http.NewRequest(http.MethodGet, "/test42", nil) + req, _ := http.NewRequestWithContext(ctx, http.MethodGet, "/test42", nil) req.Header.Set("User-Agent", UserAgent) api.router.ServeHTTP(w, req) assert.Equal(t, 404, w.Code) @@ -430,8 +436,10 @@ func TestLoggingErrorToFileConfig(t *testing.T) { require.NoError(t, err) require.NotNil(t, api) + ctx := context.Background() + w := httptest.NewRecorder() - req, _ := http.NewRequest(http.MethodGet, "/test42", nil) + req, _ := http.NewRequestWithContext(ctx, http.MethodGet, "/test42", nil) req.Header.Set("User-Agent", UserAgent) api.router.ServeHTTP(w, req) assert.Equal(t, http.StatusNotFound, w.Code) diff --git a/pkg/apiserver/jwt_test.go b/pkg/apiserver/jwt_test.go index aa6e84e416b..293cc38bd2c 100644 --- a/pkg/apiserver/jwt_test.go +++ b/pkg/apiserver/jwt_test.go @@ -1,6 +1,7 @@ package apiserver import ( + "context" "net/http" "net/http/httptest" "strings" @@ -12,11 +13,13 @@ import ( func TestLogin(t *testing.T) { router, config := NewAPITest(t) + ctx := context.Background() + body := CreateTestMachine(t, router, "") // Login with machine not validated yet w := httptest.NewRecorder() - req, _ := http.NewRequest(http.MethodPost, "/v1/watchers/login", strings.NewReader(body)) + req, _ := http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers/login", 
strings.NewReader(body)) req.Header.Add("User-Agent", UserAgent) router.ServeHTTP(w, req) @@ -25,7 +28,7 @@ func TestLogin(t *testing.T) { // Login with machine not exist w = httptest.NewRecorder() - req, _ = http.NewRequest(http.MethodPost, "/v1/watchers/login", strings.NewReader(`{"machine_id": "test1", "password": "test1"}`)) + req, _ = http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers/login", strings.NewReader(`{"machine_id": "test1", "password": "test1"}`)) req.Header.Add("User-Agent", UserAgent) router.ServeHTTP(w, req) @@ -34,7 +37,7 @@ func TestLogin(t *testing.T) { // Login with invalid body w = httptest.NewRecorder() - req, _ = http.NewRequest(http.MethodPost, "/v1/watchers/login", strings.NewReader("test")) + req, _ = http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers/login", strings.NewReader("test")) req.Header.Add("User-Agent", UserAgent) router.ServeHTTP(w, req) @@ -43,19 +46,19 @@ func TestLogin(t *testing.T) { // Login with invalid format w = httptest.NewRecorder() - req, _ = http.NewRequest(http.MethodPost, "/v1/watchers/login", strings.NewReader(`{"machine_id": "test1"}`)) + req, _ = http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers/login", strings.NewReader(`{"machine_id": "test1"}`)) req.Header.Add("User-Agent", UserAgent) router.ServeHTTP(w, req) assert.Equal(t, 401, w.Code) assert.Equal(t, `{"code":401,"message":"validation failure list:\npassword in body is required"}`, w.Body.String()) - //Validate machine + // Validate machine ValidateMachine(t, "test", config.API.Server.DbConfig) // Login with invalid password w = httptest.NewRecorder() - req, _ = http.NewRequest(http.MethodPost, "/v1/watchers/login", strings.NewReader(`{"machine_id": "test", "password": "test1"}`)) + req, _ = http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers/login", strings.NewReader(`{"machine_id": "test", "password": "test1"}`)) req.Header.Add("User-Agent", UserAgent) router.ServeHTTP(w, req) @@ -64,7 +67,7 @@ func 
TestLogin(t *testing.T) { // Login with valid machine w = httptest.NewRecorder() - req, _ = http.NewRequest(http.MethodPost, "/v1/watchers/login", strings.NewReader(body)) + req, _ = http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers/login", strings.NewReader(body)) req.Header.Add("User-Agent", UserAgent) router.ServeHTTP(w, req) @@ -74,7 +77,7 @@ func TestLogin(t *testing.T) { // Login with valid machine + scenarios w = httptest.NewRecorder() - req, _ = http.NewRequest(http.MethodPost, "/v1/watchers/login", strings.NewReader(`{"machine_id": "test", "password": "test", "scenarios": ["crowdsecurity/test", "crowdsecurity/test2"]}`)) + req, _ = http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers/login", strings.NewReader(`{"machine_id": "test", "password": "test", "scenarios": ["crowdsecurity/test", "crowdsecurity/test2"]}`)) req.Header.Add("User-Agent", UserAgent) router.ServeHTTP(w, req) diff --git a/pkg/apiserver/machines_test.go b/pkg/apiserver/machines_test.go index 041a6bee528..44c370732c7 100644 --- a/pkg/apiserver/machines_test.go +++ b/pkg/apiserver/machines_test.go @@ -1,6 +1,7 @@ package apiserver import ( + "context" "encoding/json" "net/http" "net/http/httptest" @@ -16,9 +17,11 @@ import ( func TestCreateMachine(t *testing.T) { router, _ := NewAPITest(t) + ctx := context.Background() + // Create machine with invalid format w := httptest.NewRecorder() - req, _ := http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader("test")) + req, _ := http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers", strings.NewReader("test")) req.Header.Add("User-Agent", UserAgent) router.ServeHTTP(w, req) @@ -27,7 +30,7 @@ func TestCreateMachine(t *testing.T) { // Create machine with invalid input w = httptest.NewRecorder() - req, _ = http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(`{"test": "test"}`)) + req, _ = http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers", strings.NewReader(`{"test": "test"}`)) 
req.Header.Add("User-Agent", UserAgent) router.ServeHTTP(w, req) @@ -41,7 +44,7 @@ func TestCreateMachine(t *testing.T) { body := string(b) w = httptest.NewRecorder() - req, _ = http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(body)) + req, _ = http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers", strings.NewReader(body)) req.Header.Add("User-Agent", UserAgent) router.ServeHTTP(w, req) @@ -52,6 +55,9 @@ func TestCreateMachine(t *testing.T) { func TestCreateMachineWithForwardedFor(t *testing.T) { router, config := NewAPITestForwardedFor(t) router.TrustedPlatform = "X-Real-IP" + + ctx := context.Background() + // Create machine b, err := json.Marshal(MachineTest) require.NoError(t, err) @@ -59,7 +65,7 @@ func TestCreateMachineWithForwardedFor(t *testing.T) { body := string(b) w := httptest.NewRecorder() - req, _ := http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(body)) + req, _ := http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers", strings.NewReader(body)) req.Header.Add("User-Agent", UserAgent) req.Header.Add("X-Real-Ip", "1.1.1.1") router.ServeHTTP(w, req) @@ -75,6 +81,8 @@ func TestCreateMachineWithForwardedFor(t *testing.T) { func TestCreateMachineWithForwardedForNoConfig(t *testing.T) { router, config := NewAPITest(t) + ctx := context.Background() + // Create machine b, err := json.Marshal(MachineTest) require.NoError(t, err) @@ -82,7 +90,7 @@ func TestCreateMachineWithForwardedForNoConfig(t *testing.T) { body := string(b) w := httptest.NewRecorder() - req, _ := http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(body)) + req, _ := http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers", strings.NewReader(body)) req.Header.Add("User-Agent", UserAgent) req.Header.Add("X-Real-IP", "1.1.1.1") router.ServeHTTP(w, req) @@ -92,14 +100,16 @@ func TestCreateMachineWithForwardedForNoConfig(t *testing.T) { ip := GetMachineIP(t, *MachineTest.MachineID, config.API.Server.DbConfig) - //For 
some reason, the IP is empty when running tests - //if no forwarded-for headers are present + // For some reason, the IP is empty when running tests + // if no forwarded-for headers are present assert.Equal(t, "", ip) } func TestCreateMachineWithoutForwardedFor(t *testing.T) { router, config := NewAPITestForwardedFor(t) + ctx := context.Background() + // Create machine b, err := json.Marshal(MachineTest) require.NoError(t, err) @@ -107,7 +117,7 @@ func TestCreateMachineWithoutForwardedFor(t *testing.T) { body := string(b) w := httptest.NewRecorder() - req, _ := http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(body)) + req, _ := http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers", strings.NewReader(body)) req.Header.Add("User-Agent", UserAgent) router.ServeHTTP(w, req) @@ -116,23 +126,25 @@ func TestCreateMachineWithoutForwardedFor(t *testing.T) { ip := GetMachineIP(t, *MachineTest.MachineID, config.API.Server.DbConfig) - //For some reason, the IP is empty when running tests - //if no forwarded-for headers are present + // For some reason, the IP is empty when running tests + // if no forwarded-for headers are present assert.Equal(t, "", ip) } func TestCreateMachineAlreadyExist(t *testing.T) { router, _ := NewAPITest(t) + ctx := context.Background() + body := CreateTestMachine(t, router, "") w := httptest.NewRecorder() - req, _ := http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(body)) + req, _ := http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers", strings.NewReader(body)) req.Header.Add("User-Agent", UserAgent) router.ServeHTTP(w, req) w = httptest.NewRecorder() - req, _ = http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(body)) + req, _ = http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers", strings.NewReader(body)) req.Header.Add("User-Agent", UserAgent) router.ServeHTTP(w, req) @@ -143,7 +155,9 @@ func TestCreateMachineAlreadyExist(t *testing.T) { func TestAutoRegistration(t 
*testing.T) { router, _ := NewAPITest(t) - //Invalid registration token / valid source IP + ctx := context.Background() + + // Invalid registration token / valid source IP regReq := MachineTest regReq.RegistrationToken = invalidRegistrationToken b, err := json.Marshal(regReq) @@ -152,14 +166,14 @@ func TestAutoRegistration(t *testing.T) { body := string(b) w := httptest.NewRecorder() - req, _ := http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(body)) + req, _ := http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers", strings.NewReader(body)) req.Header.Add("User-Agent", UserAgent) req.RemoteAddr = "127.0.0.1:4242" router.ServeHTTP(w, req) assert.Equal(t, http.StatusUnauthorized, w.Code) - //Invalid registration token / invalid source IP + // Invalid registration token / invalid source IP regReq = MachineTest regReq.RegistrationToken = invalidRegistrationToken b, err = json.Marshal(regReq) @@ -168,14 +182,14 @@ func TestAutoRegistration(t *testing.T) { body = string(b) w = httptest.NewRecorder() - req, _ = http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(body)) + req, _ = http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers", strings.NewReader(body)) req.Header.Add("User-Agent", UserAgent) req.RemoteAddr = "42.42.42.42:4242" router.ServeHTTP(w, req) assert.Equal(t, http.StatusUnauthorized, w.Code) - //valid registration token / invalid source IP + // valid registration token / invalid source IP regReq = MachineTest regReq.RegistrationToken = validRegistrationToken b, err = json.Marshal(regReq) @@ -184,14 +198,14 @@ func TestAutoRegistration(t *testing.T) { body = string(b) w = httptest.NewRecorder() - req, _ = http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(body)) + req, _ = http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers", strings.NewReader(body)) req.Header.Add("User-Agent", UserAgent) req.RemoteAddr = "42.42.42.42:4242" router.ServeHTTP(w, req) assert.Equal(t, 
http.StatusUnauthorized, w.Code) - //Valid registration token / valid source IP + // Valid registration token / valid source IP regReq = MachineTest regReq.RegistrationToken = validRegistrationToken b, err = json.Marshal(regReq) @@ -200,14 +214,14 @@ func TestAutoRegistration(t *testing.T) { body = string(b) w = httptest.NewRecorder() - req, _ = http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(body)) + req, _ = http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers", strings.NewReader(body)) req.Header.Add("User-Agent", UserAgent) req.RemoteAddr = "127.0.0.1:4242" router.ServeHTTP(w, req) assert.Equal(t, http.StatusAccepted, w.Code) - //No token / valid source IP + // No token / valid source IP regReq = MachineTest regReq.MachineID = ptr.Of("test2") b, err = json.Marshal(regReq) @@ -216,7 +230,7 @@ func TestAutoRegistration(t *testing.T) { body = string(b) w = httptest.NewRecorder() - req, _ = http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(body)) + req, _ = http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers", strings.NewReader(body)) req.Header.Add("User-Agent", UserAgent) req.RemoteAddr = "127.0.0.1:4242" router.ServeHTTP(w, req) From be391a3da8301cc8f3eef2c3740cdebc8aa295f8 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 19 Sep 2024 11:09:29 +0200 Subject: [PATCH 293/318] context propagation: pass context to NewAPIC() (#3231) --- cmd/crowdsec-cli/clipapi/papi.go | 12 +++++++----- pkg/apiserver/apic.go | 4 ++-- pkg/apiserver/apic_test.go | 4 +++- pkg/apiserver/apiserver.go | 2 +- 4 files changed, 13 insertions(+), 9 deletions(-) diff --git a/cmd/crowdsec-cli/clipapi/papi.go b/cmd/crowdsec-cli/clipapi/papi.go index 747b8c01b9b..c0f08157f31 100644 --- a/cmd/crowdsec-cli/clipapi/papi.go +++ b/cmd/crowdsec-cli/clipapi/papi.go @@ -59,7 +59,7 @@ func (cli *cliPapi) NewCommand() *cobra.Command { func (cli *cliPapi) Status(ctx context.Context, out io.Writer, db 
*database.Client) error { cfg := cli.cfg() - apic, err := apiserver.NewAPIC(cfg.API.Server.OnlineClient, db, cfg.API.Server.ConsoleConfig, cfg.API.Server.CapiWhitelists) + apic, err := apiserver.NewAPIC(ctx, cfg.API.Server.OnlineClient, db, cfg.API.Server.ConsoleConfig, cfg.API.Server.CapiWhitelists) if err != nil { return fmt.Errorf("unable to initialize API client: %w", err) } @@ -118,11 +118,11 @@ func (cli *cliPapi) newStatusCmd() *cobra.Command { return cmd } -func (cli *cliPapi) sync(out io.Writer, db *database.Client) error { +func (cli *cliPapi) sync(ctx context.Context, out io.Writer, db *database.Client) error { cfg := cli.cfg() t := tomb.Tomb{} - apic, err := apiserver.NewAPIC(cfg.API.Server.OnlineClient, db, cfg.API.Server.ConsoleConfig, cfg.API.Server.CapiWhitelists) + apic, err := apiserver.NewAPIC(ctx, cfg.API.Server.OnlineClient, db, cfg.API.Server.ConsoleConfig, cfg.API.Server.CapiWhitelists) if err != nil { return fmt.Errorf("unable to initialize API client: %w", err) } @@ -159,12 +159,14 @@ func (cli *cliPapi) newSyncCmd() *cobra.Command { DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { cfg := cli.cfg() - db, err := require.DBClient(cmd.Context(), cfg.DbConfig) + ctx := cmd.Context() + + db, err := require.DBClient(ctx, cfg.DbConfig) if err != nil { return err } - return cli.sync(color.Output, db) + return cli.sync(ctx, color.Output, db) }, } diff --git a/pkg/apiserver/apic.go b/pkg/apiserver/apic.go index 73061637ad9..3ed2e12ea54 100644 --- a/pkg/apiserver/apic.go +++ b/pkg/apiserver/apic.go @@ -174,7 +174,7 @@ func alertToSignal(alert *models.Alert, scenarioTrust string, shareContext bool) return signal } -func NewAPIC(config *csconfig.OnlineApiClientCfg, dbClient *database.Client, consoleConfig *csconfig.ConsoleConfig, apicWhitelist *csconfig.CapiWhitelist) (*apic, error) { +func NewAPIC(ctx context.Context, config *csconfig.OnlineApiClientCfg, dbClient *database.Client, consoleConfig *csconfig.ConsoleConfig, 
apicWhitelist *csconfig.CapiWhitelist) (*apic, error) { var err error ret := &apic{ @@ -237,7 +237,7 @@ func NewAPIC(config *csconfig.OnlineApiClientCfg, dbClient *database.Client, con return ret, fmt.Errorf("get scenario in db: %w", err) } - authResp, _, err := ret.apiClient.Auth.AuthenticateWatcher(context.Background(), models.WatcherAuthRequest{ + authResp, _, err := ret.apiClient.Auth.AuthenticateWatcher(ctx, models.WatcherAuthRequest{ MachineID: &config.Credentials.Login, Password: &password, Scenarios: scenarios, diff --git a/pkg/apiserver/apic_test.go b/pkg/apiserver/apic_test.go index 058e25079e0..105d295dd0d 100644 --- a/pkg/apiserver/apic_test.go +++ b/pkg/apiserver/apic_test.go @@ -230,6 +230,8 @@ func TestNewAPIC(t *testing.T) { }, } + ctx := context.Background() + for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { setConfig() @@ -246,7 +248,7 @@ func TestNewAPIC(t *testing.T) { ), )) tc.action() - _, err := NewAPIC(testConfig, tc.args.dbClient, tc.args.consoleConfig, nil) + _, err := NewAPIC(ctx, testConfig, tc.args.dbClient, tc.args.consoleConfig, nil) cstest.RequireErrorContains(t, err, tc.expectedErr) }) } diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go index 42dcb219379..8bf406e0a79 100644 --- a/pkg/apiserver/apiserver.go +++ b/pkg/apiserver/apiserver.go @@ -249,7 +249,7 @@ func NewServer(config *csconfig.LocalApiServerCfg) (*APIServer, error) { if config.OnlineClient != nil && config.OnlineClient.Credentials != nil { log.Printf("Loading CAPI manager") - apiClient, err = NewAPIC(config.OnlineClient, dbClient, config.ConsoleConfig, config.CapiWhitelists) + apiClient, err = NewAPIC(ctx, config.OnlineClient, dbClient, config.ConsoleConfig, config.CapiWhitelists) if err != nil { return nil, err } From 2a60c9903692115cf847429aa7a2b332d339fe7b Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 19 Sep 2024 11:11:50 +0200 Subject: [PATCH 294/318] context propagation: pkg/database/flush 
(#3235) --- cmd/crowdsec-cli/clialert/alerts.go | 6 ++-- pkg/apiserver/apiserver.go | 2 +- pkg/database/flush.go | 43 +++++++++++++++-------------- 3 files changed, 27 insertions(+), 24 deletions(-) diff --git a/cmd/crowdsec-cli/clialert/alerts.go b/cmd/crowdsec-cli/clialert/alerts.go index 0965e1e13d0..dbb7ca14db5 100644 --- a/cmd/crowdsec-cli/clialert/alerts.go +++ b/cmd/crowdsec-cli/clialert/alerts.go @@ -575,15 +575,17 @@ func (cli *cliAlerts) newFlushCmd() *cobra.Command { DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { cfg := cli.cfg() + ctx := cmd.Context() + if err := require.LAPI(cfg); err != nil { return err } - db, err := require.DBClient(cmd.Context(), cfg.DbConfig) + db, err := require.DBClient(ctx, cfg.DbConfig) if err != nil { return err } log.Info("Flushing alerts. !! This may take a long time !!") - err = db.FlushAlerts(maxAge, maxItems) + err = db.FlushAlerts(ctx, maxAge, maxItems) if err != nil { return fmt.Errorf("unable to flush alerts: %w", err) } diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go index 8bf406e0a79..95d18ccb028 100644 --- a/pkg/apiserver/apiserver.go +++ b/pkg/apiserver/apiserver.go @@ -170,7 +170,7 @@ func NewServer(config *csconfig.LocalApiServerCfg) (*APIServer, error) { } if config.DbConfig.Flush != nil { - flushScheduler, err = dbClient.StartFlushScheduler(config.DbConfig.Flush) + flushScheduler, err = dbClient.StartFlushScheduler(ctx, config.DbConfig.Flush) if err != nil { return nil, err } diff --git a/pkg/database/flush.go b/pkg/database/flush.go index 5d53d10c942..46c8edfa308 100644 --- a/pkg/database/flush.go +++ b/pkg/database/flush.go @@ -1,6 +1,7 @@ package database import ( + "context" "errors" "fmt" "time" @@ -26,7 +27,7 @@ const ( flushInterval = 1 * time.Minute ) -func (c *Client) StartFlushScheduler(config *csconfig.FlushDBCfg) (*gocron.Scheduler, error) { +func (c *Client) StartFlushScheduler(ctx context.Context, config *csconfig.FlushDBCfg) (*gocron.Scheduler, 
error) { maxItems := 0 maxAge := "" @@ -45,7 +46,7 @@ func (c *Client) StartFlushScheduler(config *csconfig.FlushDBCfg) (*gocron.Sched // Init & Start cronjob every minute for alerts scheduler := gocron.NewScheduler(time.UTC) - job, err := scheduler.Every(1).Minute().Do(c.FlushAlerts, maxAge, maxItems) + job, err := scheduler.Every(1).Minute().Do(c.FlushAlerts, ctx, maxAge, maxItems) if err != nil { return nil, fmt.Errorf("while starting FlushAlerts scheduler: %w", err) } @@ -100,14 +101,14 @@ func (c *Client) StartFlushScheduler(config *csconfig.FlushDBCfg) (*gocron.Sched } } - baJob, err := scheduler.Every(flushInterval).Do(c.FlushAgentsAndBouncers, config.AgentsGC, config.BouncersGC) + baJob, err := scheduler.Every(flushInterval).Do(c.FlushAgentsAndBouncers, ctx, config.AgentsGC, config.BouncersGC) if err != nil { return nil, fmt.Errorf("while starting FlushAgentsAndBouncers scheduler: %w", err) } baJob.SingletonMode() - metricsJob, err := scheduler.Every(flushInterval).Do(c.flushMetrics, config.MetricsMaxAge) + metricsJob, err := scheduler.Every(flushInterval).Do(c.flushMetrics, ctx, config.MetricsMaxAge) if err != nil { return nil, fmt.Errorf("while starting flushMetrics scheduler: %w", err) } @@ -120,7 +121,7 @@ func (c *Client) StartFlushScheduler(config *csconfig.FlushDBCfg) (*gocron.Sched } // flushMetrics deletes metrics older than maxAge, regardless if they have been pushed to CAPI or not -func (c *Client) flushMetrics(maxAge *time.Duration) { +func (c *Client) flushMetrics(ctx context.Context, maxAge *time.Duration) { if maxAge == nil { maxAge = ptr.Of(defaultMetricsMaxAge) } @@ -129,7 +130,7 @@ func (c *Client) flushMetrics(maxAge *time.Duration) { deleted, err := c.Ent.Metric.Delete().Where( metric.ReceivedAtLTE(time.Now().UTC().Add(-*maxAge)), - ).Exec(c.CTX) + ).Exec(ctx) if err != nil { c.Log.Errorf("while flushing metrics: %s", err) return @@ -140,10 +141,10 @@ func (c *Client) flushMetrics(maxAge *time.Duration) { } } -func (c *Client) 
FlushOrphans() { +func (c *Client) FlushOrphans(ctx context.Context) { /* While it has only been linked to some very corner-case bug : https://github.com/crowdsecurity/crowdsec/issues/778 */ /* We want to take care of orphaned events for which the parent alert/decision has been deleted */ - eventsCount, err := c.Ent.Event.Delete().Where(event.Not(event.HasOwner())).Exec(c.CTX) + eventsCount, err := c.Ent.Event.Delete().Where(event.Not(event.HasOwner())).Exec(ctx) if err != nil { c.Log.Warningf("error while deleting orphan events: %s", err) return @@ -154,7 +155,7 @@ func (c *Client) FlushOrphans() { } eventsCount, err = c.Ent.Decision.Delete().Where( - decision.Not(decision.HasOwner())).Where(decision.UntilLTE(time.Now().UTC())).Exec(c.CTX) + decision.Not(decision.HasOwner())).Where(decision.UntilLTE(time.Now().UTC())).Exec(ctx) if err != nil { c.Log.Warningf("error while deleting orphan decisions: %s", err) return @@ -165,7 +166,7 @@ func (c *Client) FlushOrphans() { } } -func (c *Client) flushBouncers(authType string, duration *time.Duration) { +func (c *Client) flushBouncers(ctx context.Context, authType string, duration *time.Duration) { if duration == nil { return } @@ -174,7 +175,7 @@ func (c *Client) flushBouncers(authType string, duration *time.Duration) { bouncer.LastPullLTE(time.Now().UTC().Add(-*duration)), ).Where( bouncer.AuthTypeEQ(authType), - ).Exec(c.CTX) + ).Exec(ctx) if err != nil { c.Log.Errorf("while auto-deleting expired bouncers (%s): %s", authType, err) return @@ -185,7 +186,7 @@ func (c *Client) flushBouncers(authType string, duration *time.Duration) { } } -func (c *Client) flushAgents(authType string, duration *time.Duration) { +func (c *Client) flushAgents(ctx context.Context, authType string, duration *time.Duration) { if duration == nil { return } @@ -194,7 +195,7 @@ func (c *Client) flushAgents(authType string, duration *time.Duration) { machine.LastHeartbeatLTE(time.Now().UTC().Add(-*duration)), machine.Not(machine.HasAlerts()), 
machine.AuthTypeEQ(authType), - ).Exec(c.CTX) + ).Exec(ctx) if err != nil { c.Log.Errorf("while auto-deleting expired machines (%s): %s", authType, err) return @@ -205,23 +206,23 @@ func (c *Client) flushAgents(authType string, duration *time.Duration) { } } -func (c *Client) FlushAgentsAndBouncers(agentsCfg *csconfig.AuthGCCfg, bouncersCfg *csconfig.AuthGCCfg) error { +func (c *Client) FlushAgentsAndBouncers(ctx context.Context, agentsCfg *csconfig.AuthGCCfg, bouncersCfg *csconfig.AuthGCCfg) error { log.Debug("starting FlushAgentsAndBouncers") if agentsCfg != nil { - c.flushAgents(types.TlsAuthType, agentsCfg.CertDuration) - c.flushAgents(types.PasswordAuthType, agentsCfg.LoginPasswordDuration) + c.flushAgents(ctx, types.TlsAuthType, agentsCfg.CertDuration) + c.flushAgents(ctx, types.PasswordAuthType, agentsCfg.LoginPasswordDuration) } if bouncersCfg != nil { - c.flushBouncers(types.TlsAuthType, bouncersCfg.CertDuration) - c.flushBouncers(types.ApiKeyAuthType, bouncersCfg.ApiDuration) + c.flushBouncers(ctx, types.TlsAuthType, bouncersCfg.CertDuration) + c.flushBouncers(ctx, types.ApiKeyAuthType, bouncersCfg.ApiDuration) } return nil } -func (c *Client) FlushAlerts(MaxAge string, MaxItems int) error { +func (c *Client) FlushAlerts(ctx context.Context, MaxAge string, MaxItems int) error { var ( deletedByAge int deletedByNbItem int @@ -235,7 +236,7 @@ func (c *Client) FlushAlerts(MaxAge string, MaxItems int) error { } c.Log.Debug("Flushing orphan alerts") - c.FlushOrphans() + c.FlushOrphans(ctx) c.Log.Debug("Done flushing orphan alerts") totalAlerts, err = c.TotalAlerts() @@ -287,7 +288,7 @@ func (c *Client) FlushAlerts(MaxAge string, MaxItems int) error { if maxid > 0 { // This may lead to orphan alerts (at least on MySQL), but the next time the flush job will run, they will be deleted - deletedByNbItem, err = c.Ent.Alert.Delete().Where(alert.IDLT(maxid)).Exec(c.CTX) + deletedByNbItem, err = c.Ent.Alert.Delete().Where(alert.IDLT(maxid)).Exec(ctx) if err != nil { 
c.Log.Errorf("FlushAlerts: Could not delete alerts: %s", err) return fmt.Errorf("could not delete alerts: %w", err) From b4a2403fdfab37560a15fae9d6208f67b1f22b5e Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 19 Sep 2024 11:14:33 +0200 Subject: [PATCH 295/318] context propagation: bouncer list (#3236) --- cmd/crowdsec-cli/clibouncer/bouncers.go | 14 ++++++++------ cmd/crowdsec-cli/clisupport/support.go | 6 +++--- pkg/apiserver/apic_metrics.go | 16 ++++++++++------ pkg/apiserver/apic_test.go | 8 +++++--- pkg/database/bouncers.go | 4 ++-- 5 files changed, 28 insertions(+), 20 deletions(-) diff --git a/cmd/crowdsec-cli/clibouncer/bouncers.go b/cmd/crowdsec-cli/clibouncer/bouncers.go index 0c0fc8851c9..89e91b63911 100644 --- a/cmd/crowdsec-cli/clibouncer/bouncers.go +++ b/cmd/crowdsec-cli/clibouncer/bouncers.go @@ -1,6 +1,7 @@ package clibouncer import ( + "context" "encoding/csv" "encoding/json" "errors" @@ -159,11 +160,11 @@ func (cli *cliBouncers) listCSV(out io.Writer, bouncers ent.Bouncers) error { return nil } -func (cli *cliBouncers) List(out io.Writer, db *database.Client) error { +func (cli *cliBouncers) List(ctx context.Context, out io.Writer, db *database.Client) error { // XXX: must use the provided db object, the one in the struct might be nil // (calling List directly skips the PersistentPreRunE) - bouncers, err := db.ListBouncers() + bouncers, err := db.ListBouncers(ctx) if err != nil { return fmt.Errorf("unable to list bouncers: %w", err) } @@ -199,8 +200,8 @@ func (cli *cliBouncers) newListCmd() *cobra.Command { Example: `cscli bouncers list`, Args: cobra.ExactArgs(0), DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, _ []string) error { - return cli.List(color.Output, cli.db) + RunE: func(cmd *cobra.Command, _ []string) error { + return cli.List(cmd.Context(), color.Output, cli.db) }, } @@ -271,6 +272,7 @@ func (cli *cliBouncers) validBouncerID(cmd *cobra.Command, args []string, toComp var err error cfg 
:= cli.cfg() + ctx := cmd.Context() // need to load config and db because PersistentPreRunE is not called for completions @@ -279,13 +281,13 @@ func (cli *cliBouncers) validBouncerID(cmd *cobra.Command, args []string, toComp return nil, cobra.ShellCompDirectiveNoFileComp } - cli.db, err = require.DBClient(cmd.Context(), cfg.DbConfig) + cli.db, err = require.DBClient(ctx, cfg.DbConfig) if err != nil { cobra.CompError("unable to list bouncers " + err.Error()) return nil, cobra.ShellCompDirectiveNoFileComp } - bouncers, err := cli.db.ListBouncers() + bouncers, err := cli.db.ListBouncers(ctx) if err != nil { cobra.CompError("unable to list bouncers " + err.Error()) return nil, cobra.ShellCompDirectiveNoFileComp diff --git a/cmd/crowdsec-cli/clisupport/support.go b/cmd/crowdsec-cli/clisupport/support.go index e9837b03fe7..7e41518805a 100644 --- a/cmd/crowdsec-cli/clisupport/support.go +++ b/cmd/crowdsec-cli/clisupport/support.go @@ -189,7 +189,7 @@ func (cli *cliSupport) dumpHubItems(zw *zip.Writer, hub *cwhub.Hub) error { return nil } -func (cli *cliSupport) dumpBouncers(zw *zip.Writer, db *database.Client) error { +func (cli *cliSupport) dumpBouncers(ctx context.Context, zw *zip.Writer, db *database.Client) error { log.Info("Collecting bouncers") if db == nil { @@ -199,7 +199,7 @@ func (cli *cliSupport) dumpBouncers(zw *zip.Writer, db *database.Client) error { out := new(bytes.Buffer) cb := clibouncer.New(cli.cfg) - if err := cb.List(out, db); err != nil { + if err := cb.List(ctx, out, db); err != nil { return err } @@ -525,7 +525,7 @@ func (cli *cliSupport) dump(ctx context.Context, outFile string) error { log.Warnf("could not collect hub information: %s", err) } - if err = cli.dumpBouncers(zipWriter, db); err != nil { + if err = cli.dumpBouncers(ctx, zipWriter, db); err != nil { log.Warnf("could not collect bouncers information: %s", err) } diff --git a/pkg/apiserver/apic_metrics.go b/pkg/apiserver/apic_metrics.go index 5c6a550a6a0..91a0a8273f7 100644 --- 
a/pkg/apiserver/apic_metrics.go +++ b/pkg/apiserver/apic_metrics.go @@ -23,7 +23,7 @@ type dbPayload struct { Metrics []*models.DetailedMetrics `json:"metrics"` } -func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { +func (a *apic) GetUsageMetrics(ctx context.Context) (*models.AllMetrics, []int, error) { allMetrics := &models.AllMetrics{} metricsIds := make([]int, 0) @@ -32,7 +32,7 @@ func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { return nil, nil, err } - bouncers, err := a.dbClient.ListBouncers() + bouncers, err := a.dbClient.ListBouncers(ctx) if err != nil { return nil, nil, err } @@ -185,7 +185,7 @@ func (a *apic) MarkUsageMetricsAsSent(ids []int) error { return a.dbClient.MarkUsageMetricsAsSent(ids) } -func (a *apic) GetMetrics() (*models.Metrics, error) { +func (a *apic) GetMetrics(ctx context.Context) (*models.Metrics, error) { machines, err := a.dbClient.ListMachines() if err != nil { return nil, err @@ -202,7 +202,7 @@ func (a *apic) GetMetrics() (*models.Metrics, error) { } } - bouncers, err := a.dbClient.ListBouncers() + bouncers, err := a.dbClient.ListBouncers(ctx) if err != nil { return nil, err } @@ -254,6 +254,8 @@ func (a *apic) fetchMachineIDs() ([]string, error) { func (a *apic) SendMetrics(stop chan (bool)) { defer trace.CatchPanic("lapi/metricsToAPIC") + ctx := context.TODO() + // verify the list of machines every interval const checkInt = 20 * time.Second @@ -311,7 +313,7 @@ func (a *apic) SendMetrics(stop chan (bool)) { case <-metTicker.C: metTicker.Stop() - metrics, err := a.GetMetrics() + metrics, err := a.GetMetrics(ctx) if err != nil { log.Errorf("unable to get metrics (%s)", err) } @@ -340,6 +342,8 @@ func (a *apic) SendMetrics(stop chan (bool)) { func (a *apic) SendUsageMetrics() { defer trace.CatchPanic("lapi/usageMetricsToAPIC") + ctx := context.TODO() + firstRun := true log.Debugf("Start sending usage metrics to CrowdSec Central API (interval: %s once, then %s)", a.usageMetricsIntervalFirst, 
a.usageMetricsInterval) @@ -358,7 +362,7 @@ func (a *apic) SendUsageMetrics() { ticker.Reset(a.usageMetricsInterval) } - metrics, metricsId, err := a.GetUsageMetrics() + metrics, metricsId, err := a.GetUsageMetrics(ctx) if err != nil { log.Errorf("unable to get usage metrics: %s", err) continue diff --git a/pkg/apiserver/apic_test.go b/pkg/apiserver/apic_test.go index 105d295dd0d..182bf18532f 100644 --- a/pkg/apiserver/apic_test.go +++ b/pkg/apiserver/apic_test.go @@ -292,9 +292,11 @@ func TestAPICHandleDeletedDecisions(t *testing.T) { } func TestAPICGetMetrics(t *testing.T) { + ctx := context.Background() + cleanUp := func(api *apic) { - api.dbClient.Ent.Bouncer.Delete().ExecX(context.Background()) - api.dbClient.Ent.Machine.Delete().ExecX(context.Background()) + api.dbClient.Ent.Bouncer.Delete().ExecX(ctx) + api.dbClient.Ent.Machine.Delete().ExecX(ctx) } tests := []struct { name string @@ -377,7 +379,7 @@ func TestAPICGetMetrics(t *testing.T) { ExecX(context.Background()) } - foundMetrics, err := apiClient.GetMetrics() + foundMetrics, err := apiClient.GetMetrics(ctx) require.NoError(t, err) assert.Equal(t, tc.expectedMetric.Bouncers, foundMetrics.Bouncers) diff --git a/pkg/database/bouncers.go b/pkg/database/bouncers.go index a7378bbb203..6ff308ff786 100644 --- a/pkg/database/bouncers.go +++ b/pkg/database/bouncers.go @@ -59,8 +59,8 @@ func (c *Client) SelectBouncerByName(bouncerName string) (*ent.Bouncer, error) { return result, nil } -func (c *Client) ListBouncers() ([]*ent.Bouncer, error) { - result, err := c.Ent.Bouncer.Query().All(c.CTX) +func (c *Client) ListBouncers(ctx context.Context) ([]*ent.Bouncer, error) { + result, err := c.Ent.Bouncer.Query().All(ctx) if err != nil { return nil, errors.Wrapf(QueryFail, "listing bouncers: %s", err) } From eeb28014c6860a0f50e87ef1488fb641d09edbb9 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 19 Sep 2024 14:09:35 +0200 Subject: [PATCH 296/318] context propagation: 
pkg/database/config (#3246) --- cmd/crowdsec-cli/clipapi/papi.go | 2 +- pkg/apiserver/apic.go | 28 ++++++++++++++-------------- pkg/apiserver/apic_test.go | 24 +++++++++++++++--------- pkg/apiserver/apiserver.go | 18 ++++++++++-------- pkg/apiserver/papi.go | 8 ++++---- pkg/apiserver/papi_cmd.go | 7 +++++-- pkg/database/config.go | 17 ++++++++++------- 7 files changed, 59 insertions(+), 45 deletions(-) diff --git a/cmd/crowdsec-cli/clipapi/papi.go b/cmd/crowdsec-cli/clipapi/papi.go index c0f08157f31..b8101a0fb34 100644 --- a/cmd/crowdsec-cli/clipapi/papi.go +++ b/cmd/crowdsec-cli/clipapi/papi.go @@ -74,7 +74,7 @@ func (cli *cliPapi) Status(ctx context.Context, out io.Writer, db *database.Clie return fmt.Errorf("unable to get PAPI permissions: %w", err) } - lastTimestampStr, err := db.GetConfigItem(apiserver.PapiPullKey) + lastTimestampStr, err := db.GetConfigItem(ctx, apiserver.PapiPullKey) if err != nil { lastTimestampStr = ptr.Of("never") } diff --git a/pkg/apiserver/apic.go b/pkg/apiserver/apic.go index 3ed2e12ea54..b5384c6cc5c 100644 --- a/pkg/apiserver/apic.go +++ b/pkg/apiserver/apic.go @@ -614,7 +614,7 @@ func fillAlertsWithDecisions(alerts []*models.Alert, decisions []*models.Decisio // we receive a list of decisions and links for blocklist and we need to create a list of alerts : // one alert for "community blocklist" // one alert per list we're subscribed to -func (a *apic) PullTop(forcePull bool) error { +func (a *apic) PullTop(ctx context.Context, forcePull bool) error { var err error // A mutex with TryLock would be a bit simpler @@ -655,7 +655,7 @@ func (a *apic) PullTop(forcePull bool) error { log.Infof("Starting community-blocklist update") - data, _, err := a.apiClient.Decisions.GetStreamV3(context.Background(), apiclient.DecisionsStreamOpts{Startup: a.startup}) + data, _, err := a.apiClient.Decisions.GetStreamV3(ctx, apiclient.DecisionsStreamOpts{Startup: a.startup}) if err != nil { return fmt.Errorf("get stream: %w", err) } @@ -700,7 +700,7 @@ 
func (a *apic) PullTop(forcePull bool) error { } // update blocklists - if err := a.UpdateBlocklists(data.Links, addCounters, forcePull); err != nil { + if err := a.UpdateBlocklists(ctx, data.Links, addCounters, forcePull); err != nil { return fmt.Errorf("while updating blocklists: %w", err) } @@ -708,9 +708,9 @@ func (a *apic) PullTop(forcePull bool) error { } // we receive a link to a blocklist, we pull the content of the blocklist and we create one alert -func (a *apic) PullBlocklist(blocklist *modelscapi.BlocklistLink, forcePull bool) error { +func (a *apic) PullBlocklist(ctx context.Context, blocklist *modelscapi.BlocklistLink, forcePull bool) error { addCounters, _ := makeAddAndDeleteCounters() - if err := a.UpdateBlocklists(&modelscapi.GetDecisionsStreamResponseLinks{ + if err := a.UpdateBlocklists(ctx, &modelscapi.GetDecisionsStreamResponseLinks{ Blocklists: []*modelscapi.BlocklistLink{blocklist}, }, addCounters, forcePull); err != nil { return fmt.Errorf("while pulling blocklist: %w", err) @@ -820,7 +820,7 @@ func (a *apic) ShouldForcePullBlocklist(blocklist *modelscapi.BlocklistLink) (bo return false, nil } -func (a *apic) updateBlocklist(client *apiclient.ApiClient, blocklist *modelscapi.BlocklistLink, addCounters map[string]map[string]int, forcePull bool) error { +func (a *apic) updateBlocklist(ctx context.Context, client *apiclient.ApiClient, blocklist *modelscapi.BlocklistLink, addCounters map[string]map[string]int, forcePull bool) error { if blocklist.Scope == nil { log.Warningf("blocklist has no scope") return nil @@ -848,13 +848,13 @@ func (a *apic) updateBlocklist(client *apiclient.ApiClient, blocklist *modelscap ) if !forcePull { - lastPullTimestamp, err = a.dbClient.GetConfigItem(blocklistConfigItemName) + lastPullTimestamp, err = a.dbClient.GetConfigItem(ctx, blocklistConfigItemName) if err != nil { return fmt.Errorf("while getting last pull timestamp for blocklist %s: %w", *blocklist.Name, err) } } - decisions, hasChanged, err := 
client.Decisions.GetDecisionsFromBlocklist(context.Background(), blocklist, lastPullTimestamp) + decisions, hasChanged, err := client.Decisions.GetDecisionsFromBlocklist(ctx, blocklist, lastPullTimestamp) if err != nil { return fmt.Errorf("while getting decisions from blocklist %s: %w", *blocklist.Name, err) } @@ -869,7 +869,7 @@ func (a *apic) updateBlocklist(client *apiclient.ApiClient, blocklist *modelscap return nil } - err = a.dbClient.SetConfigItem(blocklistConfigItemName, time.Now().UTC().Format(http.TimeFormat)) + err = a.dbClient.SetConfigItem(ctx, blocklistConfigItemName, time.Now().UTC().Format(http.TimeFormat)) if err != nil { return fmt.Errorf("while setting last pull timestamp for blocklist %s: %w", *blocklist.Name, err) } @@ -892,7 +892,7 @@ func (a *apic) updateBlocklist(client *apiclient.ApiClient, blocklist *modelscap return nil } -func (a *apic) UpdateBlocklists(links *modelscapi.GetDecisionsStreamResponseLinks, addCounters map[string]map[string]int, forcePull bool) error { +func (a *apic) UpdateBlocklists(ctx context.Context, links *modelscapi.GetDecisionsStreamResponseLinks, addCounters map[string]map[string]int, forcePull bool) error { if links == nil { return nil } @@ -908,7 +908,7 @@ func (a *apic) UpdateBlocklists(links *modelscapi.GetDecisionsStreamResponseLink } for _, blocklist := range links.Blocklists { - if err := a.updateBlocklist(defaultClient, blocklist, addCounters, forcePull); err != nil { + if err := a.updateBlocklist(ctx, defaultClient, blocklist, addCounters, forcePull); err != nil { return err } } @@ -931,7 +931,7 @@ func setAlertScenario(alert *models.Alert, addCounters map[string]map[string]int } } -func (a *apic) Pull() error { +func (a *apic) Pull(ctx context.Context) error { defer trace.CatchPanic("lapi/pullFromAPIC") toldOnce := false @@ -955,7 +955,7 @@ func (a *apic) Pull() error { time.Sleep(1 * time.Second) } - if err := a.PullTop(false); err != nil { + if err := a.PullTop(ctx, false); err != nil { log.Errorf("capi 
pull top: %s", err) } @@ -967,7 +967,7 @@ func (a *apic) Pull() error { case <-ticker.C: ticker.Reset(a.pullInterval) - if err := a.PullTop(false); err != nil { + if err := a.PullTop(ctx, false); err != nil { log.Errorf("capi pull top: %s", err) continue } diff --git a/pkg/apiserver/apic_test.go b/pkg/apiserver/apic_test.go index 182bf18532f..97943b495e5 100644 --- a/pkg/apiserver/apic_test.go +++ b/pkg/apiserver/apic_test.go @@ -550,6 +550,7 @@ func TestFillAlertsWithDecisions(t *testing.T) { } func TestAPICWhitelists(t *testing.T) { + ctx := context.Background() api := getAPIC(t) // one whitelist on IP, one on CIDR api.whitelists = &csconfig.CapiWhitelist{} @@ -685,7 +686,7 @@ func TestAPICWhitelists(t *testing.T) { require.NoError(t, err) api.apiClient = apic - err = api.PullTop(false) + err = api.PullTop(ctx, false) require.NoError(t, err) assertTotalDecisionCount(t, api.dbClient, 5) // 2 from FIRE + 2 from bl + 1 existing @@ -736,6 +737,7 @@ func TestAPICWhitelists(t *testing.T) { } func TestAPICPullTop(t *testing.T) { + ctx := context.Background() api := getAPIC(t) api.dbClient.Ent.Decision.Create(). SetOrigin(types.CAPIOrigin). @@ -826,7 +828,7 @@ func TestAPICPullTop(t *testing.T) { require.NoError(t, err) api.apiClient = apic - err = api.PullTop(false) + err = api.PullTop(ctx, false) require.NoError(t, err) assertTotalDecisionCount(t, api.dbClient, 5) @@ -860,6 +862,7 @@ func TestAPICPullTop(t *testing.T) { } func TestAPICPullTopBLCacheFirstCall(t *testing.T) { + ctx := context.Background() // no decision in db, no last modified parameter. 
api := getAPIC(t) @@ -913,11 +916,11 @@ func TestAPICPullTopBLCacheFirstCall(t *testing.T) { require.NoError(t, err) api.apiClient = apic - err = api.PullTop(false) + err = api.PullTop(ctx, false) require.NoError(t, err) blocklistConfigItemName := "blocklist:blocklist1:last_pull" - lastPullTimestamp, err := api.dbClient.GetConfigItem(blocklistConfigItemName) + lastPullTimestamp, err := api.dbClient.GetConfigItem(ctx, blocklistConfigItemName) require.NoError(t, err) assert.NotEqual(t, "", *lastPullTimestamp) @@ -927,14 +930,15 @@ func TestAPICPullTopBLCacheFirstCall(t *testing.T) { return httpmock.NewStringResponse(304, ""), nil }) - err = api.PullTop(false) + err = api.PullTop(ctx, false) require.NoError(t, err) - secondLastPullTimestamp, err := api.dbClient.GetConfigItem(blocklistConfigItemName) + secondLastPullTimestamp, err := api.dbClient.GetConfigItem(ctx, blocklistConfigItemName) require.NoError(t, err) assert.Equal(t, *lastPullTimestamp, *secondLastPullTimestamp) } func TestAPICPullTopBLCacheForceCall(t *testing.T) { + ctx := context.Background() api := getAPIC(t) httpmock.Activate() @@ -1005,11 +1009,12 @@ func TestAPICPullTopBLCacheForceCall(t *testing.T) { require.NoError(t, err) api.apiClient = apic - err = api.PullTop(false) + err = api.PullTop(ctx, false) require.NoError(t, err) } func TestAPICPullBlocklistCall(t *testing.T) { + ctx := context.Background() api := getAPIC(t) httpmock.Activate() @@ -1032,7 +1037,7 @@ func TestAPICPullBlocklistCall(t *testing.T) { require.NoError(t, err) api.apiClient = apic - err = api.PullBlocklist(&modelscapi.BlocklistLink{ + err = api.PullBlocklist(ctx, &modelscapi.BlocklistLink{ URL: ptr.Of("http://api.crowdsec.net/blocklist1"), Name: ptr.Of("blocklist1"), Scope: ptr.Of("Ip"), @@ -1134,6 +1139,7 @@ func TestAPICPush(t *testing.T) { } func TestAPICPull(t *testing.T) { + ctx := context.Background() api := getAPIC(t) tests := []struct { name string @@ -1204,7 +1210,7 @@ func TestAPICPull(t *testing.T) { go func() { 
logrus.SetOutput(&buf) - if err := api.Pull(); err != nil { + if err := api.Pull(ctx); err != nil { panic(err) } }() diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go index 95d18ccb028..6b5d6803be9 100644 --- a/pkg/apiserver/apiserver.go +++ b/pkg/apiserver/apiserver.go @@ -310,8 +310,8 @@ func (s *APIServer) apicPush() error { return nil } -func (s *APIServer) apicPull() error { - if err := s.apic.Pull(); err != nil { +func (s *APIServer) apicPull(ctx context.Context) error { + if err := s.apic.Pull(ctx); err != nil { log.Errorf("capi pull: %s", err) return err } @@ -319,8 +319,8 @@ func (s *APIServer) apicPull() error { return nil } -func (s *APIServer) papiPull() error { - if err := s.papi.Pull(); err != nil { +func (s *APIServer) papiPull(ctx context.Context) error { + if err := s.papi.Pull(ctx); err != nil { log.Errorf("papi pull: %s", err) return err } @@ -337,16 +337,16 @@ func (s *APIServer) papiSync() error { return nil } -func (s *APIServer) initAPIC() { +func (s *APIServer) initAPIC(ctx context.Context) { s.apic.pushTomb.Go(s.apicPush) - s.apic.pullTomb.Go(s.apicPull) + s.apic.pullTomb.Go(func() error { return s.apicPull(ctx) }) // csConfig.API.Server.ConsoleConfig.ShareCustomScenarios if s.apic.apiClient.IsEnrolled() { if s.consoleConfig.IsPAPIEnabled() { if s.papi.URL != "" { log.Info("Starting PAPI decision receiver") - s.papi.pullTomb.Go(s.papiPull) + s.papi.pullTomb.Go(func() error { return s.papiPull(ctx) }) s.papi.syncTomb.Go(s.papiSync) } else { log.Warnf("papi_url is not set in online_api_credentials.yaml, can't synchronize with the console. 
Run cscli console enable console_management to add it.") @@ -381,8 +381,10 @@ func (s *APIServer) Run(apiReady chan bool) error { TLSConfig: tlsCfg, } + ctx := context.TODO() + if s.apic != nil { - s.initAPIC() + s.initAPIC(ctx) } s.httpServerTomb.Go(func() error { diff --git a/pkg/apiserver/papi.go b/pkg/apiserver/papi.go index 89ad93930a1..7dd6b346aa9 100644 --- a/pkg/apiserver/papi.go +++ b/pkg/apiserver/papi.go @@ -230,13 +230,13 @@ func (p *Papi) PullOnce(since time.Time, sync bool) error { } // PullPAPI is the long polling client for real-time decisions from PAPI -func (p *Papi) Pull() error { +func (p *Papi) Pull(ctx context.Context) error { defer trace.CatchPanic("lapi/PullPAPI") p.Logger.Infof("Starting Polling API Pull") lastTimestamp := time.Time{} - lastTimestampStr, err := p.DBClient.GetConfigItem(PapiPullKey) + lastTimestampStr, err := p.DBClient.GetConfigItem(ctx, PapiPullKey) if err != nil { p.Logger.Warningf("failed to get last timestamp for papi pull: %s", err) } @@ -248,7 +248,7 @@ func (p *Papi) Pull() error { return fmt.Errorf("failed to serialize last timestamp: %w", err) } - if err := p.DBClient.SetConfigItem(PapiPullKey, string(binTime)); err != nil { + if err := p.DBClient.SetConfigItem(ctx, PapiPullKey, string(binTime)); err != nil { p.Logger.Errorf("error setting papi pull last key: %s", err) } else { p.Logger.Debugf("config item '%s' set in database with value '%s'", PapiPullKey, string(binTime)) @@ -277,7 +277,7 @@ func (p *Papi) Pull() error { continue } - if err := p.DBClient.SetConfigItem(PapiPullKey, string(binTime)); err != nil { + if err := p.DBClient.SetConfigItem(ctx, PapiPullKey, string(binTime)); err != nil { return fmt.Errorf("failed to update last timestamp: %w", err) } diff --git a/pkg/apiserver/papi_cmd.go b/pkg/apiserver/papi_cmd.go index a1137161698..943eb4139de 100644 --- a/pkg/apiserver/papi_cmd.go +++ b/pkg/apiserver/papi_cmd.go @@ -1,6 +1,7 @@ package apiserver import ( + "context" "encoding/json" "fmt" "time" @@ 
-215,17 +216,19 @@ func ManagementCmd(message *Message, p *Papi, sync bool) error { return fmt.Errorf("message for '%s' contains bad data format: %w", message.Header.OperationType, err) } + ctx := context.TODO() + if forcePullMsg.Blocklist == nil { p.Logger.Infof("Received force_pull command from PAPI, pulling community and 3rd-party blocklists") - err = p.apic.PullTop(true) + err = p.apic.PullTop(ctx, true) if err != nil { return fmt.Errorf("failed to force pull operation: %w", err) } } else { p.Logger.Infof("Received force_pull command from PAPI, pulling blocklist %s", forcePullMsg.Blocklist.Name) - err = p.apic.PullBlocklist(&modelscapi.BlocklistLink{ + err = p.apic.PullBlocklist(ctx, &modelscapi.BlocklistLink{ Name: &forcePullMsg.Blocklist.Name, URL: &forcePullMsg.Blocklist.Url, Remediation: &forcePullMsg.Blocklist.Remediation, diff --git a/pkg/database/config.go b/pkg/database/config.go index 8c3578ad596..89ccb1e1b28 100644 --- a/pkg/database/config.go +++ b/pkg/database/config.go @@ -1,17 +1,20 @@ package database import ( + "context" + "github.com/pkg/errors" "github.com/crowdsecurity/crowdsec/pkg/database/ent" "github.com/crowdsecurity/crowdsec/pkg/database/ent/configitem" ) -func (c *Client) GetConfigItem(key string) (*string, error) { - result, err := c.Ent.ConfigItem.Query().Where(configitem.NameEQ(key)).First(c.CTX) +func (c *Client) GetConfigItem(ctx context.Context, key string) (*string, error) { + result, err := c.Ent.ConfigItem.Query().Where(configitem.NameEQ(key)).First(ctx) if err != nil && ent.IsNotFound(err) { return nil, nil } + if err != nil { return nil, errors.Wrapf(QueryFail, "select config item: %s", err) } @@ -19,16 +22,16 @@ func (c *Client) GetConfigItem(key string) (*string, error) { return &result.Value, nil } -func (c *Client) SetConfigItem(key string, value string) error { - - nbUpdated, err := c.Ent.ConfigItem.Update().SetValue(value).Where(configitem.NameEQ(key)).Save(c.CTX) - if (err != nil && ent.IsNotFound(err)) || nbUpdated == 
0 { //not found, create - err := c.Ent.ConfigItem.Create().SetName(key).SetValue(value).Exec(c.CTX) +func (c *Client) SetConfigItem(ctx context.Context, key string, value string) error { + nbUpdated, err := c.Ent.ConfigItem.Update().SetValue(value).Where(configitem.NameEQ(key)).Save(ctx) + if (err != nil && ent.IsNotFound(err)) || nbUpdated == 0 { // not found, create + err := c.Ent.ConfigItem.Create().SetName(key).SetValue(value).Exec(ctx) if err != nil { return errors.Wrapf(QueryFail, "insert config item: %s", err) } } else if err != nil { return errors.Wrapf(QueryFail, "update config item: %s", err) } + return nil } From 29f3d2710d1f75662624a30027c939036f6133b6 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 19 Sep 2024 14:27:34 +0200 Subject: [PATCH 297/318] context propagation: pkg/database/metrics (#3247) --- pkg/apiserver/apic_metrics.go | 10 +++++----- pkg/apiserver/usage_metrics_test.go | 8 ++++++-- pkg/database/metrics.go | 12 ++++++------ 3 files changed, 17 insertions(+), 13 deletions(-) diff --git a/pkg/apiserver/apic_metrics.go b/pkg/apiserver/apic_metrics.go index 91a0a8273f7..e5821e4c1e2 100644 --- a/pkg/apiserver/apic_metrics.go +++ b/pkg/apiserver/apic_metrics.go @@ -38,7 +38,7 @@ func (a *apic) GetUsageMetrics(ctx context.Context) (*models.AllMetrics, []int, } for _, bouncer := range bouncers { - dbMetrics, err := a.dbClient.GetBouncerUsageMetricsByName(bouncer.Name) + dbMetrics, err := a.dbClient.GetBouncerUsageMetricsByName(ctx, bouncer.Name) if err != nil { log.Errorf("unable to get bouncer usage metrics: %s", err) continue @@ -81,7 +81,7 @@ func (a *apic) GetUsageMetrics(ctx context.Context) (*models.AllMetrics, []int, } for _, lp := range lps { - dbMetrics, err := a.dbClient.GetLPUsageMetricsByMachineID(lp.MachineId) + dbMetrics, err := a.dbClient.GetLPUsageMetricsByMachineID(ctx, lp.MachineId) if err != nil { log.Errorf("unable to get LP usage metrics: %s", err) continue @@ -181,8 +181,8 @@ func (a 
*apic) GetUsageMetrics(ctx context.Context) (*models.AllMetrics, []int, return allMetrics, metricsIds, nil } -func (a *apic) MarkUsageMetricsAsSent(ids []int) error { - return a.dbClient.MarkUsageMetricsAsSent(ids) +func (a *apic) MarkUsageMetricsAsSent(ctx context.Context, ids []int) error { + return a.dbClient.MarkUsageMetricsAsSent(ctx, ids) } func (a *apic) GetMetrics(ctx context.Context) (*models.Metrics, error) { @@ -379,7 +379,7 @@ func (a *apic) SendUsageMetrics() { } } - err = a.MarkUsageMetricsAsSent(metricsId) + err = a.MarkUsageMetricsAsSent(ctx, metricsId) if err != nil { log.Errorf("unable to mark usage metrics as sent: %s", err) continue diff --git a/pkg/apiserver/usage_metrics_test.go b/pkg/apiserver/usage_metrics_test.go index 41dd0ccdc2c..019de5fb970 100644 --- a/pkg/apiserver/usage_metrics_test.go +++ b/pkg/apiserver/usage_metrics_test.go @@ -13,6 +13,8 @@ import ( ) func TestLPMetrics(t *testing.T) { + ctx := context.Background() + tests := []struct { name string body string @@ -198,7 +200,7 @@ func TestLPMetrics(t *testing.T) { assert.Contains(t, w.Body.String(), tt.expectedResponse) machine, _ := dbClient.QueryMachineByID("test") - metrics, _ := dbClient.GetLPUsageMetricsByMachineID("test") + metrics, _ := dbClient.GetLPUsageMetricsByMachineID(ctx, "test") assert.Len(t, metrics, tt.expectedMetricsCount) assert.Equal(t, tt.expectedOSName, machine.Osname) @@ -214,6 +216,8 @@ func TestLPMetrics(t *testing.T) { } func TestRCMetrics(t *testing.T) { + ctx := context.Background() + tests := []struct { name string body string @@ -368,7 +372,7 @@ func TestRCMetrics(t *testing.T) { assert.Contains(t, w.Body.String(), tt.expectedResponse) bouncer, _ := dbClient.SelectBouncerByName("test") - metrics, _ := dbClient.GetBouncerUsageMetricsByName("test") + metrics, _ := dbClient.GetBouncerUsageMetricsByName(ctx, "test") assert.Len(t, metrics, tt.expectedMetricsCount) assert.Equal(t, tt.expectedOSName, bouncer.Osname) diff --git a/pkg/database/metrics.go 
b/pkg/database/metrics.go index 1619fcc923b..99ba90c80b8 100644 --- a/pkg/database/metrics.go +++ b/pkg/database/metrics.go @@ -25,14 +25,14 @@ func (c *Client) CreateMetric(ctx context.Context, generatedType metric.Generate return metric, nil } -func (c *Client) GetLPUsageMetricsByMachineID(machineId string) ([]*ent.Metric, error) { +func (c *Client) GetLPUsageMetricsByMachineID(ctx context.Context, machineId string) ([]*ent.Metric, error) { metrics, err := c.Ent.Metric.Query(). Where( metric.GeneratedTypeEQ(metric.GeneratedTypeLP), metric.GeneratedByEQ(machineId), metric.PushedAtIsNil(), ). - All(c.CTX) + All(ctx) if err != nil { c.Log.Warningf("GetLPUsageMetricsByOrigin: %s", err) return nil, fmt.Errorf("getting LP usage metrics by origin %s: %w", machineId, err) @@ -41,14 +41,14 @@ func (c *Client) GetLPUsageMetricsByMachineID(machineId string) ([]*ent.Metric, return metrics, nil } -func (c *Client) GetBouncerUsageMetricsByName(bouncerName string) ([]*ent.Metric, error) { +func (c *Client) GetBouncerUsageMetricsByName(ctx context.Context, bouncerName string) ([]*ent.Metric, error) { metrics, err := c.Ent.Metric.Query(). Where( metric.GeneratedTypeEQ(metric.GeneratedTypeRC), metric.GeneratedByEQ(bouncerName), metric.PushedAtIsNil(), ). - All(c.CTX) + All(ctx) if err != nil { c.Log.Warningf("GetBouncerUsageMetricsByName: %s", err) return nil, fmt.Errorf("getting bouncer usage metrics by name %s: %w", bouncerName, err) @@ -57,11 +57,11 @@ func (c *Client) GetBouncerUsageMetricsByName(bouncerName string) ([]*ent.Metric return metrics, nil } -func (c *Client) MarkUsageMetricsAsSent(ids []int) error { +func (c *Client) MarkUsageMetricsAsSent(ctx context.Context, ids []int) error { _, err := c.Ent.Metric.Update(). Where(metric.IDIn(ids...)). SetPushedAt(time.Now().UTC()). 
- Save(c.CTX) + Save(ctx) if err != nil { c.Log.Warningf("MarkUsageMetricsAsSent: %s", err) return fmt.Errorf("marking usage metrics as sent: %w", err) From 00032d40a887e7847c5877d96fb5506408836b9c Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 19 Sep 2024 16:59:02 +0200 Subject: [PATCH 298/318] lint/nestif: reduce hubtest complexity (#3244) --- .golangci.yml | 2 +- cmd/crowdsec-cli/clihubtest/run.go | 66 +++++++++++++++++++----------- 2 files changed, 43 insertions(+), 25 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 34837437355..b76e2613be7 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -27,7 +27,7 @@ linters-settings: nestif: # lower this after refactoring - min-complexity: 19 + min-complexity: 16 nlreturn: block-size: 5 diff --git a/cmd/crowdsec-cli/clihubtest/run.go b/cmd/crowdsec-cli/clihubtest/run.go index 57956ce67dc..31cceb81884 100644 --- a/cmd/crowdsec-cli/clihubtest/run.go +++ b/cmd/crowdsec-cli/clihubtest/run.go @@ -56,6 +56,46 @@ func (cli *cliHubTest) run(runAll bool, nucleiTargetHost string, appSecHost stri return nil } +func printParserFailures(test *hubtest.HubTestItem) { + if len(test.ParserAssert.Fails) == 0 { + return + } + + fmt.Println() + log.Errorf("Parser test '%s' failed (%d errors)\n", test.Name, len(test.ParserAssert.Fails)) + + for _, fail := range test.ParserAssert.Fails { + fmt.Printf("(L.%d) %s => %s\n", fail.Line, emoji.RedCircle, fail.Expression) + fmt.Printf(" Actual expression values:\n") + + for key, value := range fail.Debug { + fmt.Printf(" %s = '%s'\n", key, strings.TrimSuffix(value, "\n")) + } + + fmt.Println() + } +} + +func printScenarioFailures(test *hubtest.HubTestItem) { + if len(test.ScenarioAssert.Fails) == 0 { + return + } + + fmt.Println() + log.Errorf("Scenario test '%s' failed (%d errors)\n", test.Name, len(test.ScenarioAssert.Fails)) + + for _, fail := range test.ScenarioAssert.Fails { + fmt.Printf("(L.%d) %s => %s\n", fail.Line, emoji.RedCircle, 
fail.Expression) + fmt.Printf(" Actual expression values:\n") + + for key, value := range fail.Debug { + fmt.Printf(" %s = '%s'\n", key, strings.TrimSuffix(value, "\n")) + } + + fmt.Println() + } +} + func (cli *cliHubTest) newRunCmd() *cobra.Command { var ( noClean bool @@ -111,30 +151,8 @@ func (cli *cliHubTest) newRunCmd() *cobra.Command { success = false cleanTestEnv := false if cfg.Cscli.Output == "human" { - if len(test.ParserAssert.Fails) > 0 { - fmt.Println() - log.Errorf("Parser test '%s' failed (%d errors)\n", test.Name, len(test.ParserAssert.Fails)) - for _, fail := range test.ParserAssert.Fails { - fmt.Printf("(L.%d) %s => %s\n", fail.Line, emoji.RedCircle, fail.Expression) - fmt.Printf(" Actual expression values:\n") - for key, value := range fail.Debug { - fmt.Printf(" %s = '%s'\n", key, strings.TrimSuffix(value, "\n")) - } - fmt.Println() - } - } - if len(test.ScenarioAssert.Fails) > 0 { - fmt.Println() - log.Errorf("Scenario test '%s' failed (%d errors)\n", test.Name, len(test.ScenarioAssert.Fails)) - for _, fail := range test.ScenarioAssert.Fails { - fmt.Printf("(L.%d) %s => %s\n", fail.Line, emoji.RedCircle, fail.Expression) - fmt.Printf(" Actual expression values:\n") - for key, value := range fail.Debug { - fmt.Printf(" %s = '%s'\n", key, strings.TrimSuffix(value, "\n")) - } - fmt.Println() - } - } + printParserFailures(test) + printScenarioFailures(test) if !forceClean && !noClean { prompt := &survey.Confirm{ Message: fmt.Sprintf("\nDo you want to remove runtime folder for test '%s'? 
(default: Yes)", test.Name), From e2196bdd66a4ed30b444997d1ad89bed8a601756 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 20 Sep 2024 10:34:53 +0200 Subject: [PATCH 299/318] remove dependency from pkg/cwversion to pkg/acquisition (#3242) * register built-in components without dependencies * package comment --- Makefile | 3 +- cmd/crowdsec-cli/main.go | 5 +-- cmd/crowdsec-cli/setup.go | 18 ++++++++++ cmd/crowdsec-cli/setup_stub.go | 9 +++++ pkg/acquisition/acquisition.go | 51 +++++++++++++++------------- pkg/cwversion/component/component.go | 34 +++++++++++++++++++ pkg/cwversion/version.go | 22 +++++++----- 7 files changed, 104 insertions(+), 38 deletions(-) create mode 100644 cmd/crowdsec-cli/setup.go create mode 100644 cmd/crowdsec-cli/setup_stub.go create mode 100644 pkg/cwversion/component/component.go diff --git a/Makefile b/Makefile index 6bd3cbb7980..a0b06dc2ea0 100644 --- a/Makefile +++ b/Makefile @@ -138,7 +138,8 @@ COMPONENTS := \ datasource_loki \ datasource_s3 \ datasource_syslog \ - datasource_wineventlog + datasource_wineventlog \ + cscli_setup comma := , space := $(empty) $(empty) diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index 01179cf93be..1cca03b1d3d 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -28,7 +28,6 @@ import ( "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/climetrics" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clinotifications" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clipapi" - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clisetup" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clisimulation" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clisupport" "github.com/crowdsecurity/crowdsec/pkg/csconfig" @@ -281,9 +280,7 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(cliitem.NewAppsecConfig(cli.cfg).NewCommand()) 
cmd.AddCommand(cliitem.NewAppsecRule(cli.cfg).NewCommand()) - if fflag.CscliSetup.IsEnabled() { - cmd.AddCommand(clisetup.New(cli.cfg).NewCommand()) - } + cli.addSetup(cmd) if len(os.Args) > 1 { cobra.OnInitialize( diff --git a/cmd/crowdsec-cli/setup.go b/cmd/crowdsec-cli/setup.go new file mode 100644 index 00000000000..66c0d71e777 --- /dev/null +++ b/cmd/crowdsec-cli/setup.go @@ -0,0 +1,18 @@ +//go:build !no_cscli_setup +package main + +import ( + "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clisetup" + "github.com/crowdsecurity/crowdsec/pkg/cwversion/component" + "github.com/crowdsecurity/crowdsec/pkg/fflag" +) + +func (cli *cliRoot) addSetup(cmd *cobra.Command) { + if fflag.CscliSetup.IsEnabled() { + cmd.AddCommand(clisetup.New(cli.cfg).NewCommand()) + } + + component.Register("cscli_setup") +} diff --git a/cmd/crowdsec-cli/setup_stub.go b/cmd/crowdsec-cli/setup_stub.go new file mode 100644 index 00000000000..e001f93c797 --- /dev/null +++ b/cmd/crowdsec-cli/setup_stub.go @@ -0,0 +1,9 @@ +//go:build no_cscli_setup +package main + +import ( + "github.com/spf13/cobra" +) + +func (cli *cliRoot) addSetup(_ *cobra.Command) { +} diff --git a/pkg/acquisition/acquisition.go b/pkg/acquisition/acquisition.go index b2493bbb9b7..4a5226a2981 100644 --- a/pkg/acquisition/acquisition.go +++ b/pkg/acquisition/acquisition.go @@ -19,6 +19,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/cwversion/component" "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -54,44 +55,34 @@ type DataSource interface { var ( // We declare everything here so we can tell if they are unsupported, or excluded from the build - AcquisitionSources = map[string]func() DataSource{ - "appsec": nil, - "cloudwatch": nil, - "docker": nil, - "file": nil, - "journalctl": nil, - "k8s-audit": nil, - "kafka": 
nil, - "kinesis": nil, - "loki": nil, - "s3": nil, - "syslog": nil, - "wineventlog": nil, - } - transformRuntimes = map[string]*vm.Program{} + AcquisitionSources = map[string]func() DataSource{} + transformRuntimes = map[string]*vm.Program{} ) func GetDataSourceIface(dataSourceType string) (DataSource, error) { - source, ok := AcquisitionSources[dataSourceType] - if !ok { + source, registered := AcquisitionSources[dataSourceType] + if registered { + return source(), nil + } + + built, known := component.Built["datasource_"+dataSourceType] + + if !known { return nil, fmt.Errorf("unknown data source %s", dataSourceType) } - if source == nil { - return nil, fmt.Errorf("data source %s is not built in this version of crowdsec", dataSourceType) + if built { + panic("datasource " + dataSourceType + " is built but not registered") } - return source(), nil + return nil, fmt.Errorf("data source %s is not built in this version of crowdsec", dataSourceType) } // registerDataSource registers a datasource in the AcquisitionSources map. // It must be called in the init() function of the datasource package, and the datasource name // must be declared with a nil value in the map, to allow for conditional compilation. 
func registerDataSource(dataSourceType string, dsGetter func() DataSource) { - _, ok := AcquisitionSources[dataSourceType] - if !ok { - panic("datasource must be declared in the map: " + dataSourceType) - } + component.Register("datasource_" + dataSourceType) AcquisitionSources[dataSourceType] = dsGetter } @@ -214,9 +205,11 @@ func GetMetricsLevelFromPromCfg(prom *csconfig.PrometheusCfg) int { if prom == nil { return configuration.METRICS_FULL } + if !prom.Enabled { return configuration.METRICS_NONE } + if prom.Level == configuration.CFG_METRICS_AGGREGATE { return configuration.METRICS_AGGREGATE } @@ -224,6 +217,7 @@ func GetMetricsLevelFromPromCfg(prom *csconfig.PrometheusCfg) int { if prom.Level == configuration.CFG_METRICS_FULL { return configuration.METRICS_FULL } + return configuration.METRICS_FULL } @@ -232,15 +226,20 @@ func LoadAcquisitionFromFile(config *csconfig.CrowdsecServiceCfg, prom *csconfig var sources []DataSource metrics_level := GetMetricsLevelFromPromCfg(prom) + for _, acquisFile := range config.AcquisitionFiles { log.Infof("loading acquisition file : %s", acquisFile) + yamlFile, err := os.Open(acquisFile) if err != nil { return nil, err } + dec := yaml.NewDecoder(yamlFile) dec.SetStrict(true) + idx := -1 + for { var sub configuration.DataSourceCommonCfg err = dec.Decode(&sub) @@ -249,7 +248,9 @@ func LoadAcquisitionFromFile(config *csconfig.CrowdsecServiceCfg, prom *csconfig if !errors.Is(err, io.EOF) { return nil, fmt.Errorf("failed to yaml decode %s: %w", acquisFile, err) } + log.Tracef("End of yaml file") + break } @@ -263,11 +264,13 @@ func LoadAcquisitionFromFile(config *csconfig.CrowdsecServiceCfg, prom *csconfig log.Debugf("skipping empty item in %s", acquisFile) continue } + if sub.Source != "docker" { // docker is the only source that can be empty return nil, fmt.Errorf("missing labels in %s (position: %d)", acquisFile, idx) } } + if sub.Source == "" { return nil, fmt.Errorf("data source type is empty ('source') in %s (position: %d)", 
acquisFile, idx) } diff --git a/pkg/cwversion/component/component.go b/pkg/cwversion/component/component.go new file mode 100644 index 00000000000..4036b63cf00 --- /dev/null +++ b/pkg/cwversion/component/component.go @@ -0,0 +1,34 @@ +package component + +// Package component provides functionality for managing the registration of +// optional, compile-time components in the system. This is meant as a space +// saving measure, separate from feature flags (package pkg/fflag) which are +// only enabled/disabled at runtime. + +// Built is a map of all the known components, and whether they are built-in or not. +// This is populated as soon as possible by the respective init() functions +var Built = map[string]bool { + "datasource_appsec": false, + "datasource_cloudwatch": false, + "datasource_docker": false, + "datasource_file": false, + "datasource_journalctl": false, + "datasource_k8s-audit": false, + "datasource_kafka": false, + "datasource_kinesis": false, + "datasource_loki": false, + "datasource_s3": false, + "datasource_syslog": false, + "datasource_wineventlog":false, + "cscli_setup": false, +} + +func Register(name string) { + if _, ok := Built[name]; !ok { + // having a list of the disabled components is essential + // to debug users' issues + panic("cannot register unknown compile-time component: " + name) + } + + Built[name] = true +} diff --git a/pkg/cwversion/version.go b/pkg/cwversion/version.go index 867098e7d5a..2cb7de13e18 100644 --- a/pkg/cwversion/version.go +++ b/pkg/cwversion/version.go @@ -7,8 +7,8 @@ import ( "github.com/crowdsecurity/go-cs-lib/maptools" "github.com/crowdsecurity/go-cs-lib/version" - "github.com/crowdsecurity/crowdsec/pkg/acquisition" "github.com/crowdsecurity/crowdsec/pkg/apiclient/useragent" + "github.com/crowdsecurity/crowdsec/pkg/cwversion/component" "github.com/crowdsecurity/crowdsec/pkg/cwversion/constraint" ) @@ -18,16 +18,16 @@ var ( ) func FullString() string { - dsBuilt := []string{} - dsExcluded := []string{} + 
dsBuilt := map[string]struct{}{} + dsExcluded := map[string]struct{}{} - for _, ds := range maptools.SortedKeys(acquisition.AcquisitionSources) { - if acquisition.AcquisitionSources[ds] != nil { - dsBuilt = append(dsBuilt, ds) + for ds, built := range component.Built { + if built { + dsBuilt[ds] = struct{}{} continue } - dsExcluded = append(dsExcluded, ds) + dsExcluded[ds] = struct{}{} } ret := fmt.Sprintf("version: %s\n", version.String()) @@ -42,12 +42,16 @@ func FullString() string { ret += fmt.Sprintf("Constraint_api: %s\n", constraint.API) ret += fmt.Sprintf("Constraint_acquis: %s\n", constraint.Acquis) + built := "(none)" + if len(dsBuilt) > 0 { - ret += fmt.Sprintf("Built data sources: %s\n", strings.Join(dsBuilt, ", ")) + built = strings.Join(maptools.SortedKeys(dsBuilt), ", ") } + ret += fmt.Sprintf("Built-in optional components: %s\n", built) + if len(dsExcluded) > 0 { - ret += fmt.Sprintf("Excluded data sources: %s\n", strings.Join(dsExcluded, ", ")) + ret += fmt.Sprintf("Excluded components: %s\n", strings.Join(maptools.SortedKeys(dsExcluded), ", ")) } return ret From fee3debdccc71b7b4848cea95e8da0ea276117df Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 20 Sep 2024 16:00:58 +0200 Subject: [PATCH 300/318] context propagation: pkg/database/machines (#3248) --- cmd/crowdsec-cli/climachine/machines.go | 56 ++++++++++++---------- cmd/crowdsec-cli/clisupport/support.go | 6 +-- pkg/apiserver/apic.go | 4 +- pkg/apiserver/apic_metrics.go | 10 ++-- pkg/apiserver/apiserver_test.go | 10 ++-- pkg/apiserver/controllers/v1/heartbeat.go | 4 +- pkg/apiserver/controllers/v1/machines.go | 4 +- pkg/apiserver/middlewares/v1/jwt.go | 15 +++--- pkg/apiserver/usage_metrics_test.go | 32 ++++++------- pkg/database/alerts.go | 4 +- pkg/database/machines.go | 58 +++++++++++------------ 11 files changed, 109 insertions(+), 94 deletions(-) diff --git a/cmd/crowdsec-cli/climachine/machines.go b/cmd/crowdsec-cli/climachine/machines.go index 
30948f43056..1fbedcf57fd 100644 --- a/cmd/crowdsec-cli/climachine/machines.go +++ b/cmd/crowdsec-cli/climachine/machines.go @@ -1,6 +1,7 @@ package climachine import ( + "context" "encoding/csv" "encoding/json" "errors" @@ -210,11 +211,11 @@ func (cli *cliMachines) listCSV(out io.Writer, machines ent.Machines) error { return nil } -func (cli *cliMachines) List(out io.Writer, db *database.Client) error { +func (cli *cliMachines) List(ctx context.Context, out io.Writer, db *database.Client) error { // XXX: must use the provided db object, the one in the struct might be nil // (calling List directly skips the PersistentPreRunE) - machines, err := db.ListMachines() + machines, err := db.ListMachines(ctx) if err != nil { return fmt.Errorf("unable to list machines: %w", err) } @@ -251,8 +252,8 @@ func (cli *cliMachines) newListCmd() *cobra.Command { Example: `cscli machines list`, Args: cobra.NoArgs, DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, _ []string) error { - return cli.List(color.Output, cli.db) + RunE: func(cmd *cobra.Command, _ []string) error { + return cli.List(cmd.Context(), color.Output, cli.db) }, } @@ -278,8 +279,8 @@ func (cli *cliMachines) newAddCmd() *cobra.Command { cscli machines add MyTestMachine --auto cscli machines add MyTestMachine --password MyPassword cscli machines add -f- --auto > /tmp/mycreds.yaml`, - RunE: func(_ *cobra.Command, args []string) error { - return cli.add(args, string(password), dumpFile, apiURL, interactive, autoAdd, force) + RunE: func(cmd *cobra.Command, args []string) error { + return cli.add(cmd.Context(), args, string(password), dumpFile, apiURL, interactive, autoAdd, force) }, } @@ -294,7 +295,7 @@ cscli machines add -f- --auto > /tmp/mycreds.yaml`, return cmd } -func (cli *cliMachines) add(args []string, machinePassword string, dumpFile string, apiURL string, interactive bool, autoAdd bool, force bool) error { +func (cli *cliMachines) add(ctx context.Context, args []string, machinePassword string, dumpFile 
string, apiURL string, interactive bool, autoAdd bool, force bool) error { var ( err error machineID string @@ -353,7 +354,7 @@ func (cli *cliMachines) add(args []string, machinePassword string, dumpFile stri password := strfmt.Password(machinePassword) - _, err = cli.db.CreateMachine(&machineID, &password, "", true, force, types.PasswordAuthType) + _, err = cli.db.CreateMachine(ctx, &machineID, &password, "", true, force, types.PasswordAuthType) if err != nil { return fmt.Errorf("unable to create machine: %w", err) } @@ -399,6 +400,7 @@ func (cli *cliMachines) validMachineID(cmd *cobra.Command, args []string, toComp var err error cfg := cli.cfg() + ctx := cmd.Context() // need to load config and db because PersistentPreRunE is not called for completions @@ -407,13 +409,13 @@ func (cli *cliMachines) validMachineID(cmd *cobra.Command, args []string, toComp return nil, cobra.ShellCompDirectiveNoFileComp } - cli.db, err = require.DBClient(cmd.Context(), cfg.DbConfig) + cli.db, err = require.DBClient(ctx, cfg.DbConfig) if err != nil { cobra.CompError("unable to list machines " + err.Error()) return nil, cobra.ShellCompDirectiveNoFileComp } - machines, err := cli.db.ListMachines() + machines, err := cli.db.ListMachines(ctx) if err != nil { cobra.CompError("unable to list machines " + err.Error()) return nil, cobra.ShellCompDirectiveNoFileComp @@ -430,9 +432,9 @@ func (cli *cliMachines) validMachineID(cmd *cobra.Command, args []string, toComp return ret, cobra.ShellCompDirectiveNoFileComp } -func (cli *cliMachines) delete(machines []string, ignoreMissing bool) error { +func (cli *cliMachines) delete(ctx context.Context, machines []string, ignoreMissing bool) error { for _, machineID := range machines { - if err := cli.db.DeleteWatcher(machineID); err != nil { + if err := cli.db.DeleteWatcher(ctx, machineID); err != nil { var notFoundErr *database.MachineNotFoundError if ignoreMissing && errors.As(err, ¬FoundErr) { return nil @@ -460,8 +462,8 @@ func (cli *cliMachines) 
newDeleteCmd() *cobra.Command { Aliases: []string{"remove"}, DisableAutoGenTag: true, ValidArgsFunction: cli.validMachineID, - RunE: func(_ *cobra.Command, args []string) error { - return cli.delete(args, ignoreMissing) + RunE: func(cmd *cobra.Command, args []string) error { + return cli.delete(cmd.Context(), args, ignoreMissing) }, } @@ -471,7 +473,7 @@ func (cli *cliMachines) newDeleteCmd() *cobra.Command { return cmd } -func (cli *cliMachines) prune(duration time.Duration, notValidOnly bool, force bool) error { +func (cli *cliMachines) prune(ctx context.Context, duration time.Duration, notValidOnly bool, force bool) error { if duration < 2*time.Minute && !notValidOnly { if yes, err := ask.YesNo( "The duration you provided is less than 2 minutes. "+ @@ -484,12 +486,12 @@ func (cli *cliMachines) prune(duration time.Duration, notValidOnly bool, force b } machines := []*ent.Machine{} - if pending, err := cli.db.QueryPendingMachine(); err == nil { + if pending, err := cli.db.QueryPendingMachine(ctx); err == nil { machines = append(machines, pending...) } if !notValidOnly { - if pending, err := cli.db.QueryMachinesInactiveSince(time.Now().UTC().Add(-duration)); err == nil { + if pending, err := cli.db.QueryMachinesInactiveSince(ctx, time.Now().UTC().Add(-duration)); err == nil { machines = append(machines, pending...) 
} } @@ -512,7 +514,7 @@ func (cli *cliMachines) prune(duration time.Duration, notValidOnly bool, force b } } - deleted, err := cli.db.BulkDeleteWatchers(machines) + deleted, err := cli.db.BulkDeleteWatchers(ctx, machines) if err != nil { return fmt.Errorf("unable to prune machines: %w", err) } @@ -540,8 +542,8 @@ cscli machines prune --duration 1h cscli machines prune --not-validated-only --force`, Args: cobra.NoArgs, DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, _ []string) error { - return cli.prune(duration, notValidOnly, force) + RunE: func(cmd *cobra.Command, _ []string) error { + return cli.prune(cmd.Context(), duration, notValidOnly, force) }, } @@ -553,8 +555,8 @@ cscli machines prune --not-validated-only --force`, return cmd } -func (cli *cliMachines) validate(machineID string) error { - if err := cli.db.ValidateMachine(machineID); err != nil { +func (cli *cliMachines) validate(ctx context.Context, machineID string) error { + if err := cli.db.ValidateMachine(ctx, machineID); err != nil { return fmt.Errorf("unable to validate machine '%s': %w", machineID, err) } @@ -571,8 +573,8 @@ func (cli *cliMachines) newValidateCmd() *cobra.Command { Example: `cscli machines validate "machine_name"`, Args: cobra.ExactArgs(1), DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, args []string) error { - return cli.validate(args[0]) + RunE: func(cmd *cobra.Command, args []string) error { + return cli.validate(cmd.Context(), args[0]) }, } @@ -690,9 +692,11 @@ func (cli *cliMachines) newInspectCmd() *cobra.Command { Args: cobra.ExactArgs(1), DisableAutoGenTag: true, ValidArgsFunction: cli.validMachineID, - RunE: func(_ *cobra.Command, args []string) error { + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() machineID := args[0] - machine, err := cli.db.QueryMachineByID(machineID) + + machine, err := cli.db.QueryMachineByID(ctx, machineID) if err != nil { return fmt.Errorf("unable to read machine data '%s': %w", machineID, err) } 
diff --git a/cmd/crowdsec-cli/clisupport/support.go b/cmd/crowdsec-cli/clisupport/support.go index 7e41518805a..4474f5c8f11 100644 --- a/cmd/crowdsec-cli/clisupport/support.go +++ b/cmd/crowdsec-cli/clisupport/support.go @@ -210,7 +210,7 @@ func (cli *cliSupport) dumpBouncers(ctx context.Context, zw *zip.Writer, db *dat return nil } -func (cli *cliSupport) dumpAgents(zw *zip.Writer, db *database.Client) error { +func (cli *cliSupport) dumpAgents(ctx context.Context, zw *zip.Writer, db *database.Client) error { log.Info("Collecting agents") if db == nil { @@ -220,7 +220,7 @@ func (cli *cliSupport) dumpAgents(zw *zip.Writer, db *database.Client) error { out := new(bytes.Buffer) cm := climachine.New(cli.cfg) - if err := cm.List(out, db); err != nil { + if err := cm.List(ctx, out, db); err != nil { return err } @@ -529,7 +529,7 @@ func (cli *cliSupport) dump(ctx context.Context, outFile string) error { log.Warnf("could not collect bouncers information: %s", err) } - if err = cli.dumpAgents(zipWriter, db); err != nil { + if err = cli.dumpAgents(ctx, zipWriter, db); err != nil { log.Warnf("could not collect agents information: %s", err) } diff --git a/pkg/apiserver/apic.go b/pkg/apiserver/apic.go index b5384c6cc5c..c79d5f88e3f 100644 --- a/pkg/apiserver/apic.go +++ b/pkg/apiserver/apic.go @@ -85,7 +85,9 @@ func randomDuration(d time.Duration, delta time.Duration) time.Duration { func (a *apic) FetchScenariosListFromDB() ([]string, error) { scenarios := make([]string, 0) - machines, err := a.dbClient.ListMachines() + ctx := context.TODO() + + machines, err := a.dbClient.ListMachines(ctx) if err != nil { return nil, fmt.Errorf("while listing machines: %w", err) } diff --git a/pkg/apiserver/apic_metrics.go b/pkg/apiserver/apic_metrics.go index e5821e4c1e2..16b2328dbe9 100644 --- a/pkg/apiserver/apic_metrics.go +++ b/pkg/apiserver/apic_metrics.go @@ -27,7 +27,7 @@ func (a *apic) GetUsageMetrics(ctx context.Context) (*models.AllMetrics, []int, allMetrics := 
&models.AllMetrics{} metricsIds := make([]int, 0) - lps, err := a.dbClient.ListMachines() + lps, err := a.dbClient.ListMachines(ctx) if err != nil { return nil, nil, err } @@ -186,7 +186,7 @@ func (a *apic) MarkUsageMetricsAsSent(ctx context.Context, ids []int) error { } func (a *apic) GetMetrics(ctx context.Context) (*models.Metrics, error) { - machines, err := a.dbClient.ListMachines() + machines, err := a.dbClient.ListMachines(ctx) if err != nil { return nil, err } @@ -230,8 +230,8 @@ func (a *apic) GetMetrics(ctx context.Context) (*models.Metrics, error) { }, nil } -func (a *apic) fetchMachineIDs() ([]string, error) { - machines, err := a.dbClient.ListMachines() +func (a *apic) fetchMachineIDs(ctx context.Context) ([]string, error) { + machines, err := a.dbClient.ListMachines(ctx) if err != nil { return nil, err } @@ -277,7 +277,7 @@ func (a *apic) SendMetrics(stop chan (bool)) { machineIDs := []string{} reloadMachineIDs := func() { - ids, err := a.fetchMachineIDs() + ids, err := a.fetchMachineIDs(ctx) if err != nil { log.Debugf("unable to get machines (%s), will retry", err) diff --git a/pkg/apiserver/apiserver_test.go b/pkg/apiserver/apiserver_test.go index 89c75f35d21..0db1ee5dcdc 100644 --- a/pkg/apiserver/apiserver_test.go +++ b/pkg/apiserver/apiserver_test.go @@ -182,12 +182,12 @@ func NewAPITestForwardedFor(t *testing.T) (*gin.Engine, csconfig.Config) { } func ValidateMachine(t *testing.T, machineID string, config *csconfig.DatabaseCfg) { - ctx := context.Background() + ctx := context.TODO() dbClient, err := database.NewClient(ctx, config) require.NoError(t, err) - err = dbClient.ValidateMachine(machineID) + err = dbClient.ValidateMachine(ctx, machineID) require.NoError(t, err) } @@ -197,7 +197,7 @@ func GetMachineIP(t *testing.T, machineID string, config *csconfig.DatabaseCfg) dbClient, err := database.NewClient(ctx, config) require.NoError(t, err) - machines, err := dbClient.ListMachines() + machines, err := dbClient.ListMachines(ctx) 
require.NoError(t, err) for _, machine := range machines { @@ -332,7 +332,7 @@ func TestUnknownPath(t *testing.T) { req.Header.Set("User-Agent", UserAgent) router.ServeHTTP(w, req) - assert.Equal(t, 404, w.Code) + assert.Equal(t, http.StatusNotFound, w.Code) } /* @@ -390,7 +390,7 @@ func TestLoggingDebugToFileConfig(t *testing.T) { req, _ := http.NewRequestWithContext(ctx, http.MethodGet, "/test42", nil) req.Header.Set("User-Agent", UserAgent) api.router.ServeHTTP(w, req) - assert.Equal(t, 404, w.Code) + assert.Equal(t, http.StatusNotFound, w.Code) // wait for the request to happen time.Sleep(500 * time.Millisecond) diff --git a/pkg/apiserver/controllers/v1/heartbeat.go b/pkg/apiserver/controllers/v1/heartbeat.go index e1231eaa9ec..799b736ccfe 100644 --- a/pkg/apiserver/controllers/v1/heartbeat.go +++ b/pkg/apiserver/controllers/v1/heartbeat.go @@ -9,7 +9,9 @@ import ( func (c *Controller) HeartBeat(gctx *gin.Context) { machineID, _ := getMachineIDFromContext(gctx) - if err := c.DBClient.UpdateMachineLastHeartBeat(machineID); err != nil { + ctx := gctx.Request.Context() + + if err := c.DBClient.UpdateMachineLastHeartBeat(ctx, machineID); err != nil { c.HandleDBErrors(gctx, err) return } diff --git a/pkg/apiserver/controllers/v1/machines.go b/pkg/apiserver/controllers/v1/machines.go index 0030f7d3b39..ff59e389cb1 100644 --- a/pkg/apiserver/controllers/v1/machines.go +++ b/pkg/apiserver/controllers/v1/machines.go @@ -46,6 +46,8 @@ func (c *Controller) shouldAutoRegister(token string, gctx *gin.Context) (bool, } func (c *Controller) CreateMachine(gctx *gin.Context) { + ctx := gctx.Request.Context() + var input models.WatcherRegistrationRequest if err := gctx.ShouldBindJSON(&input); err != nil { @@ -66,7 +68,7 @@ func (c *Controller) CreateMachine(gctx *gin.Context) { return } - if _, err := c.DBClient.CreateMachine(input.MachineID, input.Password, gctx.ClientIP(), autoRegister, false, types.PasswordAuthType); err != nil { + if _, err := c.DBClient.CreateMachine(ctx, 
input.MachineID, input.Password, gctx.ClientIP(), autoRegister, false, types.PasswordAuthType); err != nil { c.HandleDBErrors(gctx, err) return } diff --git a/pkg/apiserver/middlewares/v1/jwt.go b/pkg/apiserver/middlewares/v1/jwt.go index 64406deff3e..17ca5b28359 100644 --- a/pkg/apiserver/middlewares/v1/jwt.go +++ b/pkg/apiserver/middlewares/v1/jwt.go @@ -55,6 +55,7 @@ type authInput struct { } func (j *JWT) authTLS(c *gin.Context) (*authInput, error) { + ctx := c.Request.Context() ret := authInput{} if j.TlsAuth == nil { @@ -76,7 +77,7 @@ func (j *JWT) authTLS(c *gin.Context) (*authInput, error) { ret.clientMachine, err = j.DbClient.Ent.Machine.Query(). Where(machine.MachineId(ret.machineID)). - First(j.DbClient.CTX) + First(ctx) if ent.IsNotFound(err) { // Machine was not found, let's create it logger.Infof("machine %s not found, create it", ret.machineID) @@ -91,7 +92,7 @@ func (j *JWT) authTLS(c *gin.Context) (*authInput, error) { password := strfmt.Password(pwd) - ret.clientMachine, err = j.DbClient.CreateMachine(&ret.machineID, &password, "", true, true, types.TlsAuthType) + ret.clientMachine, err = j.DbClient.CreateMachine(ctx, &ret.machineID, &password, "", true, true, types.TlsAuthType) if err != nil { return nil, fmt.Errorf("while creating machine entry for %s: %w", ret.machineID, err) } @@ -175,6 +176,8 @@ func (j *JWT) Authenticator(c *gin.Context) (interface{}, error) { auth *authInput ) + ctx := c.Request.Context() + if c.Request.TLS != nil && len(c.Request.TLS.PeerCertificates) > 0 { auth, err = j.authTLS(c) if err != nil { @@ -198,7 +201,7 @@ func (j *JWT) Authenticator(c *gin.Context) (interface{}, error) { } } - err = j.DbClient.UpdateMachineScenarios(scenarios, auth.clientMachine.ID) + err = j.DbClient.UpdateMachineScenarios(ctx, scenarios, auth.clientMachine.ID) if err != nil { log.Errorf("Failed to update scenarios list for '%s': %s\n", auth.machineID, err) return nil, jwt.ErrFailedAuthentication @@ -208,7 +211,7 @@ func (j *JWT) 
Authenticator(c *gin.Context) (interface{}, error) { clientIP := c.ClientIP() if auth.clientMachine.IpAddress == "" { - err = j.DbClient.UpdateMachineIP(clientIP, auth.clientMachine.ID) + err = j.DbClient.UpdateMachineIP(ctx, clientIP, auth.clientMachine.ID) if err != nil { log.Errorf("Failed to update ip address for '%s': %s\n", auth.machineID, err) return nil, jwt.ErrFailedAuthentication @@ -218,7 +221,7 @@ func (j *JWT) Authenticator(c *gin.Context) (interface{}, error) { if auth.clientMachine.IpAddress != clientIP && auth.clientMachine.IpAddress != "" { log.Warningf("new IP address detected for machine '%s': %s (old: %s)", auth.clientMachine.MachineId, clientIP, auth.clientMachine.IpAddress) - err = j.DbClient.UpdateMachineIP(clientIP, auth.clientMachine.ID) + err = j.DbClient.UpdateMachineIP(ctx, clientIP, auth.clientMachine.ID) if err != nil { log.Errorf("Failed to update ip address for '%s': %s\n", auth.clientMachine.MachineId, err) return nil, jwt.ErrFailedAuthentication @@ -231,7 +234,7 @@ func (j *JWT) Authenticator(c *gin.Context) (interface{}, error) { return nil, jwt.ErrFailedAuthentication } - if err := j.DbClient.UpdateMachineVersion(useragent[1], auth.clientMachine.ID); err != nil { + if err := j.DbClient.UpdateMachineVersion(ctx, useragent[1], auth.clientMachine.ID); err != nil { log.Errorf("unable to update machine '%s' version '%s': %s", auth.clientMachine.MachineId, useragent[1], err) log.Errorf("bad user agent from : %s", clientIP) diff --git a/pkg/apiserver/usage_metrics_test.go b/pkg/apiserver/usage_metrics_test.go index 019de5fb970..b231fb22ad8 100644 --- a/pkg/apiserver/usage_metrics_test.go +++ b/pkg/apiserver/usage_metrics_test.go @@ -30,7 +30,7 @@ func TestLPMetrics(t *testing.T) { name: "empty metrics for LP", body: `{ }`, - expectedStatusCode: 400, + expectedStatusCode: http.StatusBadRequest, expectedResponse: "Missing log processor data", authType: PASSWORD, }, @@ -50,7 +50,7 @@ func TestLPMetrics(t *testing.T) { } ] }`, - 
expectedStatusCode: 201, + expectedStatusCode: http.StatusCreated, expectedMetricsCount: 1, expectedResponse: "", expectedOSName: "foo", @@ -74,7 +74,7 @@ func TestLPMetrics(t *testing.T) { } ] }`, - expectedStatusCode: 201, + expectedStatusCode: http.StatusCreated, expectedMetricsCount: 1, expectedResponse: "", expectedOSName: "foo", @@ -98,7 +98,7 @@ func TestLPMetrics(t *testing.T) { } ] }`, - expectedStatusCode: 400, + expectedStatusCode: http.StatusBadRequest, expectedResponse: "Missing remediation component data", authType: APIKEY, }, @@ -117,7 +117,7 @@ func TestLPMetrics(t *testing.T) { } ] }`, - expectedStatusCode: 201, + expectedStatusCode: http.StatusCreated, expectedResponse: "", expectedMetricsCount: 1, expectedFeatureFlags: "a,b,c", @@ -138,7 +138,7 @@ func TestLPMetrics(t *testing.T) { } ] }`, - expectedStatusCode: 422, + expectedStatusCode: http.StatusUnprocessableEntity, expectedResponse: "log_processors.0.datasources in body is required", authType: PASSWORD, }, @@ -157,7 +157,7 @@ func TestLPMetrics(t *testing.T) { } ] }`, - expectedStatusCode: 201, + expectedStatusCode: http.StatusCreated, expectedMetricsCount: 1, expectedOSName: "foo", expectedOSVersion: "42", @@ -179,7 +179,7 @@ func TestLPMetrics(t *testing.T) { } ] }`, - expectedStatusCode: 422, + expectedStatusCode: http.StatusUnprocessableEntity, expectedResponse: "log_processors.0.os.name in body is required", authType: PASSWORD, }, @@ -199,7 +199,7 @@ func TestLPMetrics(t *testing.T) { assert.Equal(t, tt.expectedStatusCode, w.Code) assert.Contains(t, w.Body.String(), tt.expectedResponse) - machine, _ := dbClient.QueryMachineByID("test") + machine, _ := dbClient.QueryMachineByID(ctx, "test") metrics, _ := dbClient.GetLPUsageMetricsByMachineID(ctx, "test") assert.Len(t, metrics, tt.expectedMetricsCount) @@ -233,7 +233,7 @@ func TestRCMetrics(t *testing.T) { name: "empty metrics for RC", body: `{ }`, - expectedStatusCode: 400, + expectedStatusCode: http.StatusBadRequest, expectedResponse: 
"Missing remediation component data", authType: APIKEY, }, @@ -251,7 +251,7 @@ func TestRCMetrics(t *testing.T) { } ] }`, - expectedStatusCode: 201, + expectedStatusCode: http.StatusCreated, expectedMetricsCount: 1, expectedResponse: "", expectedOSName: "foo", @@ -273,7 +273,7 @@ func TestRCMetrics(t *testing.T) { } ] }`, - expectedStatusCode: 201, + expectedStatusCode: http.StatusCreated, expectedMetricsCount: 1, expectedResponse: "", expectedOSName: "foo", @@ -295,7 +295,7 @@ func TestRCMetrics(t *testing.T) { } ] }`, - expectedStatusCode: 400, + expectedStatusCode: http.StatusBadRequest, expectedResponse: "Missing log processor data", authType: PASSWORD, }, @@ -312,7 +312,7 @@ func TestRCMetrics(t *testing.T) { } ] }`, - expectedStatusCode: 201, + expectedStatusCode: http.StatusCreated, expectedResponse: "", expectedMetricsCount: 1, expectedFeatureFlags: "a,b,c", @@ -331,7 +331,7 @@ func TestRCMetrics(t *testing.T) { } ] }`, - expectedStatusCode: 201, + expectedStatusCode: http.StatusCreated, expectedMetricsCount: 1, expectedOSName: "foo", expectedOSVersion: "42", @@ -351,7 +351,7 @@ func TestRCMetrics(t *testing.T) { } ] }`, - expectedStatusCode: 422, + expectedStatusCode: http.StatusUnprocessableEntity, expectedResponse: "remediation_components.0.os.name in body is required", authType: APIKEY, }, diff --git a/pkg/database/alerts.go b/pkg/database/alerts.go index 3dfb0dc8197..d2760a209f9 100644 --- a/pkg/database/alerts.go +++ b/pkg/database/alerts.go @@ -687,8 +687,10 @@ func (c *Client) CreateAlert(machineID string, alertList []*models.Alert) ([]str err error ) + ctx := context.TODO() + if machineID != "" { - owner, err = c.QueryMachineByID(machineID) + owner, err = c.QueryMachineByID(ctx, machineID) if err != nil { if !errors.Is(err, UserNotExists) { return nil, fmt.Errorf("machine '%s': %w", machineID, err) diff --git a/pkg/database/machines.go b/pkg/database/machines.go index 27d737e625e..d8c02825312 100644 --- a/pkg/database/machines.go +++ 
b/pkg/database/machines.go @@ -72,7 +72,7 @@ func (c *Client) MachineUpdateBaseMetrics(ctx context.Context, machineID string, return nil } -func (c *Client) CreateMachine(machineID *string, password *strfmt.Password, ipAddress string, isValidated bool, force bool, authType string) (*ent.Machine, error) { +func (c *Client) CreateMachine(ctx context.Context, machineID *string, password *strfmt.Password, ipAddress string, isValidated bool, force bool, authType string) (*ent.Machine, error) { hashPassword, err := bcrypt.GenerateFromPassword([]byte(*password), bcrypt.DefaultCost) if err != nil { c.Log.Warningf("CreateMachine: %s", err) @@ -82,20 +82,20 @@ func (c *Client) CreateMachine(machineID *string, password *strfmt.Password, ipA machineExist, err := c.Ent.Machine. Query(). Where(machine.MachineIdEQ(*machineID)). - Select(machine.FieldMachineId).Strings(c.CTX) + Select(machine.FieldMachineId).Strings(ctx) if err != nil { return nil, errors.Wrapf(QueryFail, "machine '%s': %s", *machineID, err) } if len(machineExist) > 0 { if force { - _, err := c.Ent.Machine.Update().Where(machine.MachineIdEQ(*machineID)).SetPassword(string(hashPassword)).Save(c.CTX) + _, err := c.Ent.Machine.Update().Where(machine.MachineIdEQ(*machineID)).SetPassword(string(hashPassword)).Save(ctx) if err != nil { c.Log.Warningf("CreateMachine : %s", err) return nil, errors.Wrapf(UpdateFail, "machine '%s'", *machineID) } - machine, err := c.QueryMachineByID(*machineID) + machine, err := c.QueryMachineByID(ctx, *machineID) if err != nil { return nil, errors.Wrapf(QueryFail, "machine '%s': %s", *machineID, err) } @@ -113,7 +113,7 @@ func (c *Client) CreateMachine(machineID *string, password *strfmt.Password, ipA SetIpAddress(ipAddress). SetIsValidated(isValidated). SetAuthType(authType). 
- Save(c.CTX) + Save(ctx) if err != nil { c.Log.Warningf("CreateMachine : %s", err) return nil, errors.Wrapf(InsertFail, "creating machine '%s'", *machineID) @@ -122,11 +122,11 @@ func (c *Client) CreateMachine(machineID *string, password *strfmt.Password, ipA return machine, nil } -func (c *Client) QueryMachineByID(machineID string) (*ent.Machine, error) { +func (c *Client) QueryMachineByID(ctx context.Context, machineID string) (*ent.Machine, error) { machine, err := c.Ent.Machine. Query(). Where(machine.MachineIdEQ(machineID)). - Only(c.CTX) + Only(ctx) if err != nil { c.Log.Warningf("QueryMachineByID : %s", err) return &ent.Machine{}, errors.Wrapf(UserNotExists, "user '%s'", machineID) @@ -135,8 +135,8 @@ func (c *Client) QueryMachineByID(machineID string) (*ent.Machine, error) { return machine, nil } -func (c *Client) ListMachines() ([]*ent.Machine, error) { - machines, err := c.Ent.Machine.Query().All(c.CTX) +func (c *Client) ListMachines(ctx context.Context) ([]*ent.Machine, error) { + machines, err := c.Ent.Machine.Query().All(ctx) if err != nil { return nil, errors.Wrapf(QueryFail, "listing machines: %s", err) } @@ -144,8 +144,8 @@ func (c *Client) ListMachines() ([]*ent.Machine, error) { return machines, nil } -func (c *Client) ValidateMachine(machineID string) error { - rets, err := c.Ent.Machine.Update().Where(machine.MachineIdEQ(machineID)).SetIsValidated(true).Save(c.CTX) +func (c *Client) ValidateMachine(ctx context.Context, machineID string) error { + rets, err := c.Ent.Machine.Update().Where(machine.MachineIdEQ(machineID)).SetIsValidated(true).Save(ctx) if err != nil { return errors.Wrapf(UpdateFail, "validating machine: %s", err) } @@ -157,8 +157,8 @@ func (c *Client) ValidateMachine(machineID string) error { return nil } -func (c *Client) QueryPendingMachine() ([]*ent.Machine, error) { - machines, err := c.Ent.Machine.Query().Where(machine.IsValidatedEQ(false)).All(c.CTX) +func (c *Client) QueryPendingMachine(ctx context.Context) ([]*ent.Machine, 
error) { + machines, err := c.Ent.Machine.Query().Where(machine.IsValidatedEQ(false)).All(ctx) if err != nil { c.Log.Warningf("QueryPendingMachine : %s", err) return nil, errors.Wrapf(QueryFail, "querying pending machines: %s", err) @@ -167,11 +167,11 @@ func (c *Client) QueryPendingMachine() ([]*ent.Machine, error) { return machines, nil } -func (c *Client) DeleteWatcher(name string) error { +func (c *Client) DeleteWatcher(ctx context.Context, name string) error { nbDeleted, err := c.Ent.Machine. Delete(). Where(machine.MachineIdEQ(name)). - Exec(c.CTX) + Exec(ctx) if err != nil { return err } @@ -183,13 +183,13 @@ func (c *Client) DeleteWatcher(name string) error { return nil } -func (c *Client) BulkDeleteWatchers(machines []*ent.Machine) (int, error) { +func (c *Client) BulkDeleteWatchers(ctx context.Context, machines []*ent.Machine) (int, error) { ids := make([]int, len(machines)) for i, b := range machines { ids[i] = b.ID } - nbDeleted, err := c.Ent.Machine.Delete().Where(machine.IDIn(ids...)).Exec(c.CTX) + nbDeleted, err := c.Ent.Machine.Delete().Where(machine.IDIn(ids...)).Exec(ctx) if err != nil { return nbDeleted, err } @@ -197,8 +197,8 @@ func (c *Client) BulkDeleteWatchers(machines []*ent.Machine) (int, error) { return nbDeleted, nil } -func (c *Client) UpdateMachineLastHeartBeat(machineID string) error { - _, err := c.Ent.Machine.Update().Where(machine.MachineIdEQ(machineID)).SetLastHeartbeat(time.Now().UTC()).Save(c.CTX) +func (c *Client) UpdateMachineLastHeartBeat(ctx context.Context, machineID string) error { + _, err := c.Ent.Machine.Update().Where(machine.MachineIdEQ(machineID)).SetLastHeartbeat(time.Now().UTC()).Save(ctx) if err != nil { return errors.Wrapf(UpdateFail, "updating machine last_heartbeat: %s", err) } @@ -206,11 +206,11 @@ func (c *Client) UpdateMachineLastHeartBeat(machineID string) error { return nil } -func (c *Client) UpdateMachineScenarios(scenarios string, id int) error { +func (c *Client) UpdateMachineScenarios(ctx 
context.Context, scenarios string, id int) error { _, err := c.Ent.Machine.UpdateOneID(id). SetUpdatedAt(time.Now().UTC()). SetScenarios(scenarios). - Save(c.CTX) + Save(ctx) if err != nil { return fmt.Errorf("unable to update machine in database: %w", err) } @@ -218,10 +218,10 @@ func (c *Client) UpdateMachineScenarios(scenarios string, id int) error { return nil } -func (c *Client) UpdateMachineIP(ipAddr string, id int) error { +func (c *Client) UpdateMachineIP(ctx context.Context, ipAddr string, id int) error { _, err := c.Ent.Machine.UpdateOneID(id). SetIpAddress(ipAddr). - Save(c.CTX) + Save(ctx) if err != nil { return fmt.Errorf("unable to update machine IP in database: %w", err) } @@ -229,10 +229,10 @@ func (c *Client) UpdateMachineIP(ipAddr string, id int) error { return nil } -func (c *Client) UpdateMachineVersion(ipAddr string, id int) error { +func (c *Client) UpdateMachineVersion(ctx context.Context, ipAddr string, id int) error { _, err := c.Ent.Machine.UpdateOneID(id). SetVersion(ipAddr). 
- Save(c.CTX) + Save(ctx) if err != nil { return fmt.Errorf("unable to update machine version in database: %w", err) } @@ -240,8 +240,8 @@ func (c *Client) UpdateMachineVersion(ipAddr string, id int) error { return nil } -func (c *Client) IsMachineRegistered(machineID string) (bool, error) { - exist, err := c.Ent.Machine.Query().Where().Select(machine.FieldMachineId).Strings(c.CTX) +func (c *Client) IsMachineRegistered(ctx context.Context, machineID string) (bool, error) { + exist, err := c.Ent.Machine.Query().Where().Select(machine.FieldMachineId).Strings(ctx) if err != nil { return false, err } @@ -257,11 +257,11 @@ func (c *Client) IsMachineRegistered(machineID string) (bool, error) { return false, nil } -func (c *Client) QueryMachinesInactiveSince(t time.Time) ([]*ent.Machine, error) { +func (c *Client) QueryMachinesInactiveSince(ctx context.Context, t time.Time) ([]*ent.Machine, error) { return c.Ent.Machine.Query().Where( machine.Or( machine.And(machine.LastHeartbeatLT(t), machine.IsValidatedEQ(true)), machine.And(machine.LastHeartbeatIsNil(), machine.CreatedAtLT(t)), ), - ).All(c.CTX) + ).All(ctx) } From 4a2a6632273c5db364a2acd8e02ba4184e05582a Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 20 Sep 2024 20:45:21 +0200 Subject: [PATCH 301/318] context propagation: pkg/database/bouncers (#3249) --- cmd/crowdsec-cli/clibouncer/bouncers.go | 26 ++++++++-------- pkg/apiserver/apiserver_test.go | 2 +- pkg/apiserver/controllers/v1/decisions.go | 8 +++-- pkg/apiserver/middlewares/v1/api_key.go | 18 ++++++++---- pkg/apiserver/usage_metrics_test.go | 2 +- pkg/database/bouncers.go | 36 +++++++++++------------ 6 files changed, 51 insertions(+), 41 deletions(-) diff --git a/cmd/crowdsec-cli/clibouncer/bouncers.go b/cmd/crowdsec-cli/clibouncer/bouncers.go index 89e91b63911..226fbb7e922 100644 --- a/cmd/crowdsec-cli/clibouncer/bouncers.go +++ b/cmd/crowdsec-cli/clibouncer/bouncers.go @@ -208,7 +208,7 @@ func (cli *cliBouncers) 
newListCmd() *cobra.Command { return cmd } -func (cli *cliBouncers) add(bouncerName string, key string) error { +func (cli *cliBouncers) add(ctx context.Context, bouncerName string, key string) error { var err error keyLength := 32 @@ -220,7 +220,7 @@ func (cli *cliBouncers) add(bouncerName string, key string) error { } } - _, err = cli.db.CreateBouncer(bouncerName, "", middlewares.HashSHA512(key), types.ApiKeyAuthType) + _, err = cli.db.CreateBouncer(ctx, bouncerName, "", middlewares.HashSHA512(key), types.ApiKeyAuthType) if err != nil { return fmt.Errorf("unable to create bouncer: %w", err) } @@ -254,8 +254,8 @@ func (cli *cliBouncers) newAddCmd() *cobra.Command { cscli bouncers add MyBouncerName --key `, Args: cobra.ExactArgs(1), DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, args []string) error { - return cli.add(args[0], key) + RunE: func(cmd *cobra.Command, args []string) error { + return cli.add(cmd.Context(), args[0], key) }, } @@ -304,9 +304,9 @@ func (cli *cliBouncers) validBouncerID(cmd *cobra.Command, args []string, toComp return ret, cobra.ShellCompDirectiveNoFileComp } -func (cli *cliBouncers) delete(bouncers []string, ignoreMissing bool) error { +func (cli *cliBouncers) delete(ctx context.Context, bouncers []string, ignoreMissing bool) error { for _, bouncerID := range bouncers { - if err := cli.db.DeleteBouncer(bouncerID); err != nil { + if err := cli.db.DeleteBouncer(ctx, bouncerID); err != nil { var notFoundErr *database.BouncerNotFoundError if ignoreMissing && errors.As(err, ¬FoundErr) { return nil @@ -332,8 +332,8 @@ func (cli *cliBouncers) newDeleteCmd() *cobra.Command { Aliases: []string{"remove"}, DisableAutoGenTag: true, ValidArgsFunction: cli.validBouncerID, - RunE: func(_ *cobra.Command, args []string) error { - return cli.delete(args, ignoreMissing) + RunE: func(cmd *cobra.Command, args []string) error { + return cli.delete(cmd.Context(), args, ignoreMissing) }, } @@ -343,7 +343,7 @@ func (cli *cliBouncers) newDeleteCmd() 
*cobra.Command { return cmd } -func (cli *cliBouncers) prune(duration time.Duration, force bool) error { +func (cli *cliBouncers) prune(ctx context.Context, duration time.Duration, force bool) error { if duration < 2*time.Minute { if yes, err := ask.YesNo( "The duration you provided is less than 2 minutes. "+ @@ -355,7 +355,7 @@ func (cli *cliBouncers) prune(duration time.Duration, force bool) error { } } - bouncers, err := cli.db.QueryBouncersInactiveSince(time.Now().UTC().Add(-duration)) + bouncers, err := cli.db.QueryBouncersInactiveSince(ctx, time.Now().UTC().Add(-duration)) if err != nil { return fmt.Errorf("unable to query bouncers: %w", err) } @@ -378,7 +378,7 @@ func (cli *cliBouncers) prune(duration time.Duration, force bool) error { } } - deleted, err := cli.db.BulkDeleteBouncers(bouncers) + deleted, err := cli.db.BulkDeleteBouncers(ctx, bouncers) if err != nil { return fmt.Errorf("unable to prune bouncers: %w", err) } @@ -403,8 +403,8 @@ func (cli *cliBouncers) newPruneCmd() *cobra.Command { DisableAutoGenTag: true, Example: `cscli bouncers prune -d 45m cscli bouncers prune -d 45m --force`, - RunE: func(_ *cobra.Command, _ []string) error { - return cli.prune(duration, force) + RunE: func(cmd *cobra.Command, _ []string) error { + return cli.prune(cmd.Context(), duration, force) }, } diff --git a/pkg/apiserver/apiserver_test.go b/pkg/apiserver/apiserver_test.go index 0db1ee5dcdc..081fa23f15e 100644 --- a/pkg/apiserver/apiserver_test.go +++ b/pkg/apiserver/apiserver_test.go @@ -297,7 +297,7 @@ func CreateTestBouncer(t *testing.T, config *csconfig.DatabaseCfg) string { apiKey, err := middlewares.GenerateAPIKey(keyLength) require.NoError(t, err) - _, err = dbClient.CreateBouncer("test", "127.0.0.1", middlewares.HashSHA512(apiKey), types.ApiKeyAuthType) + _, err = dbClient.CreateBouncer(ctx, "test", "127.0.0.1", middlewares.HashSHA512(apiKey), types.ApiKeyAuthType) require.NoError(t, err) return apiKey diff --git a/pkg/apiserver/controllers/v1/decisions.go 
b/pkg/apiserver/controllers/v1/decisions.go index 54e9b0290cc..139280ab497 100644 --- a/pkg/apiserver/controllers/v1/decisions.go +++ b/pkg/apiserver/controllers/v1/decisions.go @@ -43,6 +43,8 @@ func (c *Controller) GetDecision(gctx *gin.Context) { data []*ent.Decision ) + ctx := gctx.Request.Context() + bouncerInfo, err := getBouncerFromContext(gctx) if err != nil { gctx.JSON(http.StatusUnauthorized, gin.H{"message": "not allowed"}) @@ -73,7 +75,7 @@ func (c *Controller) GetDecision(gctx *gin.Context) { } if bouncerInfo.LastPull == nil || time.Now().UTC().Sub(*bouncerInfo.LastPull) >= time.Minute { - if err := c.DBClient.UpdateBouncerLastPull(time.Now().UTC(), bouncerInfo.ID); err != nil { + if err := c.DBClient.UpdateBouncerLastPull(ctx, time.Now().UTC(), bouncerInfo.ID); err != nil { log.Errorf("failed to update bouncer last pull: %v", err) } } @@ -370,6 +372,8 @@ func (c *Controller) StreamDecisionNonChunked(gctx *gin.Context, bouncerInfo *en func (c *Controller) StreamDecision(gctx *gin.Context) { var err error + ctx := gctx.Request.Context() + streamStartTime := time.Now().UTC() bouncerInfo, err := getBouncerFromContext(gctx) @@ -400,7 +404,7 @@ func (c *Controller) StreamDecision(gctx *gin.Context) { if err == nil { //Only update the last pull time if no error occurred when sending the decisions to avoid missing decisions - if err := c.DBClient.UpdateBouncerLastPull(streamStartTime, bouncerInfo.ID); err != nil { + if err := c.DBClient.UpdateBouncerLastPull(ctx, streamStartTime, bouncerInfo.ID); err != nil { log.Errorf("unable to update bouncer '%s' pull: %v", bouncerInfo.Name, err) } } diff --git a/pkg/apiserver/middlewares/v1/api_key.go b/pkg/apiserver/middlewares/v1/api_key.go index e822666db0f..d438c9b15a4 100644 --- a/pkg/apiserver/middlewares/v1/api_key.go +++ b/pkg/apiserver/middlewares/v1/api_key.go @@ -64,6 +64,8 @@ func (a *APIKey) authTLS(c *gin.Context, logger *log.Entry) *ent.Bouncer { return nil } + ctx := c.Request.Context() + extractedCN, err 
:= a.TlsAuth.ValidateCert(c) if err != nil { logger.Warn(err) @@ -73,7 +75,7 @@ func (a *APIKey) authTLS(c *gin.Context, logger *log.Entry) *ent.Bouncer { logger = logger.WithField("cn", extractedCN) bouncerName := fmt.Sprintf("%s@%s", extractedCN, c.ClientIP()) - bouncer, err := a.DbClient.SelectBouncerByName(bouncerName) + bouncer, err := a.DbClient.SelectBouncerByName(ctx, bouncerName) // This is likely not the proper way, but isNotFound does not seem to work if err != nil && strings.Contains(err.Error(), "bouncer not found") { @@ -87,7 +89,7 @@ func (a *APIKey) authTLS(c *gin.Context, logger *log.Entry) *ent.Bouncer { logger.Infof("Creating bouncer %s", bouncerName) - bouncer, err = a.DbClient.CreateBouncer(bouncerName, c.ClientIP(), HashSHA512(apiKey), types.TlsAuthType) + bouncer, err = a.DbClient.CreateBouncer(ctx, bouncerName, c.ClientIP(), HashSHA512(apiKey), types.TlsAuthType) if err != nil { logger.Errorf("while creating bouncer db entry: %s", err) return nil @@ -112,9 +114,11 @@ func (a *APIKey) authPlain(c *gin.Context, logger *log.Entry) *ent.Bouncer { return nil } + ctx := c.Request.Context() + hashStr := HashSHA512(val[0]) - bouncer, err := a.DbClient.SelectBouncer(hashStr) + bouncer, err := a.DbClient.SelectBouncer(ctx, hashStr) if err != nil { logger.Errorf("while fetching bouncer info: %s", err) return nil @@ -132,6 +136,8 @@ func (a *APIKey) MiddlewareFunc() gin.HandlerFunc { return func(c *gin.Context) { var bouncer *ent.Bouncer + ctx := c.Request.Context() + clientIP := c.ClientIP() logger := log.WithField("ip", clientIP) @@ -153,7 +159,7 @@ func (a *APIKey) MiddlewareFunc() gin.HandlerFunc { logger = logger.WithField("name", bouncer.Name) if bouncer.IPAddress == "" { - if err := a.DbClient.UpdateBouncerIP(clientIP, bouncer.ID); err != nil { + if err := a.DbClient.UpdateBouncerIP(ctx, clientIP, bouncer.ID); err != nil { logger.Errorf("Failed to update ip address for '%s': %s\n", bouncer.Name, err) c.JSON(http.StatusForbidden, gin.H{"message": 
"access forbidden"}) c.Abort() @@ -166,7 +172,7 @@ func (a *APIKey) MiddlewareFunc() gin.HandlerFunc { if bouncer.IPAddress != clientIP && bouncer.IPAddress != "" && c.Request.Method != http.MethodHead { log.Warningf("new IP address detected for bouncer '%s': %s (old: %s)", bouncer.Name, clientIP, bouncer.IPAddress) - if err := a.DbClient.UpdateBouncerIP(clientIP, bouncer.ID); err != nil { + if err := a.DbClient.UpdateBouncerIP(ctx, clientIP, bouncer.ID); err != nil { logger.Errorf("Failed to update ip address for '%s': %s\n", bouncer.Name, err) c.JSON(http.StatusForbidden, gin.H{"message": "access forbidden"}) c.Abort() @@ -182,7 +188,7 @@ func (a *APIKey) MiddlewareFunc() gin.HandlerFunc { } if bouncer.Version != useragent[1] || bouncer.Type != useragent[0] { - if err := a.DbClient.UpdateBouncerTypeAndVersion(useragent[0], useragent[1], bouncer.ID); err != nil { + if err := a.DbClient.UpdateBouncerTypeAndVersion(ctx, useragent[0], useragent[1], bouncer.ID); err != nil { logger.Errorf("failed to update bouncer version and type: %s", err) c.JSON(http.StatusForbidden, gin.H{"message": "bad user agent"}) c.Abort() diff --git a/pkg/apiserver/usage_metrics_test.go b/pkg/apiserver/usage_metrics_test.go index b231fb22ad8..4d17b0d6ed8 100644 --- a/pkg/apiserver/usage_metrics_test.go +++ b/pkg/apiserver/usage_metrics_test.go @@ -371,7 +371,7 @@ func TestRCMetrics(t *testing.T) { assert.Equal(t, tt.expectedStatusCode, w.Code) assert.Contains(t, w.Body.String(), tt.expectedResponse) - bouncer, _ := dbClient.SelectBouncerByName("test") + bouncer, _ := dbClient.SelectBouncerByName(ctx, "test") metrics, _ := dbClient.GetBouncerUsageMetricsByName(ctx, "test") assert.Len(t, metrics, tt.expectedMetricsCount) diff --git a/pkg/database/bouncers.go b/pkg/database/bouncers.go index 6ff308ff786..04ef830ae72 100644 --- a/pkg/database/bouncers.go +++ b/pkg/database/bouncers.go @@ -41,8 +41,8 @@ func (c *Client) BouncerUpdateBaseMetrics(ctx context.Context, bouncerName strin return nil } 
-func (c *Client) SelectBouncer(apiKeyHash string) (*ent.Bouncer, error) { - result, err := c.Ent.Bouncer.Query().Where(bouncer.APIKeyEQ(apiKeyHash)).First(c.CTX) +func (c *Client) SelectBouncer(ctx context.Context, apiKeyHash string) (*ent.Bouncer, error) { + result, err := c.Ent.Bouncer.Query().Where(bouncer.APIKeyEQ(apiKeyHash)).First(ctx) if err != nil { return nil, err } @@ -50,8 +50,8 @@ func (c *Client) SelectBouncer(apiKeyHash string) (*ent.Bouncer, error) { return result, nil } -func (c *Client) SelectBouncerByName(bouncerName string) (*ent.Bouncer, error) { - result, err := c.Ent.Bouncer.Query().Where(bouncer.NameEQ(bouncerName)).First(c.CTX) +func (c *Client) SelectBouncerByName(ctx context.Context, bouncerName string) (*ent.Bouncer, error) { + result, err := c.Ent.Bouncer.Query().Where(bouncer.NameEQ(bouncerName)).First(ctx) if err != nil { return nil, err } @@ -68,14 +68,14 @@ func (c *Client) ListBouncers(ctx context.Context) ([]*ent.Bouncer, error) { return result, nil } -func (c *Client) CreateBouncer(name string, ipAddr string, apiKey string, authType string) (*ent.Bouncer, error) { +func (c *Client) CreateBouncer(ctx context.Context, name string, ipAddr string, apiKey string, authType string) (*ent.Bouncer, error) { bouncer, err := c.Ent.Bouncer. Create(). SetName(name). SetAPIKey(apiKey). SetRevoked(false). SetAuthType(authType). - Save(c.CTX) + Save(ctx) if err != nil { if ent.IsConstraintError(err) { return nil, fmt.Errorf("bouncer %s already exists", name) @@ -87,11 +87,11 @@ func (c *Client) CreateBouncer(name string, ipAddr string, apiKey string, authTy return bouncer, nil } -func (c *Client) DeleteBouncer(name string) error { +func (c *Client) DeleteBouncer(ctx context.Context, name string) error { nbDeleted, err := c.Ent.Bouncer. Delete(). Where(bouncer.NameEQ(name)). 
- Exec(c.CTX) + Exec(ctx) if err != nil { return err } @@ -103,13 +103,13 @@ func (c *Client) DeleteBouncer(name string) error { return nil } -func (c *Client) BulkDeleteBouncers(bouncers []*ent.Bouncer) (int, error) { +func (c *Client) BulkDeleteBouncers(ctx context.Context, bouncers []*ent.Bouncer) (int, error) { ids := make([]int, len(bouncers)) for i, b := range bouncers { ids[i] = b.ID } - nbDeleted, err := c.Ent.Bouncer.Delete().Where(bouncer.IDIn(ids...)).Exec(c.CTX) + nbDeleted, err := c.Ent.Bouncer.Delete().Where(bouncer.IDIn(ids...)).Exec(ctx) if err != nil { return nbDeleted, fmt.Errorf("unable to delete bouncers: %w", err) } @@ -117,10 +117,10 @@ func (c *Client) BulkDeleteBouncers(bouncers []*ent.Bouncer) (int, error) { return nbDeleted, nil } -func (c *Client) UpdateBouncerLastPull(lastPull time.Time, id int) error { +func (c *Client) UpdateBouncerLastPull(ctx context.Context, lastPull time.Time, id int) error { _, err := c.Ent.Bouncer.UpdateOneID(id). SetLastPull(lastPull). 
- Save(c.CTX) + Save(ctx) if err != nil { return fmt.Errorf("unable to update machine last pull in database: %w", err) } @@ -128,8 +128,8 @@ func (c *Client) UpdateBouncerLastPull(lastPull time.Time, id int) error { return nil } -func (c *Client) UpdateBouncerIP(ipAddr string, id int) error { - _, err := c.Ent.Bouncer.UpdateOneID(id).SetIPAddress(ipAddr).Save(c.CTX) +func (c *Client) UpdateBouncerIP(ctx context.Context, ipAddr string, id int) error { + _, err := c.Ent.Bouncer.UpdateOneID(id).SetIPAddress(ipAddr).Save(ctx) if err != nil { return fmt.Errorf("unable to update bouncer ip address in database: %w", err) } @@ -137,8 +137,8 @@ func (c *Client) UpdateBouncerIP(ipAddr string, id int) error { return nil } -func (c *Client) UpdateBouncerTypeAndVersion(bType string, version string, id int) error { - _, err := c.Ent.Bouncer.UpdateOneID(id).SetVersion(version).SetType(bType).Save(c.CTX) +func (c *Client) UpdateBouncerTypeAndVersion(ctx context.Context, bType string, version string, id int) error { + _, err := c.Ent.Bouncer.UpdateOneID(id).SetVersion(version).SetType(bType).Save(ctx) if err != nil { return fmt.Errorf("unable to update bouncer type and version in database: %w", err) } @@ -146,7 +146,7 @@ func (c *Client) UpdateBouncerTypeAndVersion(bType string, version string, id in return nil } -func (c *Client) QueryBouncersInactiveSince(t time.Time) ([]*ent.Bouncer, error) { +func (c *Client) QueryBouncersInactiveSince(ctx context.Context, t time.Time) ([]*ent.Bouncer, error) { return c.Ent.Bouncer.Query().Where( // poor man's coalesce bouncer.Or( @@ -156,5 +156,5 @@ func (c *Client) QueryBouncersInactiveSince(t time.Time) ([]*ent.Bouncer, error) bouncer.CreatedAtLT(t), ), ), - ).All(c.CTX) + ).All(ctx) } From 1133afe58ddfb190b8397bc6604ba70217130d35 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 23 Sep 2024 17:33:46 +0200 Subject: [PATCH 302/318] context propagation: pkg/database/{lock,decision} (#3251) * context 
propagation: pkg/database/lock * QueryAllDecisionsWithFilters(ctx...), QueryExpiredDecisionsWithFilters(ctx...) * more Query...Decision...(ctx..) * rest of decisions * lint --- pkg/apiserver/apic.go | 11 +-- pkg/apiserver/controllers/v1/decisions.go | 82 ++++++++++++++--------- pkg/apiserver/papi_cmd.go | 8 ++- pkg/database/decisions.go | 79 +++++++++------------- pkg/database/lock.go | 36 ++++++---- pkg/exprhelpers/helpers.go | 17 +++-- 6 files changed, 133 insertions(+), 100 deletions(-) diff --git a/pkg/apiserver/apic.go b/pkg/apiserver/apic.go index c79d5f88e3f..8b09e3e5f9d 100644 --- a/pkg/apiserver/apic.go +++ b/pkg/apiserver/apic.go @@ -426,6 +426,7 @@ func (a *apic) CAPIPullIsOld() (bool, error) { } func (a *apic) HandleDeletedDecisions(deletedDecisions []*models.Decision, deleteCounters map[string]map[string]int) (int, error) { + ctx := context.TODO() nbDeleted := 0 for _, decision := range deletedDecisions { @@ -438,7 +439,7 @@ func (a *apic) HandleDeletedDecisions(deletedDecisions []*models.Decision, delet filter["scopes"] = []string{*decision.Scope} } - dbCliRet, _, err := a.dbClient.ExpireDecisionsWithFilter(filter) + dbCliRet, _, err := a.dbClient.ExpireDecisionsWithFilter(ctx, filter) if err != nil { return 0, fmt.Errorf("expiring decisions error: %w", err) } @@ -458,6 +459,8 @@ func (a *apic) HandleDeletedDecisions(deletedDecisions []*models.Decision, delet func (a *apic) HandleDeletedDecisionsV3(deletedDecisions []*modelscapi.GetDecisionsStreamResponseDeletedItem, deleteCounters map[string]map[string]int) (int, error) { var nbDeleted int + ctx := context.TODO() + for _, decisions := range deletedDecisions { scope := decisions.Scope @@ -470,7 +473,7 @@ func (a *apic) HandleDeletedDecisionsV3(deletedDecisions []*modelscapi.GetDecisi filter["scopes"] = []string{*scope} } - dbCliRet, _, err := a.dbClient.ExpireDecisionsWithFilter(filter) + dbCliRet, _, err := a.dbClient.ExpireDecisionsWithFilter(ctx, filter) if err != nil { return 0, 
fmt.Errorf("expiring decisions error: %w", err) } @@ -640,7 +643,7 @@ func (a *apic) PullTop(ctx context.Context, forcePull bool) error { log.Debug("Acquiring lock for pullCAPI") - err = a.dbClient.AcquirePullCAPILock() + err = a.dbClient.AcquirePullCAPILock(ctx) if a.dbClient.IsLocked(err) { log.Info("PullCAPI is already running, skipping") return nil @@ -650,7 +653,7 @@ func (a *apic) PullTop(ctx context.Context, forcePull bool) error { defer func() { log.Debug("Releasing lock for pullCAPI") - if err := a.dbClient.ReleasePullCAPILock(); err != nil { + if err := a.dbClient.ReleasePullCAPILock(ctx); err != nil { log.Errorf("while releasing lock: %v", err) } }() diff --git a/pkg/apiserver/controllers/v1/decisions.go b/pkg/apiserver/controllers/v1/decisions.go index 139280ab497..d1aa79bbf07 100644 --- a/pkg/apiserver/controllers/v1/decisions.go +++ b/pkg/apiserver/controllers/v1/decisions.go @@ -1,8 +1,8 @@ package v1 import ( + "context" "encoding/json" - "fmt" "net/http" "strconv" "time" @@ -52,7 +52,7 @@ func (c *Controller) GetDecision(gctx *gin.Context) { return } - data, err = c.DBClient.QueryDecisionWithFilter(gctx.Request.URL.Query()) + data, err = c.DBClient.QueryDecisionWithFilter(ctx, gctx.Request.URL.Query()) if err != nil { c.HandleDBErrors(gctx, err) @@ -93,7 +93,9 @@ func (c *Controller) DeleteDecisionById(gctx *gin.Context) { return } - nbDeleted, deletedFromDB, err := c.DBClient.ExpireDecisionByID(decisionID) + ctx := gctx.Request.Context() + + nbDeleted, deletedFromDB, err := c.DBClient.ExpireDecisionByID(ctx, decisionID) if err != nil { c.HandleDBErrors(gctx, err) @@ -115,7 +117,9 @@ func (c *Controller) DeleteDecisionById(gctx *gin.Context) { } func (c *Controller) DeleteDecisions(gctx *gin.Context) { - nbDeleted, deletedFromDB, err := c.DBClient.ExpireDecisionsWithFilter(gctx.Request.URL.Query()) + ctx := gctx.Request.Context() + + nbDeleted, deletedFromDB, err := c.DBClient.ExpireDecisionsWithFilter(ctx, gctx.Request.URL.Query()) if err != nil { 
c.HandleDBErrors(gctx, err) @@ -136,32 +140,37 @@ func (c *Controller) DeleteDecisions(gctx *gin.Context) { gctx.JSON(http.StatusOK, deleteDecisionResp) } -func writeStartupDecisions(gctx *gin.Context, filters map[string][]string, dbFunc func(map[string][]string) ([]*ent.Decision, error)) error { +func writeStartupDecisions(gctx *gin.Context, filters map[string][]string, dbFunc func(context.Context, map[string][]string) ([]*ent.Decision, error)) error { // respBuffer := bytes.NewBuffer([]byte{}) - limit := 30000 //FIXME : make it configurable + limit := 30000 // FIXME : make it configurable needComma := false lastId := 0 - limitStr := fmt.Sprintf("%d", limit) + ctx := gctx.Request.Context() + + limitStr := strconv.Itoa(limit) filters["limit"] = []string{limitStr} + for { if lastId > 0 { - lastIdStr := fmt.Sprintf("%d", lastId) + lastIdStr := strconv.Itoa(lastId) filters["id_gt"] = []string{lastIdStr} } - data, err := dbFunc(filters) + data, err := dbFunc(ctx, filters) if err != nil { return err } + if len(data) > 0 { lastId = data[len(data)-1].ID + results := FormatDecisions(data) for _, decision := range results { decisionJSON, _ := json.Marshal(decision) if needComma { - //respBuffer.Write([]byte(",")) + // respBuffer.Write([]byte(",")) gctx.Writer.WriteString(",") } else { needComma = true @@ -174,10 +183,12 @@ func writeStartupDecisions(gctx *gin.Context, filters map[string][]string, dbFun return err } - //respBuffer.Reset() + // respBuffer.Reset() } } + log.Debugf("startup: %d decisions returned (limit: %d, lastid: %d)", len(data), limit, lastId) + if len(data) < limit { gctx.Writer.Flush() @@ -188,32 +199,37 @@ func writeStartupDecisions(gctx *gin.Context, filters map[string][]string, dbFun return nil } -func writeDeltaDecisions(gctx *gin.Context, filters map[string][]string, lastPull *time.Time, dbFunc func(*time.Time, map[string][]string) ([]*ent.Decision, error)) error { - //respBuffer := bytes.NewBuffer([]byte{}) - limit := 30000 //FIXME : make it 
configurable +func writeDeltaDecisions(gctx *gin.Context, filters map[string][]string, lastPull *time.Time, dbFunc func(context.Context, *time.Time, map[string][]string) ([]*ent.Decision, error)) error { + // respBuffer := bytes.NewBuffer([]byte{}) + limit := 30000 // FIXME : make it configurable needComma := false lastId := 0 - limitStr := fmt.Sprintf("%d", limit) + ctx := gctx.Request.Context() + + limitStr := strconv.Itoa(limit) filters["limit"] = []string{limitStr} + for { if lastId > 0 { - lastIdStr := fmt.Sprintf("%d", lastId) + lastIdStr := strconv.Itoa(lastId) filters["id_gt"] = []string{lastIdStr} } - data, err := dbFunc(lastPull, filters) + data, err := dbFunc(ctx, lastPull, filters) if err != nil { return err } + if len(data) > 0 { lastId = data[len(data)-1].ID + results := FormatDecisions(data) for _, decision := range results { decisionJSON, _ := json.Marshal(decision) if needComma { - //respBuffer.Write([]byte(",")) + // respBuffer.Write([]byte(",")) gctx.Writer.WriteString(",") } else { needComma = true @@ -226,10 +242,12 @@ func writeDeltaDecisions(gctx *gin.Context, filters map[string][]string, lastPul return err } - //respBuffer.Reset() + // respBuffer.Reset() } } + log.Debugf("startup: %d decisions returned (limit: %d, lastid: %d)", len(data), limit, lastId) + if len(data) < limit { gctx.Writer.Flush() @@ -261,7 +279,7 @@ func (c *Controller) StreamDecisionChunked(gctx *gin.Context, bouncerInfo *ent.B } gctx.Writer.WriteString(`], "deleted": [`) - //Expired decisions + // Expired decisions err = writeStartupDecisions(gctx, filters, c.DBClient.QueryExpiredDecisionsWithFilters) if err != nil { log.Errorf("failed sending expired decisions for startup: %v", err) @@ -302,8 +320,12 @@ func (c *Controller) StreamDecisionChunked(gctx *gin.Context, bouncerInfo *ent.B } func (c *Controller) StreamDecisionNonChunked(gctx *gin.Context, bouncerInfo *ent.Bouncer, streamStartTime time.Time, filters map[string][]string) error { - var data []*ent.Decision - var 
err error + var ( + data []*ent.Decision + err error + ) + + ctx := gctx.Request.Context() ret := make(map[string][]*models.Decision, 0) ret["new"] = []*models.Decision{} @@ -311,7 +333,7 @@ func (c *Controller) StreamDecisionNonChunked(gctx *gin.Context, bouncerInfo *en if val, ok := gctx.Request.URL.Query()["startup"]; ok { if val[0] == "true" { - data, err = c.DBClient.QueryAllDecisionsWithFilters(filters) + data, err = c.DBClient.QueryAllDecisionsWithFilters(ctx, filters) if err != nil { log.Errorf("failed querying decisions: %v", err) gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()}) @@ -322,7 +344,7 @@ func (c *Controller) StreamDecisionNonChunked(gctx *gin.Context, bouncerInfo *en ret["new"] = FormatDecisions(data) // getting expired decisions - data, err = c.DBClient.QueryExpiredDecisionsWithFilters(filters) + data, err = c.DBClient.QueryExpiredDecisionsWithFilters(ctx, filters) if err != nil { log.Errorf("unable to query expired decision for '%s' : %v", bouncerInfo.Name, err) gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()}) @@ -339,14 +361,14 @@ func (c *Controller) StreamDecisionNonChunked(gctx *gin.Context, bouncerInfo *en } // getting new decisions - data, err = c.DBClient.QueryNewDecisionsSinceWithFilters(bouncerInfo.LastPull, filters) + data, err = c.DBClient.QueryNewDecisionsSinceWithFilters(ctx, bouncerInfo.LastPull, filters) if err != nil { log.Errorf("unable to query new decision for '%s' : %v", bouncerInfo.Name, err) gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()}) return err } - //data = KeepLongestDecision(data) + // data = KeepLongestDecision(data) ret["new"] = FormatDecisions(data) since := time.Time{} @@ -355,7 +377,7 @@ func (c *Controller) StreamDecisionNonChunked(gctx *gin.Context, bouncerInfo *en } // getting expired decisions - data, err = c.DBClient.QueryExpiredDecisionsSinceWithFilters(&since, filters) // do we want to give exactly lastPull time ? 
+ data, err = c.DBClient.QueryExpiredDecisionsSinceWithFilters(ctx, &since, filters) // do we want to give exactly lastPull time ? if err != nil { log.Errorf("unable to query expired decision for '%s' : %v", bouncerInfo.Name, err) gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()}) @@ -384,8 +406,8 @@ func (c *Controller) StreamDecision(gctx *gin.Context) { } if gctx.Request.Method == http.MethodHead { - //For HEAD, just return as the bouncer won't get a body anyway, so no need to query the db - //We also don't update the last pull time, as it would mess with the delta sent on the next request (if done without startup=true) + // For HEAD, just return as the bouncer won't get a body anyway, so no need to query the db + // We also don't update the last pull time, as it would mess with the delta sent on the next request (if done without startup=true) gctx.String(http.StatusOK, "") return @@ -403,7 +425,7 @@ func (c *Controller) StreamDecision(gctx *gin.Context) { } if err == nil { - //Only update the last pull time if no error occurred when sending the decisions to avoid missing decisions + // Only update the last pull time if no error occurred when sending the decisions to avoid missing decisions if err := c.DBClient.UpdateBouncerLastPull(ctx, streamStartTime, bouncerInfo.ID); err != nil { log.Errorf("unable to update bouncer '%s' pull: %v", bouncerInfo.Name, err) } diff --git a/pkg/apiserver/papi_cmd.go b/pkg/apiserver/papi_cmd.go index 943eb4139de..18b19b03418 100644 --- a/pkg/apiserver/papi_cmd.go +++ b/pkg/apiserver/papi_cmd.go @@ -43,6 +43,8 @@ type listUnsubscribe struct { } func DecisionCmd(message *Message, p *Papi, sync bool) error { + ctx := context.TODO() + switch message.Header.OperationCmd { case "delete": data, err := json.Marshal(message.Data) @@ -65,7 +67,7 @@ func DecisionCmd(message *Message, p *Papi, sync bool) error { filter := make(map[string][]string) filter["uuid"] = UUIDs - _, deletedDecisions, err := 
p.DBClient.ExpireDecisionsWithFilter(filter) + _, deletedDecisions, err := p.DBClient.ExpireDecisionsWithFilter(ctx, filter) if err != nil { return fmt.Errorf("unable to expire decisions %+v: %w", UUIDs, err) } @@ -168,6 +170,8 @@ func AlertCmd(message *Message, p *Papi, sync bool) error { } func ManagementCmd(message *Message, p *Papi, sync bool) error { + ctx := context.TODO() + if sync { p.Logger.Infof("Ignoring management command from PAPI in sync mode") return nil @@ -195,7 +199,7 @@ func ManagementCmd(message *Message, p *Papi, sync bool) error { filter["origin"] = []string{types.ListOrigin} filter["scenario"] = []string{unsubscribeMsg.Name} - _, deletedDecisions, err := p.DBClient.ExpireDecisionsWithFilter(filter) + _, deletedDecisions, err := p.DBClient.ExpireDecisionsWithFilter(ctx, filter) if err != nil { return fmt.Errorf("unable to expire decisions for list %s : %w", unsubscribeMsg.Name, err) } diff --git a/pkg/database/decisions.go b/pkg/database/decisions.go index 5fd4757c883..8547990c25f 100644 --- a/pkg/database/decisions.go +++ b/pkg/database/decisions.go @@ -121,7 +121,7 @@ func BuildDecisionRequestWithFilter(query *ent.DecisionQuery, filter map[string] return query, nil } -func (c *Client) QueryAllDecisionsWithFilters(filters map[string][]string) ([]*ent.Decision, error) { +func (c *Client) QueryAllDecisionsWithFilters(ctx context.Context, filters map[string][]string) ([]*ent.Decision, error) { query := c.Ent.Decision.Query().Where( decision.UntilGT(time.Now().UTC()), ) @@ -138,7 +138,7 @@ func (c *Client) QueryAllDecisionsWithFilters(filters map[string][]string) ([]*e query = query.Order(ent.Asc(decision.FieldID)) - data, err := query.All(c.CTX) + data, err := query.All(ctx) if err != nil { c.Log.Warningf("QueryAllDecisionsWithFilters : %s", err) return []*ent.Decision{}, errors.Wrap(QueryFail, "get all decisions with filters") @@ -147,7 +147,7 @@ func (c *Client) QueryAllDecisionsWithFilters(filters map[string][]string) ([]*e return data, nil } 
-func (c *Client) QueryExpiredDecisionsWithFilters(filters map[string][]string) ([]*ent.Decision, error) { +func (c *Client) QueryExpiredDecisionsWithFilters(ctx context.Context, filters map[string][]string) ([]*ent.Decision, error) { query := c.Ent.Decision.Query().Where( decision.UntilLT(time.Now().UTC()), ) @@ -165,7 +165,7 @@ func (c *Client) QueryExpiredDecisionsWithFilters(filters map[string][]string) ( return []*ent.Decision{}, errors.Wrap(QueryFail, "get expired decisions with filters") } - data, err := query.All(c.CTX) + data, err := query.All(ctx) if err != nil { c.Log.Warningf("QueryExpiredDecisionsWithFilters : %s", err) return []*ent.Decision{}, errors.Wrap(QueryFail, "expired decisions") @@ -196,7 +196,7 @@ func (c *Client) QueryDecisionCountByScenario(ctx context.Context) ([]*Decisions return r, nil } -func (c *Client) QueryDecisionWithFilter(filter map[string][]string) ([]*ent.Decision, error) { +func (c *Client) QueryDecisionWithFilter(ctx context.Context, filter map[string][]string) ([]*ent.Decision, error) { var data []*ent.Decision var err error @@ -218,7 +218,7 @@ func (c *Client) QueryDecisionWithFilter(filter map[string][]string) ([]*ent.Dec decision.FieldValue, decision.FieldScope, decision.FieldOrigin, - ).Scan(c.CTX, &data) + ).Scan(ctx, &data) if err != nil { c.Log.Warningf("QueryDecisionWithFilter : %s", err) return []*ent.Decision{}, errors.Wrap(QueryFail, "query decision failed") @@ -255,7 +255,7 @@ func longestDecisionForScopeTypeValue(s *sql.Selector) { ) } -func (c *Client) QueryExpiredDecisionsSinceWithFilters(since *time.Time, filters map[string][]string) ([]*ent.Decision, error) { +func (c *Client) QueryExpiredDecisionsSinceWithFilters(ctx context.Context, since *time.Time, filters map[string][]string) ([]*ent.Decision, error) { query := c.Ent.Decision.Query().Where( decision.UntilLT(time.Now().UTC()), ) @@ -277,7 +277,7 @@ func (c *Client) QueryExpiredDecisionsSinceWithFilters(since *time.Time, filters query = 
query.Order(ent.Asc(decision.FieldID)) - data, err := query.All(c.CTX) + data, err := query.All(ctx) if err != nil { c.Log.Warningf("QueryExpiredDecisionsSinceWithFilters : %s", err) return []*ent.Decision{}, errors.Wrap(QueryFail, "expired decisions with filters") @@ -286,7 +286,7 @@ func (c *Client) QueryExpiredDecisionsSinceWithFilters(since *time.Time, filters return data, nil } -func (c *Client) QueryNewDecisionsSinceWithFilters(since *time.Time, filters map[string][]string) ([]*ent.Decision, error) { +func (c *Client) QueryNewDecisionsSinceWithFilters(ctx context.Context, since *time.Time, filters map[string][]string) ([]*ent.Decision, error) { query := c.Ent.Decision.Query().Where( decision.UntilGT(time.Now().UTC()), ) @@ -308,7 +308,7 @@ func (c *Client) QueryNewDecisionsSinceWithFilters(since *time.Time, filters map query = query.Order(ent.Asc(decision.FieldID)) - data, err := query.All(c.CTX) + data, err := query.All(ctx) if err != nil { c.Log.Warningf("QueryNewDecisionsSinceWithFilters : %s", err) return []*ent.Decision{}, errors.Wrapf(QueryFail, "new decisions since '%s'", since.String()) @@ -317,20 +317,7 @@ func (c *Client) QueryNewDecisionsSinceWithFilters(since *time.Time, filters map return data, nil } -func (c *Client) DeleteDecisionById(decisionID int) ([]*ent.Decision, error) { - toDelete, err := c.Ent.Decision.Query().Where(decision.IDEQ(decisionID)).All(c.CTX) - if err != nil { - c.Log.Warningf("DeleteDecisionById : %s", err) - return nil, errors.Wrapf(DeleteFail, "decision with id '%d' doesn't exist", decisionID) - } - - count, err := c.DeleteDecisions(toDelete) - c.Log.Debugf("deleted %d decisions", count) - - return toDelete, err -} - -func (c *Client) DeleteDecisionsWithFilter(filter map[string][]string) (string, []*ent.Decision, error) { +func (c *Client) DeleteDecisionsWithFilter(ctx context.Context, filter map[string][]string) (string, []*ent.Decision, error) { var err error var start_ip, start_sfx, end_ip, end_sfx int64 var ip_sz int 
@@ -433,13 +420,13 @@ func (c *Client) DeleteDecisionsWithFilter(filter map[string][]string) (string, return "0", nil, errors.Wrapf(InvalidFilter, "Unknown ip size %d", ip_sz) } - toDelete, err := decisions.All(c.CTX) + toDelete, err := decisions.All(ctx) if err != nil { c.Log.Warningf("DeleteDecisionsWithFilter : %s", err) return "0", nil, errors.Wrap(DeleteFail, "decisions with provided filter") } - count, err := c.DeleteDecisions(toDelete) + count, err := c.DeleteDecisions(ctx, toDelete) if err != nil { c.Log.Warningf("While deleting decisions : %s", err) return "0", nil, errors.Wrap(DeleteFail, "decisions with provided filter") @@ -449,7 +436,7 @@ func (c *Client) DeleteDecisionsWithFilter(filter map[string][]string) (string, } // ExpireDecisionsWithFilter updates the expiration time to now() for the decisions matching the filter, and returns the updated items -func (c *Client) ExpireDecisionsWithFilter(filter map[string][]string) (string, []*ent.Decision, error) { +func (c *Client) ExpireDecisionsWithFilter(ctx context.Context, filter map[string][]string) (string, []*ent.Decision, error) { var err error var start_ip, start_sfx, end_ip, end_sfx int64 var ip_sz int @@ -558,13 +545,13 @@ func (c *Client) ExpireDecisionsWithFilter(filter map[string][]string) (string, return "0", nil, errors.Wrapf(InvalidFilter, "Unknown ip size %d", ip_sz) } - DecisionsToDelete, err := decisions.All(c.CTX) + DecisionsToDelete, err := decisions.All(ctx) if err != nil { c.Log.Warningf("ExpireDecisionsWithFilter : %s", err) return "0", nil, errors.Wrap(DeleteFail, "expire decisions with provided filter") } - count, err := c.ExpireDecisions(DecisionsToDelete) + count, err := c.ExpireDecisions(ctx, DecisionsToDelete) if err != nil { return "0", nil, errors.Wrapf(DeleteFail, "expire decisions with provided filter : %s", err) } @@ -583,13 +570,13 @@ func decisionIDs(decisions []*ent.Decision) []int { // ExpireDecisions sets the expiration of a list of decisions to now() // It returns the 
number of impacted decisions for the CAPI/PAPI -func (c *Client) ExpireDecisions(decisions []*ent.Decision) (int, error) { +func (c *Client) ExpireDecisions(ctx context.Context, decisions []*ent.Decision) (int, error) { if len(decisions) <= decisionDeleteBulkSize { ids := decisionIDs(decisions) rows, err := c.Ent.Decision.Update().Where( decision.IDIn(ids...), - ).SetUntil(time.Now().UTC()).Save(c.CTX) + ).SetUntil(time.Now().UTC()).Save(ctx) if err != nil { return 0, fmt.Errorf("expire decisions with provided filter: %w", err) } @@ -602,7 +589,7 @@ func (c *Client) ExpireDecisions(decisions []*ent.Decision) (int, error) { total := 0 for _, chunk := range slicetools.Chunks(decisions, decisionDeleteBulkSize) { - rows, err := c.ExpireDecisions(chunk) + rows, err := c.ExpireDecisions(ctx, chunk) if err != nil { return total, err } @@ -615,13 +602,13 @@ func (c *Client) ExpireDecisions(decisions []*ent.Decision) (int, error) { // DeleteDecisions removes a list of decisions from the database // It returns the number of impacted decisions for the CAPI/PAPI -func (c *Client) DeleteDecisions(decisions []*ent.Decision) (int, error) { +func (c *Client) DeleteDecisions(ctx context.Context, decisions []*ent.Decision) (int, error) { if len(decisions) < decisionDeleteBulkSize { ids := decisionIDs(decisions) rows, err := c.Ent.Decision.Delete().Where( decision.IDIn(ids...), - ).Exec(c.CTX) + ).Exec(ctx) if err != nil { return 0, fmt.Errorf("hard delete decisions with provided filter: %w", err) } @@ -634,7 +621,7 @@ func (c *Client) DeleteDecisions(decisions []*ent.Decision) (int, error) { tot := 0 for _, chunk := range slicetools.Chunks(decisions, decisionDeleteBulkSize) { - rows, err := c.DeleteDecisions(chunk) + rows, err := c.DeleteDecisions(ctx, chunk) if err != nil { return tot, err } @@ -646,8 +633,8 @@ func (c *Client) DeleteDecisions(decisions []*ent.Decision) (int, error) { } // ExpireDecision set the expiration of a decision to now() -func (c *Client) 
ExpireDecisionByID(decisionID int) (int, []*ent.Decision, error) { - toUpdate, err := c.Ent.Decision.Query().Where(decision.IDEQ(decisionID)).All(c.CTX) +func (c *Client) ExpireDecisionByID(ctx context.Context, decisionID int) (int, []*ent.Decision, error) { + toUpdate, err := c.Ent.Decision.Query().Where(decision.IDEQ(decisionID)).All(ctx) // XXX: do we want 500 or 404 here? if err != nil || len(toUpdate) == 0 { @@ -659,12 +646,12 @@ func (c *Client) ExpireDecisionByID(decisionID int) (int, []*ent.Decision, error return 0, nil, ItemNotFound } - count, err := c.ExpireDecisions(toUpdate) + count, err := c.ExpireDecisions(ctx, toUpdate) return count, toUpdate, err } -func (c *Client) CountDecisionsByValue(decisionValue string) (int, error) { +func (c *Client) CountDecisionsByValue(ctx context.Context, decisionValue string) (int, error) { var err error var start_ip, start_sfx, end_ip, end_sfx int64 var ip_sz, count int @@ -682,7 +669,7 @@ func (c *Client) CountDecisionsByValue(decisionValue string) (int, error) { return 0, errors.Wrapf(err, "fail to apply StartIpEndIpFilter") } - count, err = decisions.Count(c.CTX) + count, err = decisions.Count(ctx) if err != nil { return 0, errors.Wrapf(err, "fail to count decisions") } @@ -690,7 +677,7 @@ func (c *Client) CountDecisionsByValue(decisionValue string) (int, error) { return count, nil } -func (c *Client) CountActiveDecisionsByValue(decisionValue string) (int, error) { +func (c *Client) CountActiveDecisionsByValue(ctx context.Context, decisionValue string) (int, error) { var err error var start_ip, start_sfx, end_ip, end_sfx int64 var ip_sz, count int @@ -710,7 +697,7 @@ func (c *Client) CountActiveDecisionsByValue(decisionValue string) (int, error) decisions = decisions.Where(decision.UntilGT(time.Now().UTC())) - count, err = decisions.Count(c.CTX) + count, err = decisions.Count(ctx) if err != nil { return 0, fmt.Errorf("fail to count decisions: %w", err) } @@ -718,7 +705,7 @@ func (c *Client) 
CountActiveDecisionsByValue(decisionValue string) (int, error) return count, nil } -func (c *Client) GetActiveDecisionsTimeLeftByValue(decisionValue string) (time.Duration, error) { +func (c *Client) GetActiveDecisionsTimeLeftByValue(ctx context.Context, decisionValue string) (time.Duration, error) { var err error var start_ip, start_sfx, end_ip, end_sfx int64 var ip_sz int @@ -740,7 +727,7 @@ func (c *Client) GetActiveDecisionsTimeLeftByValue(decisionValue string) (time.D decisions = decisions.Order(ent.Desc(decision.FieldUntil)) - decision, err := decisions.First(c.CTX) + decision, err := decisions.First(ctx) if err != nil && !ent.IsNotFound(err) { return 0, fmt.Errorf("fail to get decision: %w", err) } @@ -752,7 +739,7 @@ func (c *Client) GetActiveDecisionsTimeLeftByValue(decisionValue string) (time.D return decision.Until.Sub(time.Now().UTC()), nil } -func (c *Client) CountDecisionsSinceByValue(decisionValue string, since time.Time) (int, error) { +func (c *Client) CountDecisionsSinceByValue(ctx context.Context, decisionValue string, since time.Time) (int, error) { ip_sz, start_ip, start_sfx, end_ip, end_sfx, err := types.Addr2Ints(decisionValue) if err != nil { return 0, errors.Wrapf(InvalidIPOrRange, "unable to convert '%s' to int: %s", decisionValue, err) @@ -768,7 +755,7 @@ func (c *Client) CountDecisionsSinceByValue(decisionValue string, since time.Tim return 0, errors.Wrapf(err, "fail to apply StartIpEndIpFilter") } - count, err := decisions.Count(c.CTX) + count, err := decisions.Count(ctx) if err != nil { return 0, errors.Wrapf(err, "fail to count decisions") } diff --git a/pkg/database/lock.go b/pkg/database/lock.go index d25b71870f0..474228a069c 100644 --- a/pkg/database/lock.go +++ b/pkg/database/lock.go @@ -1,6 +1,7 @@ package database import ( + "context" "time" "github.com/pkg/errors" @@ -16,40 +17,45 @@ const ( CapiPullLockName = "pullCAPI" ) -func (c *Client) AcquireLock(name string) error { +func (c *Client) AcquireLock(ctx context.Context, name 
string) error { log.Debugf("acquiring lock %s", name) _, err := c.Ent.Lock.Create(). SetName(name). SetCreatedAt(types.UtcNow()). - Save(c.CTX) + Save(ctx) + if ent.IsConstraintError(err) { return err } + if err != nil { return errors.Wrapf(InsertFail, "insert lock: %s", err) } + return nil } -func (c *Client) ReleaseLock(name string) error { +func (c *Client) ReleaseLock(ctx context.Context, name string) error { log.Debugf("releasing lock %s", name) - _, err := c.Ent.Lock.Delete().Where(lock.NameEQ(name)).Exec(c.CTX) + _, err := c.Ent.Lock.Delete().Where(lock.NameEQ(name)).Exec(ctx) if err != nil { return errors.Wrapf(DeleteFail, "delete lock: %s", err) } + return nil } -func (c *Client) ReleaseLockWithTimeout(name string, timeout int) error { +func (c *Client) ReleaseLockWithTimeout(ctx context.Context, name string, timeout int) error { log.Debugf("releasing lock %s with timeout of %d minutes", name, timeout) + _, err := c.Ent.Lock.Delete().Where( lock.NameEQ(name), lock.CreatedAtLT(time.Now().UTC().Add(-time.Duration(timeout)*time.Minute)), - ).Exec(c.CTX) - + ).Exec(ctx) if err != nil { return errors.Wrapf(DeleteFail, "delete lock: %s", err) } + return nil } @@ -57,23 +63,25 @@ func (c *Client) IsLocked(err error) bool { return ent.IsConstraintError(err) } -func (c *Client) AcquirePullCAPILock() error { - - /*delete orphan "old" lock if present*/ - err := c.ReleaseLockWithTimeout(CapiPullLockName, CAPIPullLockTimeout) +func (c *Client) AcquirePullCAPILock(ctx context.Context) error { + // delete orphan "old" lock if present + err := c.ReleaseLockWithTimeout(ctx, CapiPullLockName, CAPIPullLockTimeout) if err != nil { log.Errorf("unable to release pullCAPI lock: %s", err) } - return c.AcquireLock(CapiPullLockName) + + return c.AcquireLock(ctx, CapiPullLockName) } -func (c *Client) ReleasePullCAPILock() error { +func (c *Client) ReleasePullCAPILock(ctx context.Context) error { log.Debugf("deleting lock %s", CapiPullLockName) + _, err := c.Ent.Lock.Delete().Where( 
lock.NameEQ(CapiPullLockName), - ).Exec(c.CTX) + ).Exec(ctx) if err != nil { return errors.Wrapf(DeleteFail, "delete lock: %s", err) } + return nil } diff --git a/pkg/exprhelpers/helpers.go b/pkg/exprhelpers/helpers.go index 2ca7d0be79a..6b7eb0840e9 100644 --- a/pkg/exprhelpers/helpers.go +++ b/pkg/exprhelpers/helpers.go @@ -2,6 +2,7 @@ package exprhelpers import ( "bufio" + "context" "encoding/base64" "errors" "fmt" @@ -592,7 +593,10 @@ func GetDecisionsCount(params ...any) (any, error) { return 0, nil } - count, err := dbClient.CountDecisionsByValue(value) + + ctx := context.TODO() + + count, err := dbClient.CountDecisionsByValue(ctx, value) if err != nil { log.Errorf("Failed to get decisions count from value '%s'", value) return 0, nil //nolint:nilerr // This helper did not return an error before the move to expr.Function, we keep this behavior for backward compatibility @@ -613,8 +617,11 @@ func GetDecisionsSinceCount(params ...any) (any, error) { log.Errorf("Failed to parse since parameter '%s' : %s", since, err) return 0, nil } + + ctx := context.TODO() sinceTime := time.Now().UTC().Add(-sinceDuration) - count, err := dbClient.CountDecisionsSinceByValue(value, sinceTime) + + count, err := dbClient.CountDecisionsSinceByValue(ctx, value, sinceTime) if err != nil { log.Errorf("Failed to get decisions count from value '%s'", value) return 0, nil //nolint:nilerr // This helper did not return an error before the move to expr.Function, we keep this behavior for backward compatibility @@ -628,7 +635,8 @@ func GetActiveDecisionsCount(params ...any) (any, error) { log.Error("No database config to call GetActiveDecisionsCount()") return 0, nil } - count, err := dbClient.CountActiveDecisionsByValue(value) + ctx := context.TODO() + count, err := dbClient.CountActiveDecisionsByValue(ctx, value) if err != nil { log.Errorf("Failed to get active decisions count from value '%s'", value) return 0, err @@ -642,7 +650,8 @@ func GetActiveDecisionsTimeLeft(params ...any) (any, 
error) { log.Error("No database config to call GetActiveDecisionsTimeLeft()") return 0, nil } - timeLeft, err := dbClient.GetActiveDecisionsTimeLeftByValue(value) + ctx := context.TODO() + timeLeft, err := dbClient.GetActiveDecisionsTimeLeftByValue(ctx, value) if err != nil { log.Errorf("Failed to get active decisions time left from value '%s'", value) return 0, err From 3945a991bd265b765e9ab80e28aff70c21c707c9 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 24 Sep 2024 14:13:45 +0200 Subject: [PATCH 303/318] context propagation: pkg/database/alerts (#3252) * alerts * drop CTX from dbclient * lint * pkg/database/alerts: context.TODO() * cscli: context.Background() -> cmd.Context() --- .golangci.yml | 4 +- cmd/crowdsec-cli/clialert/alerts.go | 20 ++-- cmd/crowdsec-cli/cliconsole/console.go | 8 +- cmd/crowdsec-cli/clidecision/decisions.go | 22 ++--- cmd/crowdsec-cli/clilapi/lapi.go | 2 +- .../clinotifications/notifications.go | 8 +- pkg/apiserver/apic.go | 10 +- pkg/apiserver/apic_test.go | 10 +- pkg/apiserver/controllers/v1/alerts.go | 18 +++- pkg/apiserver/controllers/v1/decisions.go | 4 +- pkg/apiserver/controllers/v1/metrics.go | 15 ++- pkg/apiserver/decisions_test.go | 10 +- pkg/apiserver/middlewares/v1/cache.go | 2 +- pkg/apiserver/middlewares/v1/crl.go | 10 +- pkg/apiserver/middlewares/v1/jwt.go | 4 +- pkg/apiserver/papi_cmd.go | 4 +- pkg/database/alerts.go | 97 +++++++++---------- pkg/database/database.go | 2 - pkg/database/decisions.go | 6 +- pkg/database/flush.go | 6 +- pkg/database/metrics.go | 2 +- 21 files changed, 141 insertions(+), 123 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index b76e2613be7..54c0acb0644 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -20,7 +20,7 @@ linters-settings: maintidx: # raise this after refactoring - under: 16 + under: 15 misspell: locale: US @@ -118,7 +118,7 @@ linters-settings: arguments: [6] - name: function-length # lower this after refactoring - arguments: [110, 
235] + arguments: [110, 237] - name: get-return disabled: true - name: increment-decrement diff --git a/cmd/crowdsec-cli/clialert/alerts.go b/cmd/crowdsec-cli/clialert/alerts.go index dbb7ca14db5..75454e945f2 100644 --- a/cmd/crowdsec-cli/clialert/alerts.go +++ b/cmd/crowdsec-cli/clialert/alerts.go @@ -235,7 +235,7 @@ func (cli *cliAlerts) NewCommand() *cobra.Command { return cmd } -func (cli *cliAlerts) list(alertListFilter apiclient.AlertsListOpts, limit *int, contained *bool, printMachine bool) error { +func (cli *cliAlerts) list(ctx context.Context, alertListFilter apiclient.AlertsListOpts, limit *int, contained *bool, printMachine bool) error { var err error *alertListFilter.ScopeEquals, err = SanitizeScope(*alertListFilter.ScopeEquals, *alertListFilter.IPEquals, *alertListFilter.RangeEquals) @@ -311,7 +311,7 @@ func (cli *cliAlerts) list(alertListFilter apiclient.AlertsListOpts, limit *int, alertListFilter.Contains = new(bool) } - alerts, _, err := cli.client.Alerts.List(context.Background(), alertListFilter) + alerts, _, err := cli.client.Alerts.List(ctx, alertListFilter) if err != nil { return fmt.Errorf("unable to list alerts: %w", err) } @@ -354,7 +354,7 @@ cscli alerts list --type ban`, Long: `List alerts with optional filters`, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { - return cli.list(alertListFilter, limit, contained, printMachine) + return cli.list(cmd.Context(), alertListFilter, limit, contained, printMachine) }, } @@ -377,7 +377,7 @@ cscli alerts list --type ban`, return cmd } -func (cli *cliAlerts) delete(delFilter apiclient.AlertsDeleteOpts, activeDecision *bool, deleteAll bool, delAlertByID string, contained *bool) error { +func (cli *cliAlerts) delete(ctx context.Context, delFilter apiclient.AlertsDeleteOpts, activeDecision *bool, deleteAll bool, delAlertByID string, contained *bool) error { var err error if !deleteAll { @@ -423,12 +423,12 @@ func (cli *cliAlerts) delete(delFilter apiclient.AlertsDeleteOpts, 
activeDecisio var alerts *models.DeleteAlertsResponse if delAlertByID == "" { - alerts, _, err = cli.client.Alerts.Delete(context.Background(), delFilter) + alerts, _, err = cli.client.Alerts.Delete(ctx, delFilter) if err != nil { return fmt.Errorf("unable to delete alerts: %w", err) } } else { - alerts, _, err = cli.client.Alerts.DeleteOne(context.Background(), delAlertByID) + alerts, _, err = cli.client.Alerts.DeleteOne(ctx, delAlertByID) if err != nil { return fmt.Errorf("unable to delete alert: %w", err) } @@ -480,7 +480,7 @@ cscli alerts delete -s crowdsecurity/ssh-bf"`, return nil }, RunE: func(cmd *cobra.Command, _ []string) error { - return cli.delete(delFilter, activeDecision, deleteAll, delAlertByID, contained) + return cli.delete(cmd.Context(), delFilter, activeDecision, deleteAll, delAlertByID, contained) }, } @@ -498,7 +498,7 @@ cscli alerts delete -s crowdsecurity/ssh-bf"`, return cmd } -func (cli *cliAlerts) inspect(details bool, alertIDs ...string) error { +func (cli *cliAlerts) inspect(ctx context.Context, details bool, alertIDs ...string) error { cfg := cli.cfg() for _, alertID := range alertIDs { @@ -507,7 +507,7 @@ func (cli *cliAlerts) inspect(details bool, alertIDs ...string) error { return fmt.Errorf("bad alert id %s", alertID) } - alert, _, err := cli.client.Alerts.GetByID(context.Background(), id) + alert, _, err := cli.client.Alerts.GetByID(ctx, id) if err != nil { return fmt.Errorf("can't find alert with id %s: %w", alertID, err) } @@ -551,7 +551,7 @@ func (cli *cliAlerts) newInspectCmd() *cobra.Command { _ = cmd.Help() return errors.New("missing alert_id") } - return cli.inspect(details, args...) + return cli.inspect(cmd.Context(), details, args...) 
}, } diff --git a/cmd/crowdsec-cli/cliconsole/console.go b/cmd/crowdsec-cli/cliconsole/console.go index af1ba316c2d..448ddcee7fa 100644 --- a/cmd/crowdsec-cli/cliconsole/console.go +++ b/cmd/crowdsec-cli/cliconsole/console.go @@ -66,7 +66,7 @@ func (cli *cliConsole) NewCommand() *cobra.Command { return cmd } -func (cli *cliConsole) enroll(key string, name string, overwrite bool, tags []string, opts []string) error { +func (cli *cliConsole) enroll(ctx context.Context, key string, name string, overwrite bool, tags []string, opts []string) error { cfg := cli.cfg() password := strfmt.Password(cfg.API.Server.OnlineClient.Credentials.Password) @@ -127,7 +127,7 @@ func (cli *cliConsole) enroll(key string, name string, overwrite bool, tags []st VersionPrefix: "v3", }) - resp, err := c.Auth.EnrollWatcher(context.Background(), key, name, tags, overwrite) + resp, err := c.Auth.EnrollWatcher(ctx, key, name, tags, overwrite) if err != nil { return fmt.Errorf("could not enroll instance: %w", err) } @@ -173,8 +173,8 @@ After running this command your will need to validate the enrollment in the weba valid options are : %s,all (see 'cscli console status' for details)`, strings.Join(csconfig.CONSOLE_CONFIGS, ",")), Args: cobra.ExactArgs(1), DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, args []string) error { - return cli.enroll(args[0], name, overwrite, tags, opts) + RunE: func(cmd *cobra.Command, args []string) error { + return cli.enroll(cmd.Context(), args[0], name, overwrite, tags, opts) }, } diff --git a/cmd/crowdsec-cli/clidecision/decisions.go b/cmd/crowdsec-cli/clidecision/decisions.go index b82ebe3086e..1f8781a3716 100644 --- a/cmd/crowdsec-cli/clidecision/decisions.go +++ b/cmd/crowdsec-cli/clidecision/decisions.go @@ -170,7 +170,7 @@ func (cli *cliDecisions) NewCommand() *cobra.Command { return cmd } -func (cli *cliDecisions) list(filter apiclient.AlertsListOpts, NoSimu *bool, contained *bool, printMachine bool) error { +func (cli *cliDecisions) list(ctx 
context.Context, filter apiclient.AlertsListOpts, NoSimu *bool, contained *bool, printMachine bool) error { var err error *filter.ScopeEquals, err = clialert.SanitizeScope(*filter.ScopeEquals, *filter.IPEquals, *filter.RangeEquals) @@ -249,7 +249,7 @@ func (cli *cliDecisions) list(filter apiclient.AlertsListOpts, NoSimu *bool, con filter.Contains = new(bool) } - alerts, _, err := cli.client.Alerts.List(context.Background(), filter) + alerts, _, err := cli.client.Alerts.List(ctx, filter) if err != nil { return fmt.Errorf("unable to retrieve decisions: %w", err) } @@ -293,7 +293,7 @@ cscli decisions list --origin lists --scenario list_name Args: cobra.ExactArgs(0), DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { - return cli.list(filter, NoSimu, contained, printMachine) + return cli.list(cmd.Context(), filter, NoSimu, contained, printMachine) }, } @@ -317,7 +317,7 @@ cscli decisions list --origin lists --scenario list_name return cmd } -func (cli *cliDecisions) add(addIP, addRange, addDuration, addValue, addScope, addReason, addType string) error { +func (cli *cliDecisions) add(ctx context.Context, addIP, addRange, addDuration, addValue, addScope, addReason, addType string) error { alerts := models.AddAlertsRequest{} origin := types.CscliOrigin capacity := int32(0) @@ -386,7 +386,7 @@ func (cli *cliDecisions) add(addIP, addRange, addDuration, addValue, addScope, a } alerts = append(alerts, &alert) - _, _, err = cli.client.Alerts.Add(context.Background(), alerts) + _, _, err = cli.client.Alerts.Add(ctx, alerts) if err != nil { return err } @@ -419,7 +419,7 @@ cscli decisions add --scope username --value foobar Args: cobra.ExactArgs(0), DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { - return cli.add(addIP, addRange, addDuration, addValue, addScope, addReason, addType) + return cli.add(cmd.Context(), addIP, addRange, addDuration, addValue, addScope, addReason, addType) }, } @@ -436,7 +436,7 @@ cscli decisions add 
--scope username --value foobar return cmd } -func (cli *cliDecisions) delete(delFilter apiclient.DecisionsDeleteOpts, delDecisionID string, contained *bool) error { +func (cli *cliDecisions) delete(ctx context.Context, delFilter apiclient.DecisionsDeleteOpts, delDecisionID string, contained *bool) error { var err error /*take care of shorthand options*/ @@ -480,7 +480,7 @@ func (cli *cliDecisions) delete(delFilter apiclient.DecisionsDeleteOpts, delDeci var decisions *models.DeleteDecisionResponse if delDecisionID == "" { - decisions, _, err = cli.client.Decisions.Delete(context.Background(), delFilter) + decisions, _, err = cli.client.Decisions.Delete(ctx, delFilter) if err != nil { return fmt.Errorf("unable to delete decisions: %w", err) } @@ -489,7 +489,7 @@ func (cli *cliDecisions) delete(delFilter apiclient.DecisionsDeleteOpts, delDeci return fmt.Errorf("id '%s' is not an integer: %w", delDecisionID, err) } - decisions, _, err = cli.client.Decisions.DeleteOne(context.Background(), delDecisionID) + decisions, _, err = cli.client.Decisions.DeleteOne(ctx, delDecisionID) if err != nil { return fmt.Errorf("unable to delete decision: %w", err) } @@ -543,8 +543,8 @@ cscli decisions delete --origin lists --scenario list_name return nil }, - RunE: func(_ *cobra.Command, _ []string) error { - return cli.delete(delFilter, delDecisionID, contained) + RunE: func(cmd *cobra.Command, _ []string) error { + return cli.delete(cmd.Context(), delFilter, delDecisionID, contained) }, } diff --git a/cmd/crowdsec-cli/clilapi/lapi.go b/cmd/crowdsec-cli/clilapi/lapi.go index 75fdc5c239d..bb721eefe03 100644 --- a/cmd/crowdsec-cli/clilapi/lapi.go +++ b/cmd/crowdsec-cli/clilapi/lapi.go @@ -68,7 +68,7 @@ func queryLAPIStatus(ctx context.Context, hub *cwhub.Hub, credURL string, login Scenarios: itemsForAPI, } - _, _, err = client.Auth.AuthenticateWatcher(context.Background(), t) + _, _, err = client.Auth.AuthenticateWatcher(ctx, t) if err != nil { return false, err } diff --git 
a/cmd/crowdsec-cli/clinotifications/notifications.go b/cmd/crowdsec-cli/clinotifications/notifications.go index 314f97db23e..0641dd1a7d4 100644 --- a/cmd/crowdsec-cli/clinotifications/notifications.go +++ b/cmd/crowdsec-cli/clinotifications/notifications.go @@ -368,9 +368,9 @@ cscli notifications reinject -a '{"remediation": true,"scenario":"not `, Args: cobra.ExactArgs(1), DisableAutoGenTag: true, - PreRunE: func(_ *cobra.Command, args []string) error { + PreRunE: func(cmd *cobra.Command, args []string) error { var err error - alert, err = cli.fetchAlertFromArgString(args[0]) + alert, err = cli.fetchAlertFromArgString(cmd.Context(), args[0]) if err != nil { return err } @@ -447,7 +447,7 @@ cscli notifications reinject -a '{"remediation": true,"scenario":"not return cmd } -func (cli *cliNotifications) fetchAlertFromArgString(toParse string) (*models.Alert, error) { +func (cli *cliNotifications) fetchAlertFromArgString(ctx context.Context, toParse string) (*models.Alert, error) { cfg := cli.cfg() id, err := strconv.Atoi(toParse) @@ -470,7 +470,7 @@ func (cli *cliNotifications) fetchAlertFromArgString(toParse string) (*models.Al return nil, fmt.Errorf("error creating the client for the API: %w", err) } - alert, _, err := client.Alerts.GetByID(context.Background(), id) + alert, _, err := client.Alerts.GetByID(ctx, id) if err != nil { return nil, fmt.Errorf("can't find alert with id %d: %w", id, err) } diff --git a/pkg/apiserver/apic.go b/pkg/apiserver/apic.go index 8b09e3e5f9d..9b56fef6549 100644 --- a/pkg/apiserver/apic.go +++ b/pkg/apiserver/apic.go @@ -406,13 +406,13 @@ func (a *apic) Send(cacheOrig *models.AddSignalsRequest) { } } -func (a *apic) CAPIPullIsOld() (bool, error) { +func (a *apic) CAPIPullIsOld(ctx context.Context) (bool, error) { /*only pull community blocklist if it's older than 1h30 */ alerts := a.dbClient.Ent.Alert.Query() alerts = alerts.Where(alert.HasDecisionsWith(decision.OriginEQ(database.CapiMachineID))) alerts = 
alerts.Where(alert.CreatedAtGTE(time.Now().UTC().Add(-time.Duration(1*time.Hour + 30*time.Minute)))) //nolint:unconvert - count, err := alerts.Count(a.dbClient.CTX) + count, err := alerts.Count(ctx) if err != nil { return false, fmt.Errorf("while looking for CAPI alert: %w", err) } @@ -634,7 +634,7 @@ func (a *apic) PullTop(ctx context.Context, forcePull bool) error { } if !forcePull { - if lastPullIsOld, err := a.CAPIPullIsOld(); err != nil { + if lastPullIsOld, err := a.CAPIPullIsOld(ctx); err != nil { return err } else if !lastPullIsOld { return nil @@ -769,6 +769,8 @@ func (a *apic) ApplyApicWhitelists(decisions []*models.Decision) []*models.Decis } func (a *apic) SaveAlerts(alertsFromCapi []*models.Alert, addCounters map[string]map[string]int, deleteCounters map[string]map[string]int) error { + ctx := context.TODO() + for _, alert := range alertsFromCapi { setAlertScenario(alert, addCounters, deleteCounters) log.Debugf("%s has %d decisions", *alert.Source.Scope, len(alert.Decisions)) @@ -777,7 +779,7 @@ func (a *apic) SaveAlerts(alertsFromCapi []*models.Alert, addCounters map[string log.Warningf("sqlite is not using WAL mode, LAPI might become unresponsive when inserting the community blocklist") } - alertID, inserted, deleted, err := a.dbClient.UpdateCommunityBlocklist(alert) + alertID, inserted, deleted, err := a.dbClient.UpdateCommunityBlocklist(ctx, alert) if err != nil { return fmt.Errorf("while saving alert from %s: %w", *alert.Source.Scope, err) } diff --git a/pkg/apiserver/apic_test.go b/pkg/apiserver/apic_test.go index 97943b495e5..3bb158acf35 100644 --- a/pkg/apiserver/apic_test.go +++ b/pkg/apiserver/apic_test.go @@ -113,7 +113,9 @@ func assertTotalAlertCount(t *testing.T, dbClient *database.Client, count int) { func TestAPICCAPIPullIsOld(t *testing.T) { api := getAPIC(t) - isOld, err := api.CAPIPullIsOld() + ctx := context.Background() + + isOld, err := api.CAPIPullIsOld(ctx) require.NoError(t, err) assert.True(t, isOld) @@ -124,7 +126,7 @@ func 
TestAPICCAPIPullIsOld(t *testing.T) { SetScope("Country"). SetValue("Blah"). SetOrigin(types.CAPIOrigin). - SaveX(context.Background()) + SaveX(ctx) api.dbClient.Ent.Alert.Create(). SetCreatedAt(time.Now()). @@ -132,9 +134,9 @@ func TestAPICCAPIPullIsOld(t *testing.T) { AddDecisions( decision, ). - SaveX(context.Background()) + SaveX(ctx) - isOld, err = api.CAPIPullIsOld() + isOld, err = api.CAPIPullIsOld(ctx) require.NoError(t, err) assert.False(t, isOld) diff --git a/pkg/apiserver/controllers/v1/alerts.go b/pkg/apiserver/controllers/v1/alerts.go index 84b3094865c..d1f93228512 100644 --- a/pkg/apiserver/controllers/v1/alerts.go +++ b/pkg/apiserver/controllers/v1/alerts.go @@ -127,6 +127,7 @@ func (c *Controller) sendAlertToPluginChannel(alert *models.Alert, profileID uin func (c *Controller) CreateAlert(gctx *gin.Context) { var input models.AddAlertsRequest + ctx := gctx.Request.Context() machineID, _ := getMachineIDFromContext(gctx) if err := gctx.ShouldBindJSON(&input); err != nil { @@ -239,7 +240,7 @@ func (c *Controller) CreateAlert(gctx *gin.Context) { c.DBClient.CanFlush = false } - alerts, err := c.DBClient.CreateAlert(machineID, input) + alerts, err := c.DBClient.CreateAlert(ctx, machineID, input) c.DBClient.CanFlush = true if err != nil { @@ -261,7 +262,9 @@ func (c *Controller) CreateAlert(gctx *gin.Context) { // FindAlerts: returns alerts from the database based on the specified filter func (c *Controller) FindAlerts(gctx *gin.Context) { - result, err := c.DBClient.QueryAlertWithFilter(gctx.Request.URL.Query()) + ctx := gctx.Request.Context() + + result, err := c.DBClient.QueryAlertWithFilter(ctx, gctx.Request.URL.Query()) if err != nil { c.HandleDBErrors(gctx, err) return @@ -279,6 +282,7 @@ func (c *Controller) FindAlerts(gctx *gin.Context) { // FindAlertByID returns the alert associated with the ID func (c *Controller) FindAlertByID(gctx *gin.Context) { + ctx := gctx.Request.Context() alertIDStr := gctx.Param("alert_id") alertID, err := 
strconv.Atoi(alertIDStr) @@ -287,7 +291,7 @@ func (c *Controller) FindAlertByID(gctx *gin.Context) { return } - result, err := c.DBClient.GetAlertByID(alertID) + result, err := c.DBClient.GetAlertByID(ctx, alertID) if err != nil { c.HandleDBErrors(gctx, err) return @@ -307,6 +311,8 @@ func (c *Controller) FindAlertByID(gctx *gin.Context) { func (c *Controller) DeleteAlertByID(gctx *gin.Context) { var err error + ctx := gctx.Request.Context() + incomingIP := gctx.ClientIP() if incomingIP != "127.0.0.1" && incomingIP != "::1" && !networksContainIP(c.TrustedIPs, incomingIP) && !isUnixSocket(gctx) { gctx.JSON(http.StatusForbidden, gin.H{"message": fmt.Sprintf("access forbidden from this IP (%s)", incomingIP)}) @@ -321,7 +327,7 @@ func (c *Controller) DeleteAlertByID(gctx *gin.Context) { return } - err = c.DBClient.DeleteAlertByID(decisionID) + err = c.DBClient.DeleteAlertByID(ctx, decisionID) if err != nil { c.HandleDBErrors(gctx, err) return @@ -334,13 +340,15 @@ func (c *Controller) DeleteAlertByID(gctx *gin.Context) { // DeleteAlerts deletes alerts from the database based on the specified filter func (c *Controller) DeleteAlerts(gctx *gin.Context) { + ctx := gctx.Request.Context() + incomingIP := gctx.ClientIP() if incomingIP != "127.0.0.1" && incomingIP != "::1" && !networksContainIP(c.TrustedIPs, incomingIP) && !isUnixSocket(gctx) { gctx.JSON(http.StatusForbidden, gin.H{"message": fmt.Sprintf("access forbidden from this IP (%s)", incomingIP)}) return } - nbDeleted, err := c.DBClient.DeleteAlertWithFilter(gctx.Request.URL.Query()) + nbDeleted, err := c.DBClient.DeleteAlertWithFilter(ctx, gctx.Request.URL.Query()) if err != nil { c.HandleDBErrors(gctx, err) return diff --git a/pkg/apiserver/controllers/v1/decisions.go b/pkg/apiserver/controllers/v1/decisions.go index d1aa79bbf07..ffefffc226b 100644 --- a/pkg/apiserver/controllers/v1/decisions.go +++ b/pkg/apiserver/controllers/v1/decisions.go @@ -264,7 +264,7 @@ func (c *Controller) StreamDecisionChunked(gctx 
*gin.Context, bouncerInfo *ent.B gctx.Writer.Header().Set("Content-Type", "application/json") gctx.Writer.Header().Set("Transfer-Encoding", "chunked") gctx.Writer.WriteHeader(http.StatusOK) - gctx.Writer.WriteString(`{"new": [`) //No need to check for errors, the doc says it always returns nil + gctx.Writer.WriteString(`{"new": [`) // No need to check for errors, the doc says it always returns nil // if the blocker just started, return all decisions if val, ok := gctx.Request.URL.Query()["startup"]; ok && val[0] == "true" { @@ -340,7 +340,7 @@ func (c *Controller) StreamDecisionNonChunked(gctx *gin.Context, bouncerInfo *en return err } - //data = KeepLongestDecision(data) + // data = KeepLongestDecision(data) ret["new"] = FormatDecisions(data) // getting expired decisions diff --git a/pkg/apiserver/controllers/v1/metrics.go b/pkg/apiserver/controllers/v1/metrics.go index ddb38512a11..4f6ee0986eb 100644 --- a/pkg/apiserver/controllers/v1/metrics.go +++ b/pkg/apiserver/controllers/v1/metrics.go @@ -68,7 +68,8 @@ func PrometheusBouncersHasEmptyDecision(c *gin.Context) { bouncer, _ := getBouncerFromContext(c) if bouncer != nil { LapiNilDecisions.With(prometheus.Labels{ - "bouncer": bouncer.Name}).Inc() + "bouncer": bouncer.Name, + }).Inc() } } @@ -76,7 +77,8 @@ func PrometheusBouncersHasNonEmptyDecision(c *gin.Context) { bouncer, _ := getBouncerFromContext(c) if bouncer != nil { LapiNonNilDecisions.With(prometheus.Labels{ - "bouncer": bouncer.Name}).Inc() + "bouncer": bouncer.Name, + }).Inc() } } @@ -87,7 +89,8 @@ func PrometheusMachinesMiddleware() gin.HandlerFunc { LapiMachineHits.With(prometheus.Labels{ "machine": machineID, "route": c.Request.URL.Path, - "method": c.Request.Method}).Inc() + "method": c.Request.Method, + }).Inc() } c.Next() @@ -101,7 +104,8 @@ func PrometheusBouncersMiddleware() gin.HandlerFunc { LapiBouncerHits.With(prometheus.Labels{ "bouncer": bouncer.Name, "route": c.Request.URL.Path, - "method": c.Request.Method}).Inc() + "method": 
c.Request.Method, + }).Inc() } c.Next() @@ -114,7 +118,8 @@ func PrometheusMiddleware() gin.HandlerFunc { LapiRouteHits.With(prometheus.Labels{ "route": c.Request.URL.Path, - "method": c.Request.Method}).Inc() + "method": c.Request.Method, + }).Inc() c.Next() elapsed := time.Since(startTime) diff --git a/pkg/apiserver/decisions_test.go b/pkg/apiserver/decisions_test.go index e4c9dda47ce..1c70c495a3a 100644 --- a/pkg/apiserver/decisions_test.go +++ b/pkg/apiserver/decisions_test.go @@ -191,7 +191,7 @@ func TestDeleteDecisionByID(t *testing.T) { // Create Valid Alert lapi.InsertAlertFromFile(t, "./tests/alert_sample.json") - //Have one alerts + // Have one alert w := lapi.RecordResponse(t, "GET", "/v1/decisions/stream?startup=true", emptyBody, APIKEY) decisions, code := readDecisionsStreamResp(t, w) assert.Equal(t, 200, code) @@ -210,7 +210,7 @@ func TestDeleteDecisionByID(t *testing.T) { errResp, _ = readDecisionsErrorResp(t, w) assert.Equal(t, "decision with id '100' doesn't exist: unable to delete", errResp["message"]) - //Have one alerts + // Have one alert w = lapi.RecordResponse(t, "GET", "/v1/decisions/stream?startup=true", emptyBody, APIKEY) decisions, code = readDecisionsStreamResp(t, w) assert.Equal(t, 200, code) @@ -223,7 +223,7 @@ func TestDeleteDecisionByID(t *testing.T) { resp, _ := readDecisionsDeleteResp(t, w) assert.Equal(t, "1", resp.NbDeleted) - //Have one alert (because we delete an alert that has dup targets) + // Have one alert (because we delete an alert that has dup targets) w = lapi.RecordResponse(t, "GET", "/v1/decisions/stream?startup=true", emptyBody, APIKEY) decisions, code = readDecisionsStreamResp(t, w) assert.Equal(t, 200, code) @@ -251,7 +251,7 @@ func TestDeleteDecision(t *testing.T) { } func TestStreamStartDecisionDedup(t *testing.T) { - //Ensure that at stream startup we only get the longest decision + // Ensure that at stream startup we only get the longest decision lapi := SetupLAPITest(t) // Create Valid Alert : 3 decisions for 
127.0.0.1, longest has id=3 @@ -299,7 +299,7 @@ func TestStreamStartDecisionDedup(t *testing.T) { w = lapi.RecordResponse(t, "DELETE", "/v1/decisions/1", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) - //and now we only get a deleted decision + // and now we only get a deleted decision w = lapi.RecordResponse(t, "GET", "/v1/decisions/stream?startup=true", emptyBody, APIKEY) decisions, code = readDecisionsStreamResp(t, w) assert.Equal(t, 200, code) diff --git a/pkg/apiserver/middlewares/v1/cache.go b/pkg/apiserver/middlewares/v1/cache.go index a058ec40393..b0037bc4fa4 100644 --- a/pkg/apiserver/middlewares/v1/cache.go +++ b/pkg/apiserver/middlewares/v1/cache.go @@ -9,7 +9,7 @@ import ( ) type cacheEntry struct { - err error // if nil, the certificate is not revocated + err error // if nil, the certificate is not revocated timestamp time.Time } diff --git a/pkg/apiserver/middlewares/v1/crl.go b/pkg/apiserver/middlewares/v1/crl.go index f85a410998e..64d7d3f0d96 100644 --- a/pkg/apiserver/middlewares/v1/crl.go +++ b/pkg/apiserver/middlewares/v1/crl.go @@ -12,13 +12,13 @@ import ( ) type CRLChecker struct { - path string // path to the CRL file - fileInfo os.FileInfo // last stat of the CRL file - crls []*x509.RevocationList // parsed CRLs + path string // path to the CRL file + fileInfo os.FileInfo // last stat of the CRL file + crls []*x509.RevocationList // parsed CRLs logger *log.Entry mu sync.RWMutex - lastLoad time.Time // time when the CRL file was last read successfully - onLoad func() // called when the CRL file changes (and is read successfully) + lastLoad time.Time // time when the CRL file was last read successfully + onLoad func() // called when the CRL file changes (and is read successfully) } func NewCRLChecker(crlPath string, onLoad func(), logger *log.Entry) (*CRLChecker, error) { diff --git a/pkg/apiserver/middlewares/v1/jwt.go b/pkg/apiserver/middlewares/v1/jwt.go index 17ca5b28359..9171e9fce06 100644 --- a/pkg/apiserver/middlewares/v1/jwt.go +++ 
b/pkg/apiserver/middlewares/v1/jwt.go @@ -128,6 +128,8 @@ func (j *JWT) authPlain(c *gin.Context) (*authInput, error) { err error ) + ctx := c.Request.Context() + ret := authInput{} if err = c.ShouldBindJSON(&loginInput); err != nil { @@ -144,7 +146,7 @@ func (j *JWT) authPlain(c *gin.Context) (*authInput, error) { ret.clientMachine, err = j.DbClient.Ent.Machine.Query(). Where(machine.MachineId(ret.machineID)). - First(j.DbClient.CTX) + First(ctx) if err != nil { log.Infof("Error machine login for %s : %+v ", ret.machineID, err) return nil, err diff --git a/pkg/apiserver/papi_cmd.go b/pkg/apiserver/papi_cmd.go index 18b19b03418..78f5dc9b0fe 100644 --- a/pkg/apiserver/papi_cmd.go +++ b/pkg/apiserver/papi_cmd.go @@ -97,6 +97,8 @@ func DecisionCmd(message *Message, p *Papi, sync bool) error { } func AlertCmd(message *Message, p *Papi, sync bool) error { + ctx := context.TODO() + switch message.Header.OperationCmd { case "add": data, err := json.Marshal(message.Data) @@ -155,7 +157,7 @@ func AlertCmd(message *Message, p *Papi, sync bool) error { } // use a different method: alert and/or decision might already be partially present in the database - _, err = p.DBClient.CreateOrUpdateAlert("", alert) + _, err = p.DBClient.CreateOrUpdateAlert(ctx, "", alert) if err != nil { log.Errorf("Failed to create alerts in DB: %s", err) } else { diff --git a/pkg/database/alerts.go b/pkg/database/alerts.go index d2760a209f9..ede9c89fe9a 100644 --- a/pkg/database/alerts.go +++ b/pkg/database/alerts.go @@ -35,12 +35,12 @@ const ( // CreateOrUpdateAlert is specific to PAPI : It checks if alert already exists, otherwise inserts it // if alert already exists, it checks it associated decisions already exists // if some associated decisions are missing (ie. 
previous insert ended up in error) it inserts them -func (c *Client) CreateOrUpdateAlert(machineID string, alertItem *models.Alert) (string, error) { +func (c *Client) CreateOrUpdateAlert(ctx context.Context, machineID string, alertItem *models.Alert) (string, error) { if alertItem.UUID == "" { return "", errors.New("alert UUID is empty") } - alerts, err := c.Ent.Alert.Query().Where(alert.UUID(alertItem.UUID)).WithDecisions().All(c.CTX) + alerts, err := c.Ent.Alert.Query().Where(alert.UUID(alertItem.UUID)).WithDecisions().All(ctx) if err != nil && !ent.IsNotFound(err) { return "", fmt.Errorf("unable to query alerts for uuid %s: %w", alertItem.UUID, err) @@ -48,7 +48,7 @@ func (c *Client) CreateOrUpdateAlert(machineID string, alertItem *models.Alert) // alert wasn't found, insert it (expected hotpath) if ent.IsNotFound(err) || len(alerts) == 0 { - alertIDs, err := c.CreateAlert(machineID, []*models.Alert{alertItem}) + alertIDs, err := c.CreateAlert(ctx, machineID, []*models.Alert{alertItem}) if err != nil { return "", fmt.Errorf("unable to create alert: %w", err) } @@ -165,7 +165,7 @@ func (c *Client) CreateOrUpdateAlert(machineID string, alertItem *models.Alert) builderChunks := slicetools.Chunks(decisionBuilders, c.decisionBulkSize) for _, builderChunk := range builderChunks { - decisionsCreateRet, err := c.Ent.Decision.CreateBulk(builderChunk...).Save(c.CTX) + decisionsCreateRet, err := c.Ent.Decision.CreateBulk(builderChunk...).Save(ctx) if err != nil { return "", fmt.Errorf("creating alert decisions: %w", err) } @@ -178,7 +178,7 @@ func (c *Client) CreateOrUpdateAlert(machineID string, alertItem *models.Alert) decisionChunks := slicetools.Chunks(decisions, c.decisionBulkSize) for _, decisionChunk := range decisionChunks { - err = c.Ent.Alert.Update().Where(alert.UUID(alertItem.UUID)).AddDecisions(decisionChunk...).Exec(c.CTX) + err = c.Ent.Alert.Update().Where(alert.UUID(alertItem.UUID)).AddDecisions(decisionChunk...).Exec(ctx) if err != nil { return "", 
fmt.Errorf("updating alert %s: %w", alertItem.UUID, err) } @@ -191,7 +191,7 @@ func (c *Client) CreateOrUpdateAlert(machineID string, alertItem *models.Alert) // it takes care of creating the new alert with the associated decisions, and it will as well deleted the "older" overlapping decisions: // 1st pull, you get decisions [1,2,3]. it inserts [1,2,3] // 2nd pull, you get decisions [1,2,3,4]. it inserts [1,2,3,4] and will try to delete [1,2,3,4] with a different alert ID and same origin -func (c *Client) UpdateCommunityBlocklist(alertItem *models.Alert) (int, int, int, error) { +func (c *Client) UpdateCommunityBlocklist(ctx context.Context, alertItem *models.Alert) (int, int, int, error) { if alertItem == nil { return 0, 0, 0, errors.New("nil alert") } @@ -244,7 +244,7 @@ func (c *Client) UpdateCommunityBlocklist(alertItem *models.Alert) (int, int, in SetScenarioHash(*alertItem.ScenarioHash). SetRemediation(true) // it's from CAPI, we always have decisions - alertRef, err := alertB.Save(c.CTX) + alertRef, err := alertB.Save(ctx) if err != nil { return 0, 0, 0, errors.Wrapf(BulkError, "error creating alert : %s", err) } @@ -253,7 +253,7 @@ func (c *Client) UpdateCommunityBlocklist(alertItem *models.Alert) (int, int, in return alertRef.ID, 0, 0, nil } - txClient, err := c.Ent.Tx(c.CTX) + txClient, err := c.Ent.Tx(ctx) if err != nil { return 0, 0, 0, errors.Wrapf(BulkError, "error creating transaction : %s", err) } @@ -347,7 +347,7 @@ func (c *Client) UpdateCommunityBlocklist(alertItem *models.Alert) (int, int, in decision.OriginEQ(DecOrigin), decision.Not(decision.HasOwnerWith(alert.IDEQ(alertRef.ID))), decision.ValueIn(deleteChunk...), - )).Exec(c.CTX) + )).Exec(ctx) if err != nil { rollbackErr := txClient.Rollback() if rollbackErr != nil { @@ -363,7 +363,7 @@ func (c *Client) UpdateCommunityBlocklist(alertItem *models.Alert) (int, int, in builderChunks := slicetools.Chunks(decisionBuilders, c.decisionBulkSize) for _, builderChunk := range builderChunks { - 
insertedDecisions, err := txClient.Decision.CreateBulk(builderChunk...).Save(c.CTX) + insertedDecisions, err := txClient.Decision.CreateBulk(builderChunk...).Save(ctx) if err != nil { rollbackErr := txClient.Rollback() if rollbackErr != nil { @@ -391,7 +391,7 @@ func (c *Client) UpdateCommunityBlocklist(alertItem *models.Alert) (int, int, in return alertRef.ID, inserted, deleted, nil } -func (c *Client) createDecisionChunk(simulated bool, stopAtTime time.Time, decisions []*models.Decision) ([]*ent.Decision, error) { +func (c *Client) createDecisionChunk(ctx context.Context, simulated bool, stopAtTime time.Time, decisions []*models.Decision) ([]*ent.Decision, error) { decisionCreate := []*ent.DecisionCreate{} for _, decisionItem := range decisions { @@ -436,7 +436,7 @@ func (c *Client) createDecisionChunk(simulated bool, stopAtTime time.Time, decis return nil, nil } - ret, err := c.Ent.Decision.CreateBulk(decisionCreate...).Save(c.CTX) + ret, err := c.Ent.Decision.CreateBulk(decisionCreate...).Save(ctx) if err != nil { return nil, err } @@ -444,7 +444,7 @@ func (c *Client) createDecisionChunk(simulated bool, stopAtTime time.Time, decis return ret, nil } -func (c *Client) createAlertChunk(machineID string, owner *ent.Machine, alerts []*models.Alert) ([]string, error) { +func (c *Client) createAlertChunk(ctx context.Context, machineID string, owner *ent.Machine, alerts []*models.Alert) ([]string, error) { alertBuilders := []*ent.AlertCreate{} alertDecisions := [][]*ent.Decision{} @@ -540,7 +540,7 @@ func (c *Client) createAlertChunk(machineID string, owner *ent.Machine, alerts [ c.Log.Warningf("dropped 'serialized' field (machine %s / scenario %s)", machineID, *alertItem.Scenario) } - events, err = c.Ent.Event.CreateBulk(eventBulk...).Save(c.CTX) + events, err = c.Ent.Event.CreateBulk(eventBulk...).Save(ctx) if err != nil { return nil, errors.Wrapf(BulkError, "creating alert events: %s", err) } @@ -554,12 +554,14 @@ func (c *Client) createAlertChunk(machineID string, 
owner *ent.Machine, alerts [ value := metaItem.Value if len(metaItem.Value) > 4095 { - c.Log.Warningf("truncated meta %s : value too long", metaItem.Key) + c.Log.Warningf("truncated meta %s: value too long", metaItem.Key) + value = value[:4095] } if len(metaItem.Key) > 255 { - c.Log.Warningf("truncated meta %s : key too long", metaItem.Key) + c.Log.Warningf("truncated meta %s: key too long", metaItem.Key) + key = key[:255] } @@ -568,7 +570,7 @@ func (c *Client) createAlertChunk(machineID string, owner *ent.Machine, alerts [ SetValue(value) } - metas, err = c.Ent.Meta.CreateBulk(metaBulk...).Save(c.CTX) + metas, err = c.Ent.Meta.CreateBulk(metaBulk...).Save(ctx) if err != nil { c.Log.Warningf("error creating alert meta: %s", err) } @@ -578,7 +580,7 @@ func (c *Client) createAlertChunk(machineID string, owner *ent.Machine, alerts [ decisionChunks := slicetools.Chunks(alertItem.Decisions, c.decisionBulkSize) for _, decisionChunk := range decisionChunks { - decisionRet, err := c.createDecisionChunk(*alertItem.Simulated, stopAtTime, decisionChunk) + decisionRet, err := c.createDecisionChunk(ctx, *alertItem.Simulated, stopAtTime, decisionChunk) if err != nil { return nil, fmt.Errorf("creating alert decisions: %w", err) } @@ -636,7 +638,7 @@ func (c *Client) createAlertChunk(machineID string, owner *ent.Machine, alerts [ return nil, nil } - alertsCreateBulk, err := c.Ent.Alert.CreateBulk(alertBuilders...).Save(c.CTX) + alertsCreateBulk, err := c.Ent.Alert.CreateBulk(alertBuilders...).Save(ctx) if err != nil { return nil, errors.Wrapf(BulkError, "bulk creating alert : %s", err) } @@ -653,7 +655,7 @@ func (c *Client) createAlertChunk(machineID string, owner *ent.Machine, alerts [ for retry < maxLockRetries { // so much for the happy path... 
but sqlite3 errors work differently - _, err := c.Ent.Alert.Update().Where(alert.IDEQ(a.ID)).AddDecisions(d2...).Save(c.CTX) + _, err := c.Ent.Alert.Update().Where(alert.IDEQ(a.ID)).AddDecisions(d2...).Save(ctx) if err == nil { break } @@ -678,17 +680,16 @@ func (c *Client) createAlertChunk(machineID string, owner *ent.Machine, alerts [ } } } + return ret, nil } -func (c *Client) CreateAlert(machineID string, alertList []*models.Alert) ([]string, error) { +func (c *Client) CreateAlert(ctx context.Context, machineID string, alertList []*models.Alert) ([]string, error) { var ( owner *ent.Machine err error ) - ctx := context.TODO() - if machineID != "" { owner, err = c.QueryMachineByID(ctx, machineID) if err != nil { @@ -708,7 +709,7 @@ func (c *Client) CreateAlert(machineID string, alertList []*models.Alert) ([]str alertIDs := []string{} for _, alertChunk := range alertChunks { - ids, err := c.createAlertChunk(machineID, owner, alertChunk) + ids, err := c.createAlertChunk(ctx, machineID, owner, alertChunk) if err != nil { return nil, fmt.Errorf("machine '%s': %w", machineID, err) } @@ -717,7 +718,7 @@ func (c *Client) CreateAlert(machineID string, alertList []*models.Alert) ([]str } if owner != nil { - err = owner.Update().SetLastPush(time.Now().UTC()).Exec(c.CTX) + err = owner.Update().SetLastPush(time.Now().UTC()).Exec(ctx) if err != nil { return nil, fmt.Errorf("machine '%s': %w", machineID, err) } @@ -919,7 +920,6 @@ func AlertPredicatesFromFilter(filter map[string][]string) ([]predicate.Alert, e case "since", "created_before", "until": if err := handleTimeFilters(param, value[0], &predicates); err != nil { return nil, err - } case "decision_type": predicates = append(predicates, alert.HasDecisionsWith(decision.TypeEQ(value[0]))) @@ -954,7 +954,6 @@ func AlertPredicatesFromFilter(filter map[string][]string) ([]predicate.Alert, e if err := handleIPPredicates(ip_sz, contains, start_ip, start_sfx, end_ip, end_sfx, &predicates); err != nil { return nil, err - } 
return predicates, nil @@ -996,11 +995,11 @@ func (c *Client) AlertsCountPerScenario(ctx context.Context, filters map[string] return counts, nil } -func (c *Client) TotalAlerts() (int, error) { - return c.Ent.Alert.Query().Count(c.CTX) +func (c *Client) TotalAlerts(ctx context.Context) (int, error) { + return c.Ent.Alert.Query().Count(ctx) } -func (c *Client) QueryAlertWithFilter(filter map[string][]string) ([]*ent.Alert, error) { +func (c *Client) QueryAlertWithFilter(ctx context.Context, filter map[string][]string) ([]*ent.Alert, error) { sort := "DESC" // we sort by desc by default if val, ok := filter["sort"]; ok { @@ -1047,7 +1046,7 @@ func (c *Client) QueryAlertWithFilter(filter map[string][]string) ([]*ent.Alert, WithOwner() if limit == 0 { - limit, err = alerts.Count(c.CTX) + limit, err = alerts.Count(ctx) if err != nil { return nil, fmt.Errorf("unable to count nb alerts: %w", err) } @@ -1059,7 +1058,7 @@ func (c *Client) QueryAlertWithFilter(filter map[string][]string) ([]*ent.Alert, alerts = alerts.Order(ent.Desc(alert.FieldCreatedAt), ent.Desc(alert.FieldID)) } - result, err := alerts.Limit(paginationSize).Offset(offset).All(c.CTX) + result, err := alerts.Limit(paginationSize).Offset(offset).All(ctx) if err != nil { return nil, errors.Wrapf(QueryFail, "pagination size: %d, offset: %d: %s", paginationSize, offset, err) } @@ -1088,35 +1087,35 @@ func (c *Client) QueryAlertWithFilter(filter map[string][]string) ([]*ent.Alert, return ret, nil } -func (c *Client) DeleteAlertGraphBatch(alertItems []*ent.Alert) (int, error) { +func (c *Client) DeleteAlertGraphBatch(ctx context.Context, alertItems []*ent.Alert) (int, error) { idList := make([]int, 0) for _, alert := range alertItems { idList = append(idList, alert.ID) } _, err := c.Ent.Event.Delete(). 
- Where(event.HasOwnerWith(alert.IDIn(idList...))).Exec(c.CTX) + Where(event.HasOwnerWith(alert.IDIn(idList...))).Exec(ctx) if err != nil { c.Log.Warningf("DeleteAlertGraphBatch : %s", err) return 0, errors.Wrapf(DeleteFail, "alert graph delete batch events") } _, err = c.Ent.Meta.Delete(). - Where(meta.HasOwnerWith(alert.IDIn(idList...))).Exec(c.CTX) + Where(meta.HasOwnerWith(alert.IDIn(idList...))).Exec(ctx) if err != nil { c.Log.Warningf("DeleteAlertGraphBatch : %s", err) return 0, errors.Wrapf(DeleteFail, "alert graph delete batch meta") } _, err = c.Ent.Decision.Delete(). - Where(decision.HasOwnerWith(alert.IDIn(idList...))).Exec(c.CTX) + Where(decision.HasOwnerWith(alert.IDIn(idList...))).Exec(ctx) if err != nil { c.Log.Warningf("DeleteAlertGraphBatch : %s", err) return 0, errors.Wrapf(DeleteFail, "alert graph delete batch decisions") } deleted, err := c.Ent.Alert.Delete(). - Where(alert.IDIn(idList...)).Exec(c.CTX) + Where(alert.IDIn(idList...)).Exec(ctx) if err != nil { c.Log.Warningf("DeleteAlertGraphBatch : %s", err) return deleted, errors.Wrapf(DeleteFail, "alert graph delete batch") @@ -1127,10 +1126,10 @@ func (c *Client) DeleteAlertGraphBatch(alertItems []*ent.Alert) (int, error) { return deleted, nil } -func (c *Client) DeleteAlertGraph(alertItem *ent.Alert) error { +func (c *Client) DeleteAlertGraph(ctx context.Context, alertItem *ent.Alert) error { // delete the associated events _, err := c.Ent.Event.Delete(). - Where(event.HasOwnerWith(alert.IDEQ(alertItem.ID))).Exec(c.CTX) + Where(event.HasOwnerWith(alert.IDEQ(alertItem.ID))).Exec(ctx) if err != nil { c.Log.Warningf("DeleteAlertGraph : %s", err) return errors.Wrapf(DeleteFail, "event with alert ID '%d'", alertItem.ID) @@ -1138,7 +1137,7 @@ func (c *Client) DeleteAlertGraph(alertItem *ent.Alert) error { // delete the associated meta _, err = c.Ent.Meta.Delete(). 
- Where(meta.HasOwnerWith(alert.IDEQ(alertItem.ID))).Exec(c.CTX) + Where(meta.HasOwnerWith(alert.IDEQ(alertItem.ID))).Exec(ctx) if err != nil { c.Log.Warningf("DeleteAlertGraph : %s", err) return errors.Wrapf(DeleteFail, "meta with alert ID '%d'", alertItem.ID) @@ -1146,14 +1145,14 @@ func (c *Client) DeleteAlertGraph(alertItem *ent.Alert) error { // delete the associated decisions _, err = c.Ent.Decision.Delete(). - Where(decision.HasOwnerWith(alert.IDEQ(alertItem.ID))).Exec(c.CTX) + Where(decision.HasOwnerWith(alert.IDEQ(alertItem.ID))).Exec(ctx) if err != nil { c.Log.Warningf("DeleteAlertGraph : %s", err) return errors.Wrapf(DeleteFail, "decision with alert ID '%d'", alertItem.ID) } // delete the alert - err = c.Ent.Alert.DeleteOne(alertItem).Exec(c.CTX) + err = c.Ent.Alert.DeleteOne(alertItem).Exec(ctx) if err != nil { c.Log.Warningf("DeleteAlertGraph : %s", err) return errors.Wrapf(DeleteFail, "alert with ID '%d'", alertItem.ID) @@ -1162,26 +1161,26 @@ func (c *Client) DeleteAlertGraph(alertItem *ent.Alert) error { return nil } -func (c *Client) DeleteAlertByID(id int) error { - alertItem, err := c.Ent.Alert.Query().Where(alert.IDEQ(id)).Only(c.CTX) +func (c *Client) DeleteAlertByID(ctx context.Context, id int) error { + alertItem, err := c.Ent.Alert.Query().Where(alert.IDEQ(id)).Only(ctx) if err != nil { return err } - return c.DeleteAlertGraph(alertItem) + return c.DeleteAlertGraph(ctx, alertItem) } -func (c *Client) DeleteAlertWithFilter(filter map[string][]string) (int, error) { +func (c *Client) DeleteAlertWithFilter(ctx context.Context, filter map[string][]string) (int, error) { preds, err := AlertPredicatesFromFilter(filter) if err != nil { return 0, err } - return c.Ent.Alert.Delete().Where(preds...).Exec(c.CTX) + return c.Ent.Alert.Delete().Where(preds...).Exec(ctx) } -func (c *Client) GetAlertByID(alertID int) (*ent.Alert, error) { - alert, err := 
c.Ent.Alert.Query().Where(alert.IDEQ(alertID)).WithDecisions().WithEvents().WithMetas().WithOwner().First(c.CTX) +func (c *Client) GetAlertByID(ctx context.Context, alertID int) (*ent.Alert, error) { + alert, err := c.Ent.Alert.Query().Where(alert.IDEQ(alertID)).WithDecisions().WithEvents().WithMetas().WithOwner().First(ctx) if err != nil { /*record not found, 404*/ if ent.IsNotFound(err) { diff --git a/pkg/database/database.go b/pkg/database/database.go index e513459199f..bb41dd3b645 100644 --- a/pkg/database/database.go +++ b/pkg/database/database.go @@ -21,7 +21,6 @@ import ( type Client struct { Ent *ent.Client - CTX context.Context Log *log.Logger CanFlush bool Type string @@ -106,7 +105,6 @@ func NewClient(ctx context.Context, config *csconfig.DatabaseCfg) (*Client, erro return &Client{ Ent: client, - CTX: ctx, Log: clog, CanFlush: true, Type: config.Type, diff --git a/pkg/database/decisions.go b/pkg/database/decisions.go index 8547990c25f..7522a272799 100644 --- a/pkg/database/decisions.go +++ b/pkg/database/decisions.go @@ -31,7 +31,7 @@ func BuildDecisionRequestWithFilter(query *ent.DecisionQuery, filter map[string] var err error var start_ip, start_sfx, end_ip, end_sfx int64 var ip_sz int - var contains = true + contains := true /*if contains is true, return bans that *contains* the given value (value is the inner) else, return bans that are *contained* by the given value (value is the outer)*/ @@ -321,7 +321,7 @@ func (c *Client) DeleteDecisionsWithFilter(ctx context.Context, filter map[strin var err error var start_ip, start_sfx, end_ip, end_sfx int64 var ip_sz int - var contains = true + contains := true /*if contains is true, return bans that *contains* the given value (value is the inner) else, return bans that are *contained* by the given value (value is the outer) */ @@ -440,7 +440,7 @@ func (c *Client) ExpireDecisionsWithFilter(ctx context.Context, filter map[strin var err error var start_ip, start_sfx, end_ip, end_sfx int64 var ip_sz int - var 
contains = true + contains := true /*if contains is true, return bans that *contains* the given value (value is the inner) else, return bans that are *contained* by the given value (value is the outer)*/ decisions := c.Ent.Decision.Query().Where(decision.UntilGT(time.Now().UTC())) diff --git a/pkg/database/flush.go b/pkg/database/flush.go index 46c8edfa308..8f646ddc961 100644 --- a/pkg/database/flush.go +++ b/pkg/database/flush.go @@ -239,7 +239,7 @@ func (c *Client) FlushAlerts(ctx context.Context, MaxAge string, MaxItems int) e c.FlushOrphans(ctx) c.Log.Debug("Done flushing orphan alerts") - totalAlerts, err = c.TotalAlerts() + totalAlerts, err = c.TotalAlerts(ctx) if err != nil { c.Log.Warningf("FlushAlerts (max items count): %s", err) return fmt.Errorf("unable to get alerts count: %w", err) @@ -252,7 +252,7 @@ func (c *Client) FlushAlerts(ctx context.Context, MaxAge string, MaxItems int) e "created_before": {MaxAge}, } - nbDeleted, err := c.DeleteAlertWithFilter(filter) + nbDeleted, err := c.DeleteAlertWithFilter(ctx, filter) if err != nil { c.Log.Warningf("FlushAlerts (max age): %s", err) return fmt.Errorf("unable to flush alerts with filter until=%s: %w", MaxAge, err) @@ -268,7 +268,7 @@ func (c *Client) FlushAlerts(ctx context.Context, MaxAge string, MaxItems int) e // This gives us the oldest alert that we want to keep // We then delete all the alerts with an id lower than this one // We can do this because the id is auto-increment, and the database won't reuse the same id twice - lastAlert, err := c.QueryAlertWithFilter(map[string][]string{ + lastAlert, err := c.QueryAlertWithFilter(ctx, map[string][]string{ "sort": {"DESC"}, "limit": {"1"}, // we do not care about fetching the edges, we just want the id diff --git a/pkg/database/metrics.go b/pkg/database/metrics.go index 99ba90c80b8..eb4c472821e 100644 --- a/pkg/database/metrics.go +++ b/pkg/database/metrics.go @@ -17,7 +17,7 @@ func (c *Client) CreateMetric(ctx context.Context, generatedType 
metric.Generate SetReceivedAt(receivedAt). SetPayload(payload). Save(ctx) - if err != nil { + if err != nil { c.Log.Warningf("CreateMetric: %s", err) return nil, fmt.Errorf("storing metrics snapshot for '%s' at %s: %w", generatedBy, receivedAt, InsertFail) } From 897613e8377db824d47b32a5d4b27e599280fb9d Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 25 Sep 2024 09:02:53 +0200 Subject: [PATCH 304/318] CI: check generated code in test workflow (#3261) --- .github/workflows/go-tests.yml | 12 ++++++++++++ pkg/database/ent/machine.go | 4 ++-- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index d4e3a3d843a..a6c72a91af6 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -128,6 +128,18 @@ jobs: with: go-version: "1.22" + - name: Run "make generate" and check for changes + run: | + set -e + make generate 2>/dev/null + if [[ $(git status --porcelain) ]]; then + echo "Error: Uncommitted changes found after running 'make generate'. Please commit all generated code." + git diff + exit 1 + else + echo "No changes detected after running 'make generate'." 
+ fi + - name: Create localstack streams run: | aws --endpoint-url=http://127.0.0.1:4566 --region us-east-1 kinesis create-stream --stream-name stream-1-shard --shard-count 1 diff --git a/pkg/database/ent/machine.go b/pkg/database/ent/machine.go index 1b8122060d1..76127065791 100644 --- a/pkg/database/ent/machine.go +++ b/pkg/database/ent/machine.go @@ -202,7 +202,7 @@ func (m *Machine) assignValues(columns []string, values []any) error { return fmt.Errorf("unexpected type %T for field hubstate", values[i]) } else if value != nil && len(*value) > 0 { if err := json.Unmarshal(*value, &m.Hubstate); err != nil { - return fmt.Errorf("parsing field hubstate: %w", err) + return fmt.Errorf("unmarshal field hubstate: %w", err) } } case machine.FieldDatasources: @@ -210,7 +210,7 @@ func (m *Machine) assignValues(columns []string, values []any) error { return fmt.Errorf("unexpected type %T for field datasources", values[i]) } else if value != nil && len(*value) > 0 { if err := json.Unmarshal(*value, &m.Datasources); err != nil { - return fmt.Errorf("parsing field datasources: %w", err) + return fmt.Errorf("unmarshal field datasources: %w", err) } } default: From 27451a5ee6b88cb36be97634dbbe6c5f16e27cb2 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 2 Oct 2024 13:39:07 +0200 Subject: [PATCH 305/318] context propagation: pass ctx to UpdateScenario() (#3258) * context propagation: pass ctx to UpdateScenario() * context propagation: SendMetrics, SendUsageMetrics, plugin config --- cmd/crowdsec-cli/clicapi/capi.go | 2 +- cmd/crowdsec/lapiclient.go | 2 +- pkg/apiclient/auth_jwt.go | 7 +++++-- pkg/apiclient/config.go | 3 ++- pkg/apiserver/apic.go | 10 ++++------ pkg/apiserver/apic_metrics.go | 12 ++++-------- pkg/apiserver/apic_metrics_test.go | 12 +++++++----- pkg/apiserver/apic_test.go | 6 ++++-- pkg/apiserver/apiserver.go | 4 ++-- pkg/csplugin/notifier.go | 4 +--- pkg/protobufs/plugin_interface.go | 4 ++-- 11 files changed, 33 
insertions(+), 33 deletions(-) diff --git a/cmd/crowdsec-cli/clicapi/capi.go b/cmd/crowdsec-cli/clicapi/capi.go index 24c3ba054a9..cba66f11104 100644 --- a/cmd/crowdsec-cli/clicapi/capi.go +++ b/cmd/crowdsec-cli/clicapi/capi.go @@ -170,7 +170,7 @@ func queryCAPIStatus(ctx context.Context, hub *cwhub.Hub, credURL string, login // I don't believe papi is neede to check enrollement // PapiURL: papiURL, VersionPrefix: "v3", - UpdateScenario: func() ([]string, error) { + UpdateScenario: func(_ context.Context) ([]string, error) { return itemsForAPI, nil }, }) diff --git a/cmd/crowdsec/lapiclient.go b/cmd/crowdsec/lapiclient.go index 4556306825c..eed517f9df9 100644 --- a/cmd/crowdsec/lapiclient.go +++ b/cmd/crowdsec/lapiclient.go @@ -36,7 +36,7 @@ func AuthenticatedLAPIClient(credentials csconfig.ApiCredentialsCfg, hub *cwhub. URL: apiURL, PapiURL: papiURL, VersionPrefix: "v1", - UpdateScenario: func() ([]string, error) { + UpdateScenario: func(_ context.Context) ([]string, error) { return itemsForAPI, nil }, }) diff --git a/pkg/apiclient/auth_jwt.go b/pkg/apiclient/auth_jwt.go index b202e382842..193486ff065 100644 --- a/pkg/apiclient/auth_jwt.go +++ b/pkg/apiclient/auth_jwt.go @@ -2,6 +2,7 @@ package apiclient import ( "bytes" + "context" "encoding/json" "fmt" "io" @@ -30,15 +31,17 @@ type JWTTransport struct { // Transport is the underlying HTTP transport to use when making requests. // It will default to http.DefaultTransport if nil. 
Transport http.RoundTripper - UpdateScenario func() ([]string, error) + UpdateScenario func(context.Context) ([]string, error) refreshTokenMutex sync.Mutex } func (t *JWTTransport) refreshJwtToken() error { var err error + ctx := context.TODO() + if t.UpdateScenario != nil { - t.Scenarios, err = t.UpdateScenario() + t.Scenarios, err = t.UpdateScenario(ctx) if err != nil { return fmt.Errorf("can't update scenario list: %w", err) } diff --git a/pkg/apiclient/config.go b/pkg/apiclient/config.go index b08452e74e0..29a8acf185e 100644 --- a/pkg/apiclient/config.go +++ b/pkg/apiclient/config.go @@ -1,6 +1,7 @@ package apiclient import ( + "context" "net/url" "github.com/go-openapi/strfmt" @@ -15,5 +16,5 @@ type Config struct { VersionPrefix string UserAgent string RegistrationToken string - UpdateScenario func() ([]string, error) + UpdateScenario func(context.Context) ([]string, error) } diff --git a/pkg/apiserver/apic.go b/pkg/apiserver/apic.go index 9b56fef6549..c8768e71b0a 100644 --- a/pkg/apiserver/apic.go +++ b/pkg/apiserver/apic.go @@ -82,11 +82,9 @@ func randomDuration(d time.Duration, delta time.Duration) time.Duration { return ret } -func (a *apic) FetchScenariosListFromDB() ([]string, error) { +func (a *apic) FetchScenariosListFromDB(ctx context.Context) ([]string, error) { scenarios := make([]string, 0) - ctx := context.TODO() - machines, err := a.dbClient.ListMachines(ctx) if err != nil { return nil, fmt.Errorf("while listing machines: %w", err) @@ -214,7 +212,7 @@ func NewAPIC(ctx context.Context, config *csconfig.OnlineApiClientCfg, dbClient return nil, fmt.Errorf("while parsing '%s': %w", config.Credentials.PapiURL, err) } - ret.scenarioList, err = ret.FetchScenariosListFromDB() + ret.scenarioList, err = ret.FetchScenariosListFromDB(ctx) if err != nil { return nil, fmt.Errorf("while fetching scenarios from db: %w", err) } @@ -234,7 +232,7 @@ func NewAPIC(ctx context.Context, config *csconfig.OnlineApiClientCfg, dbClient // The watcher will be authenticated 
by the RoundTripper the first time it will call CAPI // Explicit authentication will provoke a useless supplementary call to CAPI - scenarios, err := ret.FetchScenariosListFromDB() + scenarios, err := ret.FetchScenariosListFromDB(ctx) if err != nil { return ret, fmt.Errorf("get scenario in db: %w", err) } @@ -944,7 +942,7 @@ func (a *apic) Pull(ctx context.Context) error { toldOnce := false for { - scenario, err := a.FetchScenariosListFromDB() + scenario, err := a.FetchScenariosListFromDB(ctx) if err != nil { log.Errorf("unable to fetch scenarios from db: %s", err) } diff --git a/pkg/apiserver/apic_metrics.go b/pkg/apiserver/apic_metrics.go index 16b2328dbe9..3d9e7b28a79 100644 --- a/pkg/apiserver/apic_metrics.go +++ b/pkg/apiserver/apic_metrics.go @@ -251,11 +251,9 @@ func (a *apic) fetchMachineIDs(ctx context.Context) ([]string, error) { // Metrics are sent at start, then at the randomized metricsIntervalFirst, // then at regular metricsInterval. If a change is detected in the list // of machines, the next metrics are sent immediately. 
-func (a *apic) SendMetrics(stop chan (bool)) { +func (a *apic) SendMetrics(ctx context.Context, stop chan (bool)) { defer trace.CatchPanic("lapi/metricsToAPIC") - ctx := context.TODO() - // verify the list of machines every interval const checkInt = 20 * time.Second @@ -321,7 +319,7 @@ func (a *apic) SendMetrics(stop chan (bool)) { if metrics != nil { log.Info("capi metrics: sending") - _, _, err = a.apiClient.Metrics.Add(context.Background(), metrics) + _, _, err = a.apiClient.Metrics.Add(ctx, metrics) if err != nil { log.Errorf("capi metrics: failed: %s", err) } @@ -339,11 +337,9 @@ func (a *apic) SendMetrics(stop chan (bool)) { } } -func (a *apic) SendUsageMetrics() { +func (a *apic) SendUsageMetrics(ctx context.Context) { defer trace.CatchPanic("lapi/usageMetricsToAPIC") - ctx := context.TODO() - firstRun := true log.Debugf("Start sending usage metrics to CrowdSec Central API (interval: %s once, then %s)", a.usageMetricsIntervalFirst, a.usageMetricsInterval) @@ -368,7 +364,7 @@ func (a *apic) SendUsageMetrics() { continue } - _, resp, err := a.apiClient.UsageMetrics.Add(context.Background(), metrics) + _, resp, err := a.apiClient.UsageMetrics.Add(ctx, metrics) if err != nil { log.Errorf("unable to send usage metrics: %s", err) diff --git a/pkg/apiserver/apic_metrics_test.go b/pkg/apiserver/apic_metrics_test.go index 78b16f9c8b7..13a24668f26 100644 --- a/pkg/apiserver/apic_metrics_test.go +++ b/pkg/apiserver/apic_metrics_test.go @@ -14,6 +14,8 @@ import ( ) func TestAPICSendMetrics(t *testing.T) { + ctx := context.Background() + tests := []struct { name string duration time.Duration @@ -34,7 +36,7 @@ func TestAPICSendMetrics(t *testing.T) { metricsInterval: time.Millisecond * 20, expectedCalls: 5, setUp: func(api *apic) { - api.dbClient.Ent.Machine.Delete().ExecX(context.Background()) + api.dbClient.Ent.Machine.Delete().ExecX(ctx) api.dbClient.Ent.Machine.Create(). SetMachineId("1234"). SetPassword(testPassword.String()). 
@@ -42,16 +44,16 @@ func TestAPICSendMetrics(t *testing.T) { SetScenarios("crowdsecurity/test"). SetLastPush(time.Time{}). SetUpdatedAt(time.Time{}). - ExecX(context.Background()) + ExecX(ctx) - api.dbClient.Ent.Bouncer.Delete().ExecX(context.Background()) + api.dbClient.Ent.Bouncer.Delete().ExecX(ctx) api.dbClient.Ent.Bouncer.Create(). SetIPAddress("1.2.3.6"). SetName("someBouncer"). SetAPIKey("foobar"). SetRevoked(false). SetLastPull(time.Time{}). - ExecX(context.Background()) + ExecX(ctx) }, }, } @@ -86,7 +88,7 @@ func TestAPICSendMetrics(t *testing.T) { httpmock.ZeroCallCounters() - go api.SendMetrics(stop) + go api.SendMetrics(ctx, stop) time.Sleep(tc.duration) stop <- true diff --git a/pkg/apiserver/apic_test.go b/pkg/apiserver/apic_test.go index 3bb158acf35..a215edb2fbd 100644 --- a/pkg/apiserver/apic_test.go +++ b/pkg/apiserver/apic_test.go @@ -143,6 +143,8 @@ func TestAPICCAPIPullIsOld(t *testing.T) { } func TestAPICFetchScenariosListFromDB(t *testing.T) { + ctx := context.Background() + tests := []struct { name string machineIDsWithScenarios map[string]string @@ -174,10 +176,10 @@ func TestAPICFetchScenariosListFromDB(t *testing.T) { SetPassword(testPassword.String()). SetIpAddress("1.2.3.4"). SetScenarios(scenarios). 
- ExecX(context.Background()) + ExecX(ctx) } - scenarios, err := api.FetchScenariosListFromDB() + scenarios, err := api.FetchScenariosListFromDB(ctx) require.NoError(t, err) for machineID := range tc.machineIDsWithScenarios { diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go index 6b5d6803be9..2b2b453348a 100644 --- a/pkg/apiserver/apiserver.go +++ b/pkg/apiserver/apiserver.go @@ -357,12 +357,12 @@ func (s *APIServer) initAPIC(ctx context.Context) { } s.apic.metricsTomb.Go(func() error { - s.apic.SendMetrics(make(chan bool)) + s.apic.SendMetrics(ctx, make(chan bool)) return nil }) s.apic.metricsTomb.Go(func() error { - s.apic.SendUsageMetrics() + s.apic.SendUsageMetrics(ctx) return nil }) } diff --git a/pkg/csplugin/notifier.go b/pkg/csplugin/notifier.go index 2b5d57fbcff..ed4a4cc4149 100644 --- a/pkg/csplugin/notifier.go +++ b/pkg/csplugin/notifier.go @@ -40,9 +40,7 @@ func (m *GRPCClient) Notify(ctx context.Context, notification *protobufs.Notific } func (m *GRPCClient) Configure(ctx context.Context, config *protobufs.Config) (*protobufs.Empty, error) { - _, err := m.client.Configure( - context.Background(), config, - ) + _, err := m.client.Configure(ctx, config) return &protobufs.Empty{}, err } diff --git a/pkg/protobufs/plugin_interface.go b/pkg/protobufs/plugin_interface.go index fc89b2fa009..baa76c8941c 100644 --- a/pkg/protobufs/plugin_interface.go +++ b/pkg/protobufs/plugin_interface.go @@ -24,12 +24,12 @@ type NotifierPlugin struct { type GRPCClient struct{ client NotifierClient } func (m *GRPCClient) Notify(ctx context.Context, notification *Notification) (*Empty, error) { - _, err := m.client.Notify(context.Background(), notification) + _, err := m.client.Notify(ctx, notification) return &Empty{}, err } func (m *GRPCClient) Configure(ctx context.Context, config *Config) (*Empty, error) { - _, err := m.client.Configure(context.Background(), config) + _, err := m.client.Configure(ctx, config) return &Empty{}, err } From 
56c9c7a80486910bb010c0630a5f258625b02ece Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 2 Oct 2024 16:44:13 +0200 Subject: [PATCH 306/318] Re-generate capi models (#3260) * add centralapi_swagger.yaml * remove nullable to avoid generate error * re-generate pkg/modelscapi * update go-swagger to 0.31.0 --- Makefile | 1 + pkg/models/generate.go | 2 +- pkg/modelscapi/add_signals_request.go | 5 + pkg/modelscapi/add_signals_request_item.go | 9 + .../add_signals_request_item_decisions.go | 5 + ...add_signals_request_item_decisions_item.go | 3 + pkg/modelscapi/centralapi_swagger.yaml | 875 ++++++++++++++++++ pkg/modelscapi/decisions_delete_request.go | 5 + pkg/modelscapi/decisions_sync_request.go | 5 + pkg/modelscapi/decisions_sync_request_item.go | 1 + .../decisions_sync_request_item_decisions.go | 5 + pkg/modelscapi/generate.go | 4 + .../get_decisions_stream_response.go | 5 + .../get_decisions_stream_response_deleted.go | 5 + .../get_decisions_stream_response_links.go | 5 + .../get_decisions_stream_response_new.go | 5 + .../get_decisions_stream_response_new_item.go | 5 + pkg/modelscapi/metrics_request.go | 10 + 18 files changed, 954 insertions(+), 1 deletion(-) create mode 100644 pkg/modelscapi/centralapi_swagger.yaml create mode 100644 pkg/modelscapi/generate.go diff --git a/Makefile b/Makefile index a0b06dc2ea0..cb0d5ebaf60 100644 --- a/Makefile +++ b/Makefile @@ -267,6 +267,7 @@ crowdsec: ## Build crowdsec generate: ## Generate code for the database and APIs $(GO) generate ./pkg/database/ent $(GO) generate ./pkg/models + $(GO) generate ./pkg/modelscapi .PHONY: testclean testclean: bats-clean ## Remove test artifacts diff --git a/pkg/models/generate.go b/pkg/models/generate.go index ccacc409ab5..502d6f3d2cf 100644 --- a/pkg/models/generate.go +++ b/pkg/models/generate.go @@ -1,4 +1,4 @@ package models -//go:generate go run -mod=mod github.com/go-swagger/go-swagger/cmd/swagger@v0.30.5 generate model 
--spec=./localapi_swagger.yaml --target=../ +//go:generate go run -mod=mod github.com/go-swagger/go-swagger/cmd/swagger@v0.31.0 generate model --spec=./localapi_swagger.yaml --target=../ diff --git a/pkg/modelscapi/add_signals_request.go b/pkg/modelscapi/add_signals_request.go index 62fe590cb79..7bfe6ae80e0 100644 --- a/pkg/modelscapi/add_signals_request.go +++ b/pkg/modelscapi/add_signals_request.go @@ -56,6 +56,11 @@ func (m AddSignalsRequest) ContextValidate(ctx context.Context, formats strfmt.R for i := 0; i < len(m); i++ { if m[i] != nil { + + if swag.IsZero(m[i]) { // not required + return nil + } + if err := m[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName(strconv.Itoa(i)) diff --git a/pkg/modelscapi/add_signals_request_item.go b/pkg/modelscapi/add_signals_request_item.go index f9c865b4c68..5f63b542d5a 100644 --- a/pkg/modelscapi/add_signals_request_item.go +++ b/pkg/modelscapi/add_signals_request_item.go @@ -65,6 +65,9 @@ type AddSignalsRequestItem struct { // stop at // Required: true StopAt *string `json:"stop_at"` + + // UUID of the alert + UUID string `json:"uuid,omitempty"` } // Validate validates this add signals request item @@ -257,6 +260,11 @@ func (m *AddSignalsRequestItem) contextValidateContext(ctx context.Context, form for i := 0; i < len(m.Context); i++ { if m.Context[i] != nil { + + if swag.IsZero(m.Context[i]) { // not required + return nil + } + if err := m.Context[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("context" + "." 
+ strconv.Itoa(i)) @@ -289,6 +297,7 @@ func (m *AddSignalsRequestItem) contextValidateDecisions(ctx context.Context, fo func (m *AddSignalsRequestItem) contextValidateSource(ctx context.Context, formats strfmt.Registry) error { if m.Source != nil { + if err := m.Source.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("source") diff --git a/pkg/modelscapi/add_signals_request_item_decisions.go b/pkg/modelscapi/add_signals_request_item_decisions.go index 54e123ab3f8..11ed27a496d 100644 --- a/pkg/modelscapi/add_signals_request_item_decisions.go +++ b/pkg/modelscapi/add_signals_request_item_decisions.go @@ -54,6 +54,11 @@ func (m AddSignalsRequestItemDecisions) ContextValidate(ctx context.Context, for for i := 0; i < len(m); i++ { if m[i] != nil { + + if swag.IsZero(m[i]) { // not required + return nil + } + if err := m[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName(strconv.Itoa(i)) diff --git a/pkg/modelscapi/add_signals_request_item_decisions_item.go b/pkg/modelscapi/add_signals_request_item_decisions_item.go index 34dfeb5bce5..797c517e33f 100644 --- a/pkg/modelscapi/add_signals_request_item_decisions_item.go +++ b/pkg/modelscapi/add_signals_request_item_decisions_item.go @@ -49,6 +49,9 @@ type AddSignalsRequestItemDecisionsItem struct { // until Until string `json:"until,omitempty"` + // UUID of the decision + UUID string `json:"uuid,omitempty"` + // the value of the decision scope : an IP, a range, a username, etc // Required: true Value *string `json:"value"` diff --git a/pkg/modelscapi/centralapi_swagger.yaml b/pkg/modelscapi/centralapi_swagger.yaml new file mode 100644 index 00000000000..bd695894f2b --- /dev/null +++ b/pkg/modelscapi/centralapi_swagger.yaml @@ -0,0 +1,875 @@ +swagger: "2.0" +info: + description: + "API to manage machines using [crowdsec](https://github.com/crowdsecurity/crowdsec)\ + \ and bouncers.\n" + version: 
"2023-01-23T11:16:39Z" + title: "prod-capi-v3" + contact: + name: "Crowdsec team" + url: "https://github.com/crowdsecurity/crowdsec" + email: "support@crowdsec.net" +host: "api.crowdsec.net" +basePath: "/v3" +tags: + - name: "watchers" + description: "Operations about watchers: crowdsec & cscli" + - name: "bouncers" + description: "Operations about decisions : bans, captcha, rate-limit etc." +schemes: + - "https" +paths: + /decisions/delete: + post: + tags: + - "watchers" + summary: "delete decisions" + description: "delete provided decisions" + consumes: + - "application/json" + produces: + - "application/json" + parameters: + - in: "body" + name: "DecisionsDeleteRequest" + required: true + schema: + $ref: "#/definitions/DecisionsDeleteRequest" + responses: + "200": + description: "200 response" + schema: + $ref: "#/definitions/SuccessResponse" + "500": + description: "500 response" + schema: + $ref: "#/definitions/ErrorResponse" + security: + - UserPoolAuthorizer: [] + /decisions/stream: + get: + tags: + - "bouncers" + - "watchers" + summary: "returns list of top decisions" + description: "returns list of top decisions to add or delete" + produces: + - "application/json" + responses: + "200": + description: "200 response" + schema: + $ref: "#/definitions/GetDecisionsStreamResponse" + "400": + description: "400 response" + schema: + $ref: "#/definitions/ErrorResponse" + "500": + description: "500 response" + schema: + $ref: "#/definitions/ErrorResponse" + "404": + description: "404 response" + schema: + $ref: "#/definitions/ErrorResponse" + security: + - UserPoolAuthorizer: [] + options: + consumes: + - "application/json" + produces: + - "application/json" + responses: + "200": + description: "200 response" + headers: + Access-Control-Allow-Origin: + type: "string" + Access-Control-Allow-Methods: + type: "string" + Access-Control-Allow-Headers: + type: "string" + /decisions/sync: + post: + tags: + - "watchers" + summary: "sync decisions" + description: "sync 
provided decisions" + consumes: + - "application/json" + produces: + - "application/json" + parameters: + - in: "body" + name: "DecisionsSyncRequest" + required: true + schema: + $ref: "#/definitions/DecisionsSyncRequest" + responses: + "200": + description: "200 response" + schema: + $ref: "#/definitions/SuccessResponse" + "500": + description: "500 response" + schema: + $ref: "#/definitions/ErrorResponse" + security: + - UserPoolAuthorizer: [] + /metrics: + post: + tags: + - "watchers" + summary: "receive metrics about enrolled machines and bouncers in APIL" + description: "receive metrics about enrolled machines and bouncers in APIL" + consumes: + - "application/json" + produces: + - "application/json" + parameters: + - in: "body" + name: "MetricsRequest" + required: true + schema: + $ref: "#/definitions/MetricsRequest" + responses: + "200": + description: "200 response" + schema: + $ref: "#/definitions/SuccessResponse" + "400": + description: "400 response" + schema: + $ref: "#/definitions/ErrorResponse" + "500": + description: "500 response" + schema: + $ref: "#/definitions/ErrorResponse" + security: + - UserPoolAuthorizer: [] + /signals: + post: + tags: + - "watchers" + summary: "Push signals" + description: "to push signals" + consumes: + - "application/json" + produces: + - "application/json" + parameters: + - in: "body" + name: "AddSignalsRequest" + required: true + schema: + $ref: "#/definitions/AddSignalsRequest" + responses: + "200": + description: "200 response" + schema: + $ref: "#/definitions/SuccessResponse" + "400": + description: "400 response" + schema: + $ref: "#/definitions/ErrorResponse" + "500": + description: "500 response" + schema: + $ref: "#/definitions/ErrorResponse" + security: + - UserPoolAuthorizer: [] + /watchers: + post: + tags: + - "watchers" + summary: "Register watcher" + description: "Register a watcher" + consumes: + - "application/json" + produces: + - "application/json" + parameters: + - in: "body" + name: "RegisterRequest" + 
required: true + schema: + $ref: "#/definitions/RegisterRequest" + responses: + "200": + description: "200 response" + schema: + $ref: "#/definitions/SuccessResponse" + "400": + description: "400 response" + schema: + $ref: "#/definitions/ErrorResponse" + "500": + description: "500 response" + schema: + $ref: "#/definitions/ErrorResponse" + /watchers/enroll: + post: + tags: + - "watchers" + summary: "watcher enrollment" + description: "watcher enrollment : enroll watcher to crowdsec backoffice account" + consumes: + - "application/json" + produces: + - "application/json" + parameters: + - in: "body" + name: "EnrollRequest" + required: true + schema: + $ref: "#/definitions/EnrollRequest" + responses: + "200": + description: "200 response" + schema: + $ref: "#/definitions/SuccessResponse" + "400": + description: "400 response" + schema: + $ref: "#/definitions/ErrorResponse" + "500": + description: "500 response" + schema: + $ref: "#/definitions/ErrorResponse" + "403": + description: "403 response" + schema: + $ref: "#/definitions/ErrorResponse" + security: + - UserPoolAuthorizer: [] + /watchers/login: + post: + tags: + - "watchers" + summary: "watcher login" + description: "Sign-in to get a valid token" + consumes: + - "application/json" + produces: + - "application/json" + parameters: + - in: "body" + name: "LoginRequest" + required: true + schema: + $ref: "#/definitions/LoginRequest" + responses: + "200": + description: "200 response" + schema: + $ref: "#/definitions/LoginResponse" + "400": + description: "400 response" + schema: + $ref: "#/definitions/ErrorResponse" + "500": + description: "500 response" + schema: + $ref: "#/definitions/ErrorResponse" + "403": + description: "403 response" + schema: + $ref: "#/definitions/ErrorResponse" + /watchers/reset: + post: + tags: + - "watchers" + summary: "Reset Password" + description: "to reset a watcher password" + consumes: + - "application/json" + produces: + - "application/json" + parameters: + - in: "body" + name: 
"ResetPasswordRequest" + required: true + schema: + $ref: "#/definitions/ResetPasswordRequest" + responses: + "200": + description: "200 response" + schema: + $ref: "#/definitions/SuccessResponse" + headers: + Content-type: + type: "string" + Access-Control-Allow-Origin: + type: "string" + "400": + description: "400 response" + schema: + $ref: "#/definitions/ErrorResponse" + "500": + description: "500 response" + schema: + $ref: "#/definitions/ErrorResponse" + headers: + Content-type: + type: "string" + Access-Control-Allow-Origin: + type: "string" + "403": + description: "403 response" + schema: + $ref: "#/definitions/ErrorResponse" + "404": + description: "404 response" + headers: + Content-type: + type: "string" + Access-Control-Allow-Origin: + type: "string" + options: + consumes: + - "application/json" + produces: + - "application/json" + responses: + "200": + description: "200 response" + headers: + Access-Control-Allow-Origin: + type: "string" + Access-Control-Allow-Methods: + type: "string" + Access-Control-Allow-Headers: + type: "string" +securityDefinitions: + UserPoolAuthorizer: + type: "apiKey" + name: "Authorization" + in: "header" + x-amazon-apigateway-authtype: "cognito_user_pools" +definitions: + DecisionsDeleteRequest: + title: "delete decisions" + type: "array" + description: "delete decision model" + items: + $ref: "#/definitions/DecisionsDeleteRequestItem" + DecisionsSyncRequestItem: + type: "object" + required: + - "message" + - "scenario" + - "scenario_hash" + - "scenario_version" + - "source" + - "start_at" + - "stop_at" + properties: + scenario_trust: + type: "string" + scenario_hash: + type: "string" + scenario: + type: "string" + alert_id: + type: "integer" + created_at: + type: "string" + machine_id: + type: "string" + decisions: + $ref: "#/definitions/DecisionsSyncRequestItemDecisions" + source: + $ref: "#/definitions/DecisionsSyncRequestItemSource" + scenario_version: + type: "string" + message: + type: "string" + description: "a human 
readable message" + start_at: + type: "string" + stop_at: + type: "string" + title: "Signal" + AddSignalsRequestItem: + type: "object" + required: + - "message" + - "scenario" + - "scenario_hash" + - "scenario_version" + - "source" + - "start_at" + - "stop_at" + properties: + created_at: + type: "string" + machine_id: + type: "string" + source: + $ref: "#/definitions/AddSignalsRequestItemSource" + scenario_version: + type: "string" + message: + type: "string" + description: "a human readable message" + uuid: + type: "string" + description: "UUID of the alert" + start_at: + type: "string" + scenario_trust: + type: "string" + scenario_hash: + type: "string" + scenario: + type: "string" + alert_id: + type: "integer" + context: + type: "array" + items: + type: "object" + properties: + value: + type: "string" + key: + type: "string" + decisions: + $ref: "#/definitions/AddSignalsRequestItemDecisions" + stop_at: + type: "string" + title: "Signal" + DecisionsSyncRequest: + title: "sync decisions request" + type: "array" + description: "sync decision model" + items: + $ref: "#/definitions/DecisionsSyncRequestItem" + LoginRequest: + type: "object" + required: + - "machine_id" + - "password" + properties: + password: + type: "string" + description: "Password, should respect the password policy (link to add)" + machine_id: + type: "string" + description: "machine_id is a (username) generated by crowdsec" + minLength: 48 + maxLength: 48 + pattern: "^[a-zA-Z0-9]+$" + scenarios: + type: "array" + description: "all scenarios installed" + items: + type: "string" + title: "login request" + description: "Login request model" + GetDecisionsStreamResponseNewItem: + type: "object" + required: + - "scenario" + - "scope" + - "decisions" + properties: + scenario: + type: "string" + scope: + type: "string" + description: + "the scope of decision : does it apply to an IP, a range, a username,\ + \ etc" + decisions: + type: array + items: + type: object + required: + - value + - duration + 
properties: + duration: + type: "string" + value: + type: "string" + description: + "the value of the decision scope : an IP, a range, a username,\ + \ etc" + title: "New Decisions" + GetDecisionsStreamResponseDeletedItem: + type: object + required: + - scope + - decisions + properties: + scope: + type: "string" + description: + "the scope of decision : does it apply to an IP, a range, a username,\ + \ etc" + decisions: + type: array + items: + type: string + BlocklistLink: + type: object + required: + - name + - url + - remediation + - scope + - duration + properties: + name: + type: string + description: "the name of the blocklist" + url: + type: string + description: "the url from which the blocklist content can be downloaded" + remediation: + type: string + description: "the remediation that should be used for the blocklist" + scope: + type: string + description: "the scope of decisions in the blocklist" + duration: + type: string + AddSignalsRequestItemDecisionsItem: + type: "object" + required: + - "duration" + - "id" + - "origin" + - "scenario" + - "scope" + - "type" + - "value" + properties: + duration: + type: "string" + uuid: + type: "string" + description: "UUID of the decision" + scenario: + type: "string" + origin: + type: "string" + description: "the origin of the decision : cscli, crowdsec" + scope: + type: "string" + description: + "the scope of decision : does it apply to an IP, a range, a username,\ + \ etc" + simulated: + type: "boolean" + until: + type: "string" + id: + type: "integer" + description: "(only relevant for GET ops) the unique id" + type: + type: "string" + description: + "the type of decision, might be 'ban', 'captcha' or something\ + \ custom. Ignored when watcher (cscli/crowdsec) is pushing to APIL." 
+ value: + type: "string" + description: + "the value of the decision scope : an IP, a range, a username,\ + \ etc" + title: "Decision" + EnrollRequest: + type: "object" + required: + - "attachment_key" + properties: + name: + type: "string" + description: "The name that will be display in the console for the instance" + overwrite: + type: "boolean" + description: "To force enroll the instance" + attachment_key: + type: "string" + description: + "attachment_key is generated in your crowdsec backoffice account\ + \ and allows you to enroll your machines to your BO account" + pattern: "^[a-zA-Z0-9]+$" + tags: + type: "array" + description: "Tags to apply on the console for the instance" + items: + type: "string" + title: "enroll request" + description: "enroll request model" + ResetPasswordRequest: + type: "object" + required: + - "machine_id" + - "password" + properties: + password: + type: "string" + description: "Password, should respect the password policy (link to add)" + machine_id: + type: "string" + description: "machine_id is a (username) generated by crowdsec" + minLength: 48 + maxLength: 48 + pattern: "^[a-zA-Z0-9]+$" + title: "resetPassword" + description: "ResetPassword request model" + MetricsRequestBouncersItem: + type: "object" + properties: + last_pull: + type: "string" + description: "last bouncer pull date" + custom_name: + type: "string" + description: "bouncer name" + name: + type: "string" + description: "bouncer type (firewall, php...)" + version: + type: "string" + description: "bouncer version" + title: "MetricsBouncerInfo" + AddSignalsRequestItemSource: + type: "object" + required: + - "scope" + - "value" + properties: + scope: + type: "string" + description: "the scope of a source : ip,range,username,etc" + ip: + type: "string" + description: "provided as a convenience when the source is an IP" + latitude: + type: "number" + format: "float" + as_number: + type: "string" + description: "provided as a convenience when the source is an IP" + 
range: + type: "string" + description: "provided as a convenience when the source is an IP" + cn: + type: "string" + value: + type: "string" + description: "the value of a source : the ip, the range, the username,etc" + as_name: + type: "string" + description: "provided as a convenience when the source is an IP" + longitude: + type: "number" + format: "float" + title: "Source" + DecisionsSyncRequestItemDecisions: + title: "Decisions list" + type: "array" + items: + $ref: "#/definitions/DecisionsSyncRequestItemDecisionsItem" + RegisterRequest: + type: "object" + required: + - "machine_id" + - "password" + properties: + password: + type: "string" + description: "Password, should respect the password policy (link to add)" + machine_id: + type: "string" + description: "machine_id is a (username) generated by crowdsec" + pattern: "^[a-zA-Z0-9]+$" + title: "register request" + description: "Register request model" + SuccessResponse: + type: "object" + required: + - "message" + properties: + message: + type: "string" + description: "message" + title: "success response" + description: "success response return by the API" + LoginResponse: + type: "object" + properties: + code: + type: "integer" + expire: + type: "string" + token: + type: "string" + title: "login response" + description: "Login request model" + DecisionsSyncRequestItemDecisionsItem: + type: "object" + required: + - "duration" + - "id" + - "origin" + - "scenario" + - "scope" + - "type" + - "value" + properties: + duration: + type: "string" + scenario: + type: "string" + origin: + type: "string" + description: "the origin of the decision : cscli, crowdsec" + scope: + type: "string" + description: + "the scope of decision : does it apply to an IP, a range, a username,\ + \ etc" + simulated: + type: "boolean" + until: + type: "string" + id: + type: "integer" + description: "(only relevant for GET ops) the unique id" + type: + type: "string" + description: + "the type of decision, might be 'ban', 'captcha' or 
something\ + \ custom. Ignored when watcher (cscli/crowdsec) is pushing to APIL." + value: + type: "string" + description: + "the value of the decision scope : an IP, a range, a username,\ + \ etc" + title: "Decision" + GetDecisionsStreamResponse: + type: "object" + properties: + new: + $ref: "#/definitions/GetDecisionsStreamResponseNew" + deleted: + $ref: "#/definitions/GetDecisionsStreamResponseDeleted" + links: + $ref: "#/definitions/GetDecisionsStreamResponseLinks" + title: "get decisions stream response" + description: "get decision response model" + DecisionsSyncRequestItemSource: + type: "object" + required: + - "scope" + - "value" + properties: + scope: + type: "string" + description: "the scope of a source : ip,range,username,etc" + ip: + type: "string" + description: "provided as a convenience when the source is an IP" + latitude: + type: "number" + format: "float" + as_number: + type: "string" + description: "provided as a convenience when the source is an IP" + range: + type: "string" + description: "provided as a convenience when the source is an IP" + cn: + type: "string" + value: + type: "string" + description: "the value of a source : the ip, the range, the username,etc" + as_name: + type: "string" + description: "provided as a convenience when the source is an IP" + longitude: + type: "number" + format: "float" + title: "Source" + AddSignalsRequestItemDecisions: + title: "Decisions list" + type: "array" + items: + $ref: "#/definitions/AddSignalsRequestItemDecisionsItem" + MetricsRequestMachinesItem: + type: "object" + properties: + last_update: + type: "string" + description: "last agent update date" + name: + type: "string" + description: "agent name" + last_push: + type: "string" + description: "last agent push date" + version: + type: "string" + description: "agent version" + title: "MetricsAgentInfo" + MetricsRequest: + type: "object" + required: + - "bouncers" + - "machines" + properties: + bouncers: + type: "array" + items: + $ref: 
"#/definitions/MetricsRequestBouncersItem" + machines: + type: "array" + items: + $ref: "#/definitions/MetricsRequestMachinesItem" + title: "metrics" + description: "push metrics model" + ErrorResponse: + type: "object" + required: + - "message" + properties: + message: + type: "string" + description: "Error message" + errors: + type: "string" + description: "more detail on individual errors" + title: "error response" + description: "error response return by the API" + AddSignalsRequest: + title: "add signals request" + type: "array" + description: "All signals request model" + items: + $ref: "#/definitions/AddSignalsRequestItem" + DecisionsDeleteRequestItem: + type: "string" + title: "decisionsIDs" + GetDecisionsStreamResponseNew: + title: "Decisions list" + type: "array" + items: + $ref: "#/definitions/GetDecisionsStreamResponseNewItem" + GetDecisionsStreamResponseDeleted: + title: "Decisions list" + type: "array" + items: + $ref: "#/definitions/GetDecisionsStreamResponseDeletedItem" + GetDecisionsStreamResponseLinks: + title: "Decisions list" + type: "object" + properties: + blocklists: + type: array + items: + $ref: "#/definitions/BlocklistLink" + diff --git a/pkg/modelscapi/decisions_delete_request.go b/pkg/modelscapi/decisions_delete_request.go index e8718835027..0c93558adf1 100644 --- a/pkg/modelscapi/decisions_delete_request.go +++ b/pkg/modelscapi/decisions_delete_request.go @@ -11,6 +11,7 @@ import ( "github.com/go-openapi/errors" "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" ) // DecisionsDeleteRequest delete decisions @@ -49,6 +50,10 @@ func (m DecisionsDeleteRequest) ContextValidate(ctx context.Context, formats str for i := 0; i < len(m); i++ { + if swag.IsZero(m[i]) { // not required + return nil + } + if err := m[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName(strconv.Itoa(i)) diff --git a/pkg/modelscapi/decisions_sync_request.go 
b/pkg/modelscapi/decisions_sync_request.go index e3a95162519..c087d39ff62 100644 --- a/pkg/modelscapi/decisions_sync_request.go +++ b/pkg/modelscapi/decisions_sync_request.go @@ -56,6 +56,11 @@ func (m DecisionsSyncRequest) ContextValidate(ctx context.Context, formats strfm for i := 0; i < len(m); i++ { if m[i] != nil { + + if swag.IsZero(m[i]) { // not required + return nil + } + if err := m[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName(strconv.Itoa(i)) diff --git a/pkg/modelscapi/decisions_sync_request_item.go b/pkg/modelscapi/decisions_sync_request_item.go index 5139ea2de4b..460fe4d430e 100644 --- a/pkg/modelscapi/decisions_sync_request_item.go +++ b/pkg/modelscapi/decisions_sync_request_item.go @@ -231,6 +231,7 @@ func (m *DecisionsSyncRequestItem) contextValidateDecisions(ctx context.Context, func (m *DecisionsSyncRequestItem) contextValidateSource(ctx context.Context, formats strfmt.Registry) error { if m.Source != nil { + if err := m.Source.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("source") diff --git a/pkg/modelscapi/decisions_sync_request_item_decisions.go b/pkg/modelscapi/decisions_sync_request_item_decisions.go index 76316e43c5e..bdc8e77e2b6 100644 --- a/pkg/modelscapi/decisions_sync_request_item_decisions.go +++ b/pkg/modelscapi/decisions_sync_request_item_decisions.go @@ -54,6 +54,11 @@ func (m DecisionsSyncRequestItemDecisions) ContextValidate(ctx context.Context, for i := 0; i < len(m); i++ { if m[i] != nil { + + if swag.IsZero(m[i]) { // not required + return nil + } + if err := m[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName(strconv.Itoa(i)) diff --git a/pkg/modelscapi/generate.go b/pkg/modelscapi/generate.go new file mode 100644 index 00000000000..66dc2a34b7e --- /dev/null +++ b/pkg/modelscapi/generate.go @@ -0,0 +1,4 @@ +package modelscapi + 
+//go:generate go run -mod=mod github.com/go-swagger/go-swagger/cmd/swagger@v0.31.0 generate model --spec=./centralapi_swagger.yaml --target=../ --model-package=modelscapi + diff --git a/pkg/modelscapi/get_decisions_stream_response.go b/pkg/modelscapi/get_decisions_stream_response.go index af19b85c4d3..5ebf29c5d93 100644 --- a/pkg/modelscapi/get_decisions_stream_response.go +++ b/pkg/modelscapi/get_decisions_stream_response.go @@ -144,6 +144,11 @@ func (m *GetDecisionsStreamResponse) contextValidateDeleted(ctx context.Context, func (m *GetDecisionsStreamResponse) contextValidateLinks(ctx context.Context, formats strfmt.Registry) error { if m.Links != nil { + + if swag.IsZero(m.Links) { // not required + return nil + } + if err := m.Links.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("links") diff --git a/pkg/modelscapi/get_decisions_stream_response_deleted.go b/pkg/modelscapi/get_decisions_stream_response_deleted.go index d218bf87e4e..78292860f22 100644 --- a/pkg/modelscapi/get_decisions_stream_response_deleted.go +++ b/pkg/modelscapi/get_decisions_stream_response_deleted.go @@ -54,6 +54,11 @@ func (m GetDecisionsStreamResponseDeleted) ContextValidate(ctx context.Context, for i := 0; i < len(m); i++ { if m[i] != nil { + + if swag.IsZero(m[i]) { // not required + return nil + } + if err := m[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName(strconv.Itoa(i)) diff --git a/pkg/modelscapi/get_decisions_stream_response_links.go b/pkg/modelscapi/get_decisions_stream_response_links.go index 85cc9af9b48..6b9054574f1 100644 --- a/pkg/modelscapi/get_decisions_stream_response_links.go +++ b/pkg/modelscapi/get_decisions_stream_response_links.go @@ -82,6 +82,11 @@ func (m *GetDecisionsStreamResponseLinks) contextValidateBlocklists(ctx context. 
for i := 0; i < len(m.Blocklists); i++ { if m.Blocklists[i] != nil { + + if swag.IsZero(m.Blocklists[i]) { // not required + return nil + } + if err := m.Blocklists[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("blocklists" + "." + strconv.Itoa(i)) diff --git a/pkg/modelscapi/get_decisions_stream_response_new.go b/pkg/modelscapi/get_decisions_stream_response_new.go index e9525bf6fa7..8e09f1b20e7 100644 --- a/pkg/modelscapi/get_decisions_stream_response_new.go +++ b/pkg/modelscapi/get_decisions_stream_response_new.go @@ -54,6 +54,11 @@ func (m GetDecisionsStreamResponseNew) ContextValidate(ctx context.Context, form for i := 0; i < len(m); i++ { if m[i] != nil { + + if swag.IsZero(m[i]) { // not required + return nil + } + if err := m[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName(strconv.Itoa(i)) diff --git a/pkg/modelscapi/get_decisions_stream_response_new_item.go b/pkg/modelscapi/get_decisions_stream_response_new_item.go index a3592d0ab61..77cc06732ce 100644 --- a/pkg/modelscapi/get_decisions_stream_response_new_item.go +++ b/pkg/modelscapi/get_decisions_stream_response_new_item.go @@ -119,6 +119,11 @@ func (m *GetDecisionsStreamResponseNewItem) contextValidateDecisions(ctx context for i := 0; i < len(m.Decisions); i++ { if m.Decisions[i] != nil { + + if swag.IsZero(m.Decisions[i]) { // not required + return nil + } + if err := m.Decisions[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("decisions" + "." 
+ strconv.Itoa(i)) diff --git a/pkg/modelscapi/metrics_request.go b/pkg/modelscapi/metrics_request.go index d5b7d058fc1..5d663cf1750 100644 --- a/pkg/modelscapi/metrics_request.go +++ b/pkg/modelscapi/metrics_request.go @@ -126,6 +126,11 @@ func (m *MetricsRequest) contextValidateBouncers(ctx context.Context, formats st for i := 0; i < len(m.Bouncers); i++ { if m.Bouncers[i] != nil { + + if swag.IsZero(m.Bouncers[i]) { // not required + return nil + } + if err := m.Bouncers[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("bouncers" + "." + strconv.Itoa(i)) @@ -146,6 +151,11 @@ func (m *MetricsRequest) contextValidateMachines(ctx context.Context, formats st for i := 0; i < len(m.Machines); i++ { if m.Machines[i] != nil { + + if swag.IsZero(m.Machines[i]) { // not required + return nil + } + if err := m.Machines[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("machines" + "." 
+ strconv.Itoa(i)) From af3116d1a7094273afd9c571c7356d46a0b250ac Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 3 Oct 2024 11:06:39 +0200 Subject: [PATCH 307/318] context propagation: don't store ctx in api controller (#3268) --- pkg/apiserver/apiserver.go | 1 - pkg/apiserver/controllers/controller.go | 3 --- pkg/apiserver/controllers/v1/controller.go | 4 ---- 3 files changed, 8 deletions(-) diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go index 2b2b453348a..8fe500c7f52 100644 --- a/pkg/apiserver/apiserver.go +++ b/pkg/apiserver/apiserver.go @@ -229,7 +229,6 @@ func NewServer(config *csconfig.LocalApiServerCfg) (*APIServer, error) { controller := &controllers.Controller{ DBClient: dbClient, - Ectx: ctx, Router: router, Profiles: config.Profiles, Log: clog, diff --git a/pkg/apiserver/controllers/controller.go b/pkg/apiserver/controllers/controller.go index 29f02723b70..719bb231006 100644 --- a/pkg/apiserver/controllers/controller.go +++ b/pkg/apiserver/controllers/controller.go @@ -1,7 +1,6 @@ package controllers import ( - "context" "net" "net/http" "strings" @@ -18,7 +17,6 @@ import ( ) type Controller struct { - Ectx context.Context DBClient *database.Client Router *gin.Engine Profiles []*csconfig.ProfileCfg @@ -83,7 +81,6 @@ func (c *Controller) NewV1() error { v1Config := v1.ControllerV1Config{ DbClient: c.DBClient, - Ctx: c.Ectx, ProfilesCfg: c.Profiles, DecisionDeleteChan: c.DecisionDeleteChan, AlertsAddChan: c.AlertsAddChan, diff --git a/pkg/apiserver/controllers/v1/controller.go b/pkg/apiserver/controllers/v1/controller.go index 6de4abe3b3b..f8b6aa76ea5 100644 --- a/pkg/apiserver/controllers/v1/controller.go +++ b/pkg/apiserver/controllers/v1/controller.go @@ -1,7 +1,6 @@ package v1 import ( - "context" "fmt" "net" @@ -14,7 +13,6 @@ import ( ) type Controller struct { - Ectx context.Context DBClient *database.Client APIKeyHeader string Middlewares *middlewares.Middlewares @@ -31,7 +29,6 @@ type 
Controller struct { type ControllerV1Config struct { DbClient *database.Client - Ctx context.Context ProfilesCfg []*csconfig.ProfileCfg AlertsAddChan chan []*models.Alert @@ -52,7 +49,6 @@ func New(cfg *ControllerV1Config) (*Controller, error) { } v1 := &Controller{ - Ectx: cfg.Ctx, DBClient: cfg.DbClient, APIKeyHeader: middlewares.APIKeyHeader, Profiles: profiles, From 06adbe031d9928d239cca5b31857471f173b769f Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 3 Oct 2024 14:51:53 +0200 Subject: [PATCH 308/318] context propagation: apic, unit tests (#3271) * context propagation: apic * context propagation: unit tests --- .golangci.yml | 6 ++ pkg/apiserver/alerts_test.go | 144 ++++++++++++++-------------- pkg/apiserver/api_key_test.go | 2 +- pkg/apiserver/apic.go | 22 ++--- pkg/apiserver/apic_metrics_test.go | 2 +- pkg/apiserver/apic_test.go | 67 +++++++------ pkg/apiserver/apiserver_test.go | 4 +- pkg/apiserver/decisions_test.go | 101 ++++++++++--------- pkg/apiserver/heartbeat_test.go | 8 +- pkg/apiserver/usage_metrics_test.go | 12 +-- 10 files changed, 189 insertions(+), 179 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 54c0acb0644..a9d962610df 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -487,6 +487,12 @@ issues: path: "pkg/(.+)_test.go" text: "deep-exit: .*" + # we use t,ctx instead of ctx,t in tests + - linters: + - revive + path: "pkg/(.+)_test.go" + text: "context-as-argument: context.Context should be the first parameter of a function" + # tolerate deep exit in cobra's OnInitialize, for now - linters: - revive diff --git a/pkg/apiserver/alerts_test.go b/pkg/apiserver/alerts_test.go index d801ff86f45..0e89ddb2137 100644 --- a/pkg/apiserver/alerts_test.go +++ b/pkg/apiserver/alerts_test.go @@ -26,11 +26,11 @@ type LAPI struct { DBConfig *csconfig.DatabaseCfg } -func SetupLAPITest(t *testing.T) LAPI { +func SetupLAPITest(t *testing.T, ctx context.Context) LAPI { t.Helper() - router, loginResp, config 
:= InitMachineTest(t) + router, loginResp, config := InitMachineTest(t, ctx) - APIKey := CreateTestBouncer(t, config.API.Server.DbConfig) + APIKey := CreateTestBouncer(t, ctx, config.API.Server.DbConfig) return LAPI{ router: router, @@ -40,13 +40,12 @@ func SetupLAPITest(t *testing.T) LAPI { } } -func (l *LAPI) InsertAlertFromFile(t *testing.T, path string) *httptest.ResponseRecorder { +func (l *LAPI) InsertAlertFromFile(t *testing.T, ctx context.Context, path string) *httptest.ResponseRecorder { alertReader := GetAlertReaderFromFile(t, path) - return l.RecordResponse(t, http.MethodPost, "/v1/alerts", alertReader, "password") + return l.RecordResponse(t, ctx, http.MethodPost, "/v1/alerts", alertReader, "password") } -func (l *LAPI) RecordResponse(t *testing.T, verb string, url string, body *strings.Reader, authType string) *httptest.ResponseRecorder { - ctx := context.Background() +func (l *LAPI) RecordResponse(t *testing.T, ctx context.Context, verb string, url string, body *strings.Reader, authType string) *httptest.ResponseRecorder { w := httptest.NewRecorder() req, err := http.NewRequestWithContext(ctx, verb, url, body) require.NoError(t, err) @@ -65,18 +64,17 @@ func (l *LAPI) RecordResponse(t *testing.T, verb string, url string, body *strin return w } -func InitMachineTest(t *testing.T) (*gin.Engine, models.WatcherAuthResponse, csconfig.Config) { +func InitMachineTest(t *testing.T, ctx context.Context) (*gin.Engine, models.WatcherAuthResponse, csconfig.Config) { router, config := NewAPITest(t) - loginResp := LoginToTestAPI(t, router, config) + loginResp := LoginToTestAPI(t, ctx, router, config) return router, loginResp, config } -func LoginToTestAPI(t *testing.T, router *gin.Engine, config csconfig.Config) models.WatcherAuthResponse { +func LoginToTestAPI(t *testing.T, ctx context.Context, router *gin.Engine, config csconfig.Config) models.WatcherAuthResponse { body := CreateTestMachine(t, router, "") ValidateMachine(t, "test", config.API.Server.DbConfig) - 
ctx := context.Background() w := httptest.NewRecorder() req, _ := http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers/login", strings.NewReader(body)) req.Header.Add("User-Agent", UserAgent) @@ -95,52 +93,55 @@ func AddAuthHeaders(request *http.Request, authResponse models.WatcherAuthRespon } func TestSimulatedAlert(t *testing.T) { - lapi := SetupLAPITest(t) - lapi.InsertAlertFromFile(t, "./tests/alert_minibulk+simul.json") + ctx := context.Background() + lapi := SetupLAPITest(t, ctx) + lapi.InsertAlertFromFile(t, ctx, "./tests/alert_minibulk+simul.json") alertContent := GetAlertReaderFromFile(t, "./tests/alert_minibulk+simul.json") // exclude decision in simulation mode - w := lapi.RecordResponse(t, "GET", "/v1/alerts?simulated=false", alertContent, "password") + w := lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?simulated=false", alertContent, "password") assert.Equal(t, 200, w.Code) assert.Contains(t, w.Body.String(), `"message":"Ip 91.121.79.178 performed crowdsecurity/ssh-bf (6 events over `) assert.NotContains(t, w.Body.String(), `"message":"Ip 91.121.79.179 performed crowdsecurity/ssh-bf (6 events over `) // include decision in simulation mode - w = lapi.RecordResponse(t, "GET", "/v1/alerts?simulated=true", alertContent, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?simulated=true", alertContent, "password") assert.Equal(t, 200, w.Code) assert.Contains(t, w.Body.String(), `"message":"Ip 91.121.79.178 performed crowdsecurity/ssh-bf (6 events over `) assert.Contains(t, w.Body.String(), `"message":"Ip 91.121.79.179 performed crowdsecurity/ssh-bf (6 events over `) } func TestCreateAlert(t *testing.T) { - lapi := SetupLAPITest(t) + ctx := context.Background() + lapi := SetupLAPITest(t, ctx) // Create Alert with invalid format - w := lapi.RecordResponse(t, http.MethodPost, "/v1/alerts", strings.NewReader("test"), "password") + w := lapi.RecordResponse(t, ctx, http.MethodPost, "/v1/alerts", strings.NewReader("test"), "password") 
assert.Equal(t, 400, w.Code) assert.Equal(t, `{"message":"invalid character 'e' in literal true (expecting 'r')"}`, w.Body.String()) // Create Alert with invalid input alertContent := GetAlertReaderFromFile(t, "./tests/invalidAlert_sample.json") - w = lapi.RecordResponse(t, http.MethodPost, "/v1/alerts", alertContent, "password") + w = lapi.RecordResponse(t, ctx, http.MethodPost, "/v1/alerts", alertContent, "password") assert.Equal(t, 500, w.Code) assert.Equal(t, `{"message":"validation failure list:\n0.scenario in body is required\n0.scenario_hash in body is required\n0.scenario_version in body is required\n0.simulated in body is required\n0.source in body is required"}`, w.Body.String()) // Create Valid Alert - w = lapi.InsertAlertFromFile(t, "./tests/alert_sample.json") + w = lapi.InsertAlertFromFile(t, ctx, "./tests/alert_sample.json") assert.Equal(t, 201, w.Code) assert.Equal(t, `["1"]`, w.Body.String()) } func TestCreateAlertChannels(t *testing.T) { + ctx := context.Background() apiServer, config := NewAPIServer(t) apiServer.controller.PluginChannel = make(chan csplugin.ProfileAlert) apiServer.InitController() - loginResp := LoginToTestAPI(t, apiServer.router, config) + loginResp := LoginToTestAPI(t, ctx, apiServer.router, config) lapi := LAPI{router: apiServer.router, loginResp: loginResp} var ( @@ -156,26 +157,27 @@ func TestCreateAlertChannels(t *testing.T) { wg.Done() }() - lapi.InsertAlertFromFile(t, "./tests/alert_ssh-bf.json") + lapi.InsertAlertFromFile(t, ctx, "./tests/alert_ssh-bf.json") wg.Wait() assert.Len(t, pd.Alert.Decisions, 1) apiServer.Close() } func TestAlertListFilters(t *testing.T) { - lapi := SetupLAPITest(t) - lapi.InsertAlertFromFile(t, "./tests/alert_ssh-bf.json") + ctx := context.Background() + lapi := SetupLAPITest(t, ctx) + lapi.InsertAlertFromFile(t, ctx, "./tests/alert_ssh-bf.json") alertContent := GetAlertReaderFromFile(t, "./tests/alert_ssh-bf.json") // bad filter - w := lapi.RecordResponse(t, "GET", "/v1/alerts?test=test", 
alertContent, "password") + w := lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?test=test", alertContent, "password") assert.Equal(t, 500, w.Code) assert.Equal(t, `{"message":"Filter parameter 'test' is unknown (=test): invalid filter"}`, w.Body.String()) // get without filters - w = lapi.RecordResponse(t, "GET", "/v1/alerts", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts", emptyBody, "password") assert.Equal(t, 200, w.Code) // check alert and decision assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") @@ -183,184 +185,185 @@ func TestAlertListFilters(t *testing.T) { // test decision_type filter (ok) - w = lapi.RecordResponse(t, "GET", "/v1/alerts?decision_type=ban", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?decision_type=ban", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) // test decision_type filter (bad value) - w = lapi.RecordResponse(t, "GET", "/v1/alerts?decision_type=ratata", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?decision_type=ratata", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Equal(t, "null", w.Body.String()) // test scope (ok) - w = lapi.RecordResponse(t, "GET", "/v1/alerts?scope=Ip", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?scope=Ip", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) // test scope (bad value) - w = lapi.RecordResponse(t, "GET", "/v1/alerts?scope=rarara", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, 
"GET", "/v1/alerts?scope=rarara", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Equal(t, "null", w.Body.String()) // test scenario (ok) - w = lapi.RecordResponse(t, "GET", "/v1/alerts?scenario=crowdsecurity/ssh-bf", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?scenario=crowdsecurity/ssh-bf", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) // test scenario (bad value) - w = lapi.RecordResponse(t, "GET", "/v1/alerts?scenario=crowdsecurity/nope", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?scenario=crowdsecurity/nope", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Equal(t, "null", w.Body.String()) // test ip (ok) - w = lapi.RecordResponse(t, "GET", "/v1/alerts?ip=91.121.79.195", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?ip=91.121.79.195", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) // test ip (bad value) - w = lapi.RecordResponse(t, "GET", "/v1/alerts?ip=99.122.77.195", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?ip=99.122.77.195", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Equal(t, "null", w.Body.String()) // test ip (invalid value) - w = lapi.RecordResponse(t, "GET", "/v1/alerts?ip=gruueq", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?ip=gruueq", emptyBody, "password") assert.Equal(t, 500, w.Code) assert.Equal(t, `{"message":"unable to convert 'gruueq' to int: invalid address: invalid ip address / range"}`, w.Body.String()) // test range (ok) 
- w = lapi.RecordResponse(t, "GET", "/v1/alerts?range=91.121.79.0/24&contains=false", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?range=91.121.79.0/24&contains=false", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) // test range - w = lapi.RecordResponse(t, "GET", "/v1/alerts?range=99.122.77.0/24&contains=false", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?range=99.122.77.0/24&contains=false", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Equal(t, "null", w.Body.String()) // test range (invalid value) - w = lapi.RecordResponse(t, "GET", "/v1/alerts?range=ratata", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?range=ratata", emptyBody, "password") assert.Equal(t, 500, w.Code) assert.Equal(t, `{"message":"unable to convert 'ratata' to int: invalid address: invalid ip address / range"}`, w.Body.String()) // test since (ok) - w = lapi.RecordResponse(t, "GET", "/v1/alerts?since=1h", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?since=1h", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) // test since (ok but yields no results) - w = lapi.RecordResponse(t, "GET", "/v1/alerts?since=1ns", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?since=1ns", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Equal(t, "null", w.Body.String()) // test since (invalid value) - w = lapi.RecordResponse(t, "GET", "/v1/alerts?since=1zuzu", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, 
"GET", "/v1/alerts?since=1zuzu", emptyBody, "password") assert.Equal(t, 500, w.Code) assert.Contains(t, w.Body.String(), `{"message":"while parsing duration: time: unknown unit`) // test until (ok) - w = lapi.RecordResponse(t, "GET", "/v1/alerts?until=1ns", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?until=1ns", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) // test until (ok but no return) - w = lapi.RecordResponse(t, "GET", "/v1/alerts?until=1m", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?until=1m", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Equal(t, "null", w.Body.String()) // test until (invalid value) - w = lapi.RecordResponse(t, "GET", "/v1/alerts?until=1zuzu", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?until=1zuzu", emptyBody, "password") assert.Equal(t, 500, w.Code) assert.Contains(t, w.Body.String(), `{"message":"while parsing duration: time: unknown unit`) // test simulated (ok) - w = lapi.RecordResponse(t, "GET", "/v1/alerts?simulated=true", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?simulated=true", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) // test simulated (ok) - w = lapi.RecordResponse(t, "GET", "/v1/alerts?simulated=false", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?simulated=false", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") 
assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) // test has active decision - w = lapi.RecordResponse(t, "GET", "/v1/alerts?has_active_decision=true", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?has_active_decision=true", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) // test has active decision - w = lapi.RecordResponse(t, "GET", "/v1/alerts?has_active_decision=false", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?has_active_decision=false", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Equal(t, "null", w.Body.String()) // test has active decision (invalid value) - w = lapi.RecordResponse(t, "GET", "/v1/alerts?has_active_decision=ratatqata", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?has_active_decision=ratatqata", emptyBody, "password") assert.Equal(t, 500, w.Code) assert.Equal(t, `{"message":"'ratatqata' is not a boolean: strconv.ParseBool: parsing \"ratatqata\": invalid syntax: unable to parse type"}`, w.Body.String()) } func TestAlertBulkInsert(t *testing.T) { - lapi := SetupLAPITest(t) + ctx := context.Background() + lapi := SetupLAPITest(t, ctx) // insert a bulk of 20 alerts to trigger bulk insert - lapi.InsertAlertFromFile(t, "./tests/alert_bulk.json") + lapi.InsertAlertFromFile(t, ctx, "./tests/alert_bulk.json") alertContent := GetAlertReaderFromFile(t, "./tests/alert_bulk.json") - w := lapi.RecordResponse(t, "GET", "/v1/alerts", alertContent, "password") + w := lapi.RecordResponse(t, ctx, "GET", "/v1/alerts", alertContent, "password") assert.Equal(t, 200, w.Code) } func TestListAlert(t *testing.T) { - lapi := SetupLAPITest(t) - lapi.InsertAlertFromFile(t, 
"./tests/alert_sample.json") + ctx := context.Background() + lapi := SetupLAPITest(t, ctx) + lapi.InsertAlertFromFile(t, ctx, "./tests/alert_sample.json") // List Alert with invalid filter - w := lapi.RecordResponse(t, "GET", "/v1/alerts?test=test", emptyBody, "password") + w := lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?test=test", emptyBody, "password") assert.Equal(t, 500, w.Code) assert.Equal(t, `{"message":"Filter parameter 'test' is unknown (=test): invalid filter"}`, w.Body.String()) // List Alert - w = lapi.RecordResponse(t, "GET", "/v1/alerts", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Contains(t, w.Body.String(), "crowdsecurity/test") } func TestCreateAlertErrors(t *testing.T) { - lapi := SetupLAPITest(t) - alertContent := GetAlertReaderFromFile(t, "./tests/alert_sample.json") - ctx := context.Background() + lapi := SetupLAPITest(t, ctx) + alertContent := GetAlertReaderFromFile(t, "./tests/alert_sample.json") // test invalid bearer w := httptest.NewRecorder() @@ -380,10 +383,9 @@ func TestCreateAlertErrors(t *testing.T) { } func TestDeleteAlert(t *testing.T) { - lapi := SetupLAPITest(t) - lapi.InsertAlertFromFile(t, "./tests/alert_sample.json") - ctx := context.Background() + lapi := SetupLAPITest(t, ctx) + lapi.InsertAlertFromFile(t, ctx, "./tests/alert_sample.json") // Fail Delete Alert w := httptest.NewRecorder() @@ -405,10 +407,9 @@ func TestDeleteAlert(t *testing.T) { } func TestDeleteAlertByID(t *testing.T) { - lapi := SetupLAPITest(t) - lapi.InsertAlertFromFile(t, "./tests/alert_sample.json") - ctx := context.Background() + lapi := SetupLAPITest(t, ctx) + lapi.InsertAlertFromFile(t, ctx, "./tests/alert_sample.json") // Fail Delete Alert w := httptest.NewRecorder() @@ -430,6 +431,7 @@ func TestDeleteAlertByID(t *testing.T) { } func TestDeleteAlertTrustedIPS(t *testing.T) { + ctx := context.Background() cfg := LoadTestConfig(t) // IPv6 mocking doesn't 
seem to work. // cfg.API.Server.TrustedIPs = []string{"1.2.3.4", "1.2.4.0/24", "::"} @@ -444,14 +446,12 @@ func TestDeleteAlertTrustedIPS(t *testing.T) { router, err := server.Router() require.NoError(t, err) - loginResp := LoginToTestAPI(t, router, cfg) + loginResp := LoginToTestAPI(t, ctx, router, cfg) lapi := LAPI{ router: router, loginResp: loginResp, } - ctx := context.Background() - assertAlertDeleteFailedFromIP := func(ip string) { w := httptest.NewRecorder() req, _ := http.NewRequestWithContext(ctx, http.MethodDelete, "/v1/alerts", strings.NewReader("")) @@ -475,17 +475,17 @@ func TestDeleteAlertTrustedIPS(t *testing.T) { assert.Equal(t, `{"nbDeleted":"1"}`, w.Body.String()) } - lapi.InsertAlertFromFile(t, "./tests/alert_sample.json") + lapi.InsertAlertFromFile(t, ctx, "./tests/alert_sample.json") assertAlertDeleteFailedFromIP("4.3.2.1") assertAlertDeletedFromIP("1.2.3.4") - lapi.InsertAlertFromFile(t, "./tests/alert_sample.json") + lapi.InsertAlertFromFile(t, ctx, "./tests/alert_sample.json") assertAlertDeletedFromIP("1.2.4.0") - lapi.InsertAlertFromFile(t, "./tests/alert_sample.json") + lapi.InsertAlertFromFile(t, ctx, "./tests/alert_sample.json") assertAlertDeletedFromIP("1.2.4.1") - lapi.InsertAlertFromFile(t, "./tests/alert_sample.json") + lapi.InsertAlertFromFile(t, ctx, "./tests/alert_sample.json") assertAlertDeletedFromIP("1.2.4.255") - lapi.InsertAlertFromFile(t, "./tests/alert_sample.json") + lapi.InsertAlertFromFile(t, ctx, "./tests/alert_sample.json") assertAlertDeletedFromIP("127.0.0.1") } diff --git a/pkg/apiserver/api_key_test.go b/pkg/apiserver/api_key_test.go index 10e75ae47f1..014f255b892 100644 --- a/pkg/apiserver/api_key_test.go +++ b/pkg/apiserver/api_key_test.go @@ -15,7 +15,7 @@ func TestAPIKey(t *testing.T) { ctx := context.Background() - APIKey := CreateTestBouncer(t, config.API.Server.DbConfig) + APIKey := CreateTestBouncer(t, ctx, config.API.Server.DbConfig) // Login with empty token w := httptest.NewRecorder() diff --git 
a/pkg/apiserver/apic.go b/pkg/apiserver/apic.go index c8768e71b0a..e62bc663c16 100644 --- a/pkg/apiserver/apic.go +++ b/pkg/apiserver/apic.go @@ -454,11 +454,9 @@ func (a *apic) HandleDeletedDecisions(deletedDecisions []*models.Decision, delet return nbDeleted, nil } -func (a *apic) HandleDeletedDecisionsV3(deletedDecisions []*modelscapi.GetDecisionsStreamResponseDeletedItem, deleteCounters map[string]map[string]int) (int, error) { +func (a *apic) HandleDeletedDecisionsV3(ctx context.Context, deletedDecisions []*modelscapi.GetDecisionsStreamResponseDeletedItem, deleteCounters map[string]map[string]int) (int, error) { var nbDeleted int - ctx := context.TODO() - for _, decisions := range deletedDecisions { scope := decisions.Scope @@ -676,7 +674,7 @@ func (a *apic) PullTop(ctx context.Context, forcePull bool) error { addCounters, deleteCounters := makeAddAndDeleteCounters() // process deleted decisions - nbDeleted, err := a.HandleDeletedDecisionsV3(data.Deleted, deleteCounters) + nbDeleted, err := a.HandleDeletedDecisionsV3(ctx, data.Deleted, deleteCounters) if err != nil { return err } @@ -697,7 +695,7 @@ func (a *apic) PullTop(ctx context.Context, forcePull bool) error { alertsFromCapi := []*models.Alert{alert} alertsFromCapi = fillAlertsWithDecisions(alertsFromCapi, decisions, addCounters) - err = a.SaveAlerts(alertsFromCapi, addCounters, deleteCounters) + err = a.SaveAlerts(ctx, alertsFromCapi, addCounters, deleteCounters) if err != nil { return fmt.Errorf("while saving alerts: %w", err) } @@ -766,9 +764,7 @@ func (a *apic) ApplyApicWhitelists(decisions []*models.Decision) []*models.Decis return decisions[:outIdx] } -func (a *apic) SaveAlerts(alertsFromCapi []*models.Alert, addCounters map[string]map[string]int, deleteCounters map[string]map[string]int) error { - ctx := context.TODO() - +func (a *apic) SaveAlerts(ctx context.Context, alertsFromCapi []*models.Alert, addCounters map[string]map[string]int, deleteCounters map[string]map[string]int) error { for _, 
alert := range alertsFromCapi { setAlertScenario(alert, addCounters, deleteCounters) log.Debugf("%s has %d decisions", *alert.Source.Scope, len(alert.Decisions)) @@ -788,13 +784,13 @@ func (a *apic) SaveAlerts(alertsFromCapi []*models.Alert, addCounters map[string return nil } -func (a *apic) ShouldForcePullBlocklist(blocklist *modelscapi.BlocklistLink) (bool, error) { +func (a *apic) ShouldForcePullBlocklist(ctx context.Context, blocklist *modelscapi.BlocklistLink) (bool, error) { // we should force pull if the blocklist decisions are about to expire or there's no decision in the db alertQuery := a.dbClient.Ent.Alert.Query() alertQuery.Where(alert.SourceScopeEQ(fmt.Sprintf("%s:%s", types.ListOrigin, *blocklist.Name))) alertQuery.Order(ent.Desc(alert.FieldCreatedAt)) - alertInstance, err := alertQuery.First(context.Background()) + alertInstance, err := alertQuery.First(ctx) if err != nil { if ent.IsNotFound(err) { log.Debugf("no alert found for %s, force refresh", *blocklist.Name) @@ -807,7 +803,7 @@ func (a *apic) ShouldForcePullBlocklist(blocklist *modelscapi.BlocklistLink) (bo decisionQuery := a.dbClient.Ent.Decision.Query() decisionQuery.Where(decision.HasOwnerWith(alert.IDEQ(alertInstance.ID))) - firstDecision, err := decisionQuery.First(context.Background()) + firstDecision, err := decisionQuery.First(ctx) if err != nil { if ent.IsNotFound(err) { log.Debugf("no decision found for %s, force refresh", *blocklist.Name) @@ -837,7 +833,7 @@ func (a *apic) updateBlocklist(ctx context.Context, client *apiclient.ApiClient, } if !forcePull { - _forcePull, err := a.ShouldForcePullBlocklist(blocklist) + _forcePull, err := a.ShouldForcePullBlocklist(ctx, blocklist) if err != nil { return fmt.Errorf("while checking if we should force pull blocklist %s: %w", *blocklist.Name, err) } @@ -889,7 +885,7 @@ func (a *apic) updateBlocklist(ctx context.Context, client *apiclient.ApiClient, alertsFromCapi := []*models.Alert{alert} alertsFromCapi = 
fillAlertsWithDecisions(alertsFromCapi, decisions, addCounters) - err = a.SaveAlerts(alertsFromCapi, addCounters, nil) + err = a.SaveAlerts(ctx, alertsFromCapi, addCounters, nil) if err != nil { return fmt.Errorf("while saving alert from blocklist %s: %w", *blocklist.Name, err) } diff --git a/pkg/apiserver/apic_metrics_test.go b/pkg/apiserver/apic_metrics_test.go index 13a24668f26..d81af03f710 100644 --- a/pkg/apiserver/apic_metrics_test.go +++ b/pkg/apiserver/apic_metrics_test.go @@ -76,7 +76,7 @@ func TestAPICSendMetrics(t *testing.T) { ) require.NoError(t, err) - api := getAPIC(t) + api := getAPIC(t, ctx) api.pushInterval = time.Millisecond api.pushIntervalFirst = time.Millisecond api.apiClient = apiClient diff --git a/pkg/apiserver/apic_test.go b/pkg/apiserver/apic_test.go index a215edb2fbd..51b1f43c707 100644 --- a/pkg/apiserver/apic_test.go +++ b/pkg/apiserver/apic_test.go @@ -34,11 +34,9 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) -func getDBClient(t *testing.T) *database.Client { +func getDBClient(t *testing.T, ctx context.Context) *database.Client { t.Helper() - ctx := context.Background() - dbPath, err := os.CreateTemp("", "*sqlite") require.NoError(t, err) dbClient, err := database.NewClient(ctx, &csconfig.DatabaseCfg{ @@ -51,9 +49,9 @@ func getDBClient(t *testing.T) *database.Client { return dbClient } -func getAPIC(t *testing.T) *apic { +func getAPIC(t *testing.T, ctx context.Context) *apic { t.Helper() - dbClient := getDBClient(t) + dbClient := getDBClient(t, ctx) return &apic{ AlertsAddChan: make(chan []*models.Alert), @@ -84,8 +82,8 @@ func absDiff(a int, b int) int { return c } -func assertTotalDecisionCount(t *testing.T, dbClient *database.Client, count int) { - d := dbClient.Ent.Decision.Query().AllX(context.Background()) +func assertTotalDecisionCount(t *testing.T, ctx context.Context, dbClient *database.Client, count int) { + d := dbClient.Ent.Decision.Query().AllX(ctx) assert.Len(t, d, count) } @@ -111,9 +109,8 @@ func 
assertTotalAlertCount(t *testing.T, dbClient *database.Client, count int) { } func TestAPICCAPIPullIsOld(t *testing.T) { - api := getAPIC(t) - ctx := context.Background() + api := getAPIC(t, ctx) isOld, err := api.CAPIPullIsOld(ctx) require.NoError(t, err) @@ -169,7 +166,7 @@ func TestAPICFetchScenariosListFromDB(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - api := getAPIC(t) + api := getAPIC(t, ctx) for machineID, scenarios := range tc.machineIDsWithScenarios { api.dbClient.Ent.Machine.Create(). SetMachineId(machineID). @@ -183,7 +180,7 @@ func TestAPICFetchScenariosListFromDB(t *testing.T) { require.NoError(t, err) for machineID := range tc.machineIDsWithScenarios { - api.dbClient.Ent.Machine.Delete().Where(machine.MachineIdEQ(machineID)).ExecX(context.Background()) + api.dbClient.Ent.Machine.Delete().Where(machine.MachineIdEQ(machineID)).ExecX(ctx) } assert.ElementsMatch(t, tc.expectedScenarios, scenarios) @@ -192,6 +189,8 @@ func TestAPICFetchScenariosListFromDB(t *testing.T) { } func TestNewAPIC(t *testing.T) { + ctx := context.Background() + var testConfig *csconfig.OnlineApiClientCfg setConfig := func() { @@ -219,7 +218,7 @@ func TestNewAPIC(t *testing.T) { name: "simple", action: func() {}, args: args{ - dbClient: getDBClient(t), + dbClient: getDBClient(t, ctx), consoleConfig: LoadTestConfig(t).API.Server.ConsoleConfig, }, }, @@ -227,15 +226,13 @@ func TestNewAPIC(t *testing.T) { name: "error in parsing URL", action: func() { testConfig.Credentials.URL = "foobar http://" }, args: args{ - dbClient: getDBClient(t), + dbClient: getDBClient(t, ctx), consoleConfig: LoadTestConfig(t).API.Server.ConsoleConfig, }, expectedErr: "first path segment in URL cannot contain colon", }, } - ctx := context.Background() - for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { setConfig() @@ -259,7 +256,8 @@ func TestNewAPIC(t *testing.T) { } func TestAPICHandleDeletedDecisions(t *testing.T) { - api := getAPIC(t) + ctx := 
context.Background() + api := getAPIC(t, ctx) _, deleteCounters := makeAddAndDeleteCounters() decision1 := api.dbClient.Ent.Decision.Create(). @@ -280,7 +278,7 @@ func TestAPICHandleDeletedDecisions(t *testing.T) { SetOrigin(types.CAPIOrigin). SaveX(context.Background()) - assertTotalDecisionCount(t, api.dbClient, 2) + assertTotalDecisionCount(t, ctx, api.dbClient, 2) nbDeleted, err := api.HandleDeletedDecisions([]*models.Decision{{ Value: ptr.Of("1.2.3.4"), @@ -359,7 +357,7 @@ func TestAPICGetMetrics(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - apiClient := getAPIC(t) + apiClient := getAPIC(t, ctx) cleanUp(apiClient) for i, machineID := range tc.machineIDs { @@ -370,7 +368,7 @@ func TestAPICGetMetrics(t *testing.T) { SetScenarios("crowdsecurity/test"). SetLastPush(time.Time{}). SetUpdatedAt(time.Time{}). - ExecX(context.Background()) + ExecX(ctx) } for i, bouncerName := range tc.bouncers { @@ -380,7 +378,7 @@ func TestAPICGetMetrics(t *testing.T) { SetAPIKey("foobar"). SetRevoked(false). SetLastPull(time.Time{}). - ExecX(context.Background()) + ExecX(ctx) } foundMetrics, err := apiClient.GetMetrics(ctx) @@ -555,7 +553,7 @@ func TestFillAlertsWithDecisions(t *testing.T) { func TestAPICWhitelists(t *testing.T) { ctx := context.Background() - api := getAPIC(t) + api := getAPIC(t, ctx) // one whitelist on IP, one on CIDR api.whitelists = &csconfig.CapiWhitelist{} api.whitelists.Ips = append(api.whitelists.Ips, net.ParseIP("9.2.3.4"), net.ParseIP("7.2.3.4")) @@ -578,7 +576,7 @@ func TestAPICWhitelists(t *testing.T) { SetScenario("crowdsecurity/ssh-bf"). SetUntil(time.Now().Add(time.Hour)). 
ExecX(context.Background()) - assertTotalDecisionCount(t, api.dbClient, 1) + assertTotalDecisionCount(t, ctx, api.dbClient, 1) assertTotalValidDecisionCount(t, api.dbClient, 1) httpmock.Activate() @@ -693,7 +691,7 @@ func TestAPICWhitelists(t *testing.T) { err = api.PullTop(ctx, false) require.NoError(t, err) - assertTotalDecisionCount(t, api.dbClient, 5) // 2 from FIRE + 2 from bl + 1 existing + assertTotalDecisionCount(t, ctx, api.dbClient, 5) // 2 from FIRE + 2 from bl + 1 existing assertTotalValidDecisionCount(t, api.dbClient, 4) assertTotalAlertCount(t, api.dbClient, 3) // 2 for list sub , 1 for community list. alerts := api.dbClient.Ent.Alert.Query().AllX(context.Background()) @@ -742,7 +740,7 @@ func TestAPICWhitelists(t *testing.T) { func TestAPICPullTop(t *testing.T) { ctx := context.Background() - api := getAPIC(t) + api := getAPIC(t, ctx) api.dbClient.Ent.Decision.Create(). SetOrigin(types.CAPIOrigin). SetType("ban"). @@ -750,8 +748,8 @@ func TestAPICPullTop(t *testing.T) { SetScope("Ip"). SetScenario("crowdsecurity/ssh-bf"). SetUntil(time.Now().Add(time.Hour)). - ExecX(context.Background()) - assertTotalDecisionCount(t, api.dbClient, 1) + ExecX(ctx) + assertTotalDecisionCount(t, ctx, api.dbClient, 1) assertTotalValidDecisionCount(t, api.dbClient, 1) httpmock.Activate() @@ -835,7 +833,7 @@ func TestAPICPullTop(t *testing.T) { err = api.PullTop(ctx, false) require.NoError(t, err) - assertTotalDecisionCount(t, api.dbClient, 5) + assertTotalDecisionCount(t, ctx, api.dbClient, 5) assertTotalValidDecisionCount(t, api.dbClient, 4) assertTotalAlertCount(t, api.dbClient, 3) // 2 for list sub , 1 for community list. alerts := api.dbClient.Ent.Alert.Query().AllX(context.Background()) @@ -868,7 +866,7 @@ func TestAPICPullTop(t *testing.T) { func TestAPICPullTopBLCacheFirstCall(t *testing.T) { ctx := context.Background() // no decision in db, no last modified parameter. 
- api := getAPIC(t) + api := getAPIC(t, ctx) httpmock.Activate() defer httpmock.DeactivateAndReset() @@ -943,7 +941,7 @@ func TestAPICPullTopBLCacheFirstCall(t *testing.T) { func TestAPICPullTopBLCacheForceCall(t *testing.T) { ctx := context.Background() - api := getAPIC(t) + api := getAPIC(t, ctx) httpmock.Activate() defer httpmock.DeactivateAndReset() @@ -1019,7 +1017,7 @@ func TestAPICPullTopBLCacheForceCall(t *testing.T) { func TestAPICPullBlocklistCall(t *testing.T) { ctx := context.Background() - api := getAPIC(t) + api := getAPIC(t, ctx) httpmock.Activate() defer httpmock.DeactivateAndReset() @@ -1052,6 +1050,7 @@ func TestAPICPullBlocklistCall(t *testing.T) { } func TestAPICPush(t *testing.T) { + ctx := context.Background() tests := []struct { name string alerts []*models.Alert @@ -1105,7 +1104,7 @@ func TestAPICPush(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - api := getAPIC(t) + api := getAPIC(t, ctx) api.pushInterval = time.Millisecond api.pushIntervalFirst = time.Millisecond url, err := url.ParseRequestURI("http://api.crowdsec.net/") @@ -1144,7 +1143,7 @@ func TestAPICPush(t *testing.T) { func TestAPICPull(t *testing.T) { ctx := context.Background() - api := getAPIC(t) + api := getAPIC(t, ctx) tests := []struct { name string setUp func() @@ -1172,7 +1171,7 @@ func TestAPICPull(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - api = getAPIC(t) + api = getAPIC(t, ctx) api.pullInterval = time.Millisecond api.pullIntervalFirst = time.Millisecond url, err := url.ParseRequestURI("http://api.crowdsec.net/") @@ -1223,7 +1222,7 @@ func TestAPICPull(t *testing.T) { time.Sleep(time.Millisecond * 500) logrus.SetOutput(os.Stderr) assert.Contains(t, buf.String(), tc.logContains) - assertTotalDecisionCount(t, api.dbClient, tc.expectedDecisionCount) + assertTotalDecisionCount(t, ctx, api.dbClient, tc.expectedDecisionCount) }) } } diff --git a/pkg/apiserver/apiserver_test.go 
b/pkg/apiserver/apiserver_test.go index 081fa23f15e..c3f69c5c365 100644 --- a/pkg/apiserver/apiserver_test.go +++ b/pkg/apiserver/apiserver_test.go @@ -288,9 +288,7 @@ func CreateTestMachine(t *testing.T, router *gin.Engine, token string) string { return body } -func CreateTestBouncer(t *testing.T, config *csconfig.DatabaseCfg) string { - ctx := context.Background() - +func CreateTestBouncer(t *testing.T, ctx context.Context, config *csconfig.DatabaseCfg) string { dbClient, err := database.NewClient(ctx, config) require.NoError(t, err) diff --git a/pkg/apiserver/decisions_test.go b/pkg/apiserver/decisions_test.go index 1c70c495a3a..a0af6956443 100644 --- a/pkg/apiserver/decisions_test.go +++ b/pkg/apiserver/decisions_test.go @@ -1,6 +1,7 @@ package apiserver import ( + "context" "testing" "github.com/stretchr/testify/assert" @@ -12,82 +13,86 @@ const ( ) func TestDeleteDecisionRange(t *testing.T) { - lapi := SetupLAPITest(t) + ctx := context.Background() + lapi := SetupLAPITest(t, ctx) // Create Valid Alert - lapi.InsertAlertFromFile(t, "./tests/alert_minibulk.json") + lapi.InsertAlertFromFile(t, ctx, "./tests/alert_minibulk.json") // delete by ip wrong - w := lapi.RecordResponse(t, "DELETE", "/v1/decisions?range=1.2.3.0/24", emptyBody, PASSWORD) + w := lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions?range=1.2.3.0/24", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) assert.Equal(t, `{"nbDeleted":"0"}`, w.Body.String()) // delete by range - w = lapi.RecordResponse(t, "DELETE", "/v1/decisions?range=91.121.79.0/24&contains=false", emptyBody, PASSWORD) + w = lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions?range=91.121.79.0/24&contains=false", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) assert.Equal(t, `{"nbDeleted":"2"}`, w.Body.String()) // delete by range : ensure it was already deleted - w = lapi.RecordResponse(t, "DELETE", "/v1/decisions?range=91.121.79.0/24", emptyBody, PASSWORD) + w = lapi.RecordResponse(t, ctx, "DELETE", 
"/v1/decisions?range=91.121.79.0/24", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) assert.Equal(t, `{"nbDeleted":"0"}`, w.Body.String()) } func TestDeleteDecisionFilter(t *testing.T) { - lapi := SetupLAPITest(t) + ctx := context.Background() + lapi := SetupLAPITest(t, ctx) // Create Valid Alert - lapi.InsertAlertFromFile(t, "./tests/alert_minibulk.json") + lapi.InsertAlertFromFile(t, ctx, "./tests/alert_minibulk.json") // delete by ip wrong - w := lapi.RecordResponse(t, "DELETE", "/v1/decisions?ip=1.2.3.4", emptyBody, PASSWORD) + w := lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions?ip=1.2.3.4", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) assert.Equal(t, `{"nbDeleted":"0"}`, w.Body.String()) // delete by ip good - w = lapi.RecordResponse(t, "DELETE", "/v1/decisions?ip=91.121.79.179", emptyBody, PASSWORD) + w = lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions?ip=91.121.79.179", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) assert.Equal(t, `{"nbDeleted":"1"}`, w.Body.String()) // delete by scope/value - w = lapi.RecordResponse(t, "DELETE", "/v1/decisions?scopes=Ip&value=91.121.79.178", emptyBody, PASSWORD) + w = lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions?scopes=Ip&value=91.121.79.178", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) assert.Equal(t, `{"nbDeleted":"1"}`, w.Body.String()) } func TestDeleteDecisionFilterByScenario(t *testing.T) { - lapi := SetupLAPITest(t) + ctx := context.Background() + lapi := SetupLAPITest(t, ctx) // Create Valid Alert - lapi.InsertAlertFromFile(t, "./tests/alert_minibulk.json") + lapi.InsertAlertFromFile(t, ctx, "./tests/alert_minibulk.json") // delete by wrong scenario - w := lapi.RecordResponse(t, "DELETE", "/v1/decisions?scenario=crowdsecurity/ssh-bff", emptyBody, PASSWORD) + w := lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions?scenario=crowdsecurity/ssh-bff", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) assert.Equal(t, `{"nbDeleted":"0"}`, w.Body.String()) // delete by scenario good - 
w = lapi.RecordResponse(t, "DELETE", "/v1/decisions?scenario=crowdsecurity/ssh-bf", emptyBody, PASSWORD) + w = lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions?scenario=crowdsecurity/ssh-bf", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) assert.Equal(t, `{"nbDeleted":"2"}`, w.Body.String()) } func TestGetDecisionFilters(t *testing.T) { - lapi := SetupLAPITest(t) + ctx := context.Background() + lapi := SetupLAPITest(t, ctx) // Create Valid Alert - lapi.InsertAlertFromFile(t, "./tests/alert_minibulk.json") + lapi.InsertAlertFromFile(t, ctx, "./tests/alert_minibulk.json") // Get Decision - w := lapi.RecordResponse(t, "GET", "/v1/decisions", emptyBody, APIKEY) + w := lapi.RecordResponse(t, ctx, "GET", "/v1/decisions", emptyBody, APIKEY) assert.Equal(t, 200, w.Code) decisions, code := readDecisionsGetResp(t, w) assert.Equal(t, 200, code) @@ -101,7 +106,7 @@ func TestGetDecisionFilters(t *testing.T) { // Get Decision : type filter - w = lapi.RecordResponse(t, "GET", "/v1/decisions?type=ban", emptyBody, APIKEY) + w = lapi.RecordResponse(t, ctx, "GET", "/v1/decisions?type=ban", emptyBody, APIKEY) assert.Equal(t, 200, w.Code) decisions, code = readDecisionsGetResp(t, w) assert.Equal(t, 200, code) @@ -118,7 +123,7 @@ func TestGetDecisionFilters(t *testing.T) { // Get Decision : scope/value - w = lapi.RecordResponse(t, "GET", "/v1/decisions?scopes=Ip&value=91.121.79.179", emptyBody, APIKEY) + w = lapi.RecordResponse(t, ctx, "GET", "/v1/decisions?scopes=Ip&value=91.121.79.179", emptyBody, APIKEY) assert.Equal(t, 200, w.Code) decisions, code = readDecisionsGetResp(t, w) assert.Equal(t, 200, code) @@ -132,7 +137,7 @@ func TestGetDecisionFilters(t *testing.T) { // Get Decision : ip filter - w = lapi.RecordResponse(t, "GET", "/v1/decisions?ip=91.121.79.179", emptyBody, APIKEY) + w = lapi.RecordResponse(t, ctx, "GET", "/v1/decisions?ip=91.121.79.179", emptyBody, APIKEY) assert.Equal(t, 200, w.Code) decisions, code = readDecisionsGetResp(t, w) assert.Equal(t, 200, code) @@ 
-145,7 +150,7 @@ func TestGetDecisionFilters(t *testing.T) { // assert.NotContains(t, w.Body.String(), `"id":2,"origin":"crowdsec","scenario":"crowdsecurity/ssh-bf","scope":"Ip","type":"ban","value":"91.121.79.178"`) // Get decision : by range - w = lapi.RecordResponse(t, "GET", "/v1/decisions?range=91.121.79.0/24&contains=false", emptyBody, APIKEY) + w = lapi.RecordResponse(t, ctx, "GET", "/v1/decisions?range=91.121.79.0/24&contains=false", emptyBody, APIKEY) assert.Equal(t, 200, w.Code) decisions, code = readDecisionsGetResp(t, w) assert.Equal(t, 200, code) @@ -155,13 +160,14 @@ func TestGetDecisionFilters(t *testing.T) { } func TestGetDecision(t *testing.T) { - lapi := SetupLAPITest(t) + ctx := context.Background() + lapi := SetupLAPITest(t, ctx) // Create Valid Alert - lapi.InsertAlertFromFile(t, "./tests/alert_sample.json") + lapi.InsertAlertFromFile(t, ctx, "./tests/alert_sample.json") // Get Decision - w := lapi.RecordResponse(t, "GET", "/v1/decisions", emptyBody, APIKEY) + w := lapi.RecordResponse(t, ctx, "GET", "/v1/decisions", emptyBody, APIKEY) assert.Equal(t, 200, w.Code) decisions, code := readDecisionsGetResp(t, w) assert.Equal(t, 200, code) @@ -180,51 +186,52 @@ func TestGetDecision(t *testing.T) { assert.Equal(t, int64(3), decisions[2].ID) // Get Decision with invalid filter. 
It should ignore this filter - w = lapi.RecordResponse(t, "GET", "/v1/decisions?test=test", emptyBody, APIKEY) + w = lapi.RecordResponse(t, ctx, "GET", "/v1/decisions?test=test", emptyBody, APIKEY) assert.Equal(t, 200, w.Code) assert.Len(t, decisions, 3) } func TestDeleteDecisionByID(t *testing.T) { - lapi := SetupLAPITest(t) + ctx := context.Background() + lapi := SetupLAPITest(t, ctx) // Create Valid Alert - lapi.InsertAlertFromFile(t, "./tests/alert_sample.json") + lapi.InsertAlertFromFile(t, ctx, "./tests/alert_sample.json") // Have one alert - w := lapi.RecordResponse(t, "GET", "/v1/decisions/stream?startup=true", emptyBody, APIKEY) + w := lapi.RecordResponse(t, ctx, "GET", "/v1/decisions/stream?startup=true", emptyBody, APIKEY) decisions, code := readDecisionsStreamResp(t, w) assert.Equal(t, 200, code) assert.Empty(t, decisions["deleted"]) assert.Len(t, decisions["new"], 1) // Delete alert with Invalid ID - w = lapi.RecordResponse(t, "DELETE", "/v1/decisions/test", emptyBody, PASSWORD) + w = lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions/test", emptyBody, PASSWORD) assert.Equal(t, 400, w.Code) errResp, _ := readDecisionsErrorResp(t, w) assert.Equal(t, "decision_id must be valid integer", errResp["message"]) // Delete alert with ID that not exist - w = lapi.RecordResponse(t, "DELETE", "/v1/decisions/100", emptyBody, PASSWORD) + w = lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions/100", emptyBody, PASSWORD) assert.Equal(t, 500, w.Code) errResp, _ = readDecisionsErrorResp(t, w) assert.Equal(t, "decision with id '100' doesn't exist: unable to delete", errResp["message"]) // Have one alert - w = lapi.RecordResponse(t, "GET", "/v1/decisions/stream?startup=true", emptyBody, APIKEY) + w = lapi.RecordResponse(t, ctx, "GET", "/v1/decisions/stream?startup=true", emptyBody, APIKEY) decisions, code = readDecisionsStreamResp(t, w) assert.Equal(t, 200, code) assert.Empty(t, decisions["deleted"]) assert.Len(t, decisions["new"], 1) // Delete alert with valid ID - w 
= lapi.RecordResponse(t, "DELETE", "/v1/decisions/1", emptyBody, PASSWORD) + w = lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions/1", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) resp, _ := readDecisionsDeleteResp(t, w) assert.Equal(t, "1", resp.NbDeleted) // Have one alert (because we delete an alert that has dup targets) - w = lapi.RecordResponse(t, "GET", "/v1/decisions/stream?startup=true", emptyBody, APIKEY) + w = lapi.RecordResponse(t, ctx, "GET", "/v1/decisions/stream?startup=true", emptyBody, APIKEY) decisions, code = readDecisionsStreamResp(t, w) assert.Equal(t, 200, code) assert.Empty(t, decisions["deleted"]) @@ -232,33 +239,35 @@ func TestDeleteDecisionByID(t *testing.T) { } func TestDeleteDecision(t *testing.T) { - lapi := SetupLAPITest(t) + ctx := context.Background() + lapi := SetupLAPITest(t, ctx) // Create Valid Alert - lapi.InsertAlertFromFile(t, "./tests/alert_sample.json") + lapi.InsertAlertFromFile(t, ctx, "./tests/alert_sample.json") // Delete alert with Invalid filter - w := lapi.RecordResponse(t, "DELETE", "/v1/decisions?test=test", emptyBody, PASSWORD) + w := lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions?test=test", emptyBody, PASSWORD) assert.Equal(t, 500, w.Code) errResp, _ := readDecisionsErrorResp(t, w) assert.Equal(t, "'test' doesn't exist: invalid filter", errResp["message"]) // Delete all alert - w = lapi.RecordResponse(t, "DELETE", "/v1/decisions", emptyBody, PASSWORD) + w = lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) resp, _ := readDecisionsDeleteResp(t, w) assert.Equal(t, "3", resp.NbDeleted) } func TestStreamStartDecisionDedup(t *testing.T) { + ctx := context.Background() // Ensure that at stream startup we only get the longest decision - lapi := SetupLAPITest(t) + lapi := SetupLAPITest(t, ctx) // Create Valid Alert : 3 decisions for 127.0.0.1, longest has id=3 - lapi.InsertAlertFromFile(t, "./tests/alert_sample.json") + lapi.InsertAlertFromFile(t, ctx, 
"./tests/alert_sample.json") // Get Stream, we only get one decision (the longest one) - w := lapi.RecordResponse(t, "GET", "/v1/decisions/stream?startup=true", emptyBody, APIKEY) + w := lapi.RecordResponse(t, ctx, "GET", "/v1/decisions/stream?startup=true", emptyBody, APIKEY) decisions, code := readDecisionsStreamResp(t, w) assert.Equal(t, 200, code) assert.Empty(t, decisions["deleted"]) @@ -268,11 +277,11 @@ func TestStreamStartDecisionDedup(t *testing.T) { assert.Equal(t, "127.0.0.1", *decisions["new"][0].Value) // id=3 decision is deleted, this won't affect `deleted`, because there are decisions on the same ip - w = lapi.RecordResponse(t, "DELETE", "/v1/decisions/3", emptyBody, PASSWORD) + w = lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions/3", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) // Get Stream, we only get one decision (the longest one, id=2) - w = lapi.RecordResponse(t, "GET", "/v1/decisions/stream?startup=true", emptyBody, APIKEY) + w = lapi.RecordResponse(t, ctx, "GET", "/v1/decisions/stream?startup=true", emptyBody, APIKEY) decisions, code = readDecisionsStreamResp(t, w) assert.Equal(t, 200, code) assert.Empty(t, decisions["deleted"]) @@ -282,11 +291,11 @@ func TestStreamStartDecisionDedup(t *testing.T) { assert.Equal(t, "127.0.0.1", *decisions["new"][0].Value) // We delete another decision, yet don't receive it in stream, since there's another decision on same IP - w = lapi.RecordResponse(t, "DELETE", "/v1/decisions/2", emptyBody, PASSWORD) + w = lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions/2", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) // And get the remaining decision (1) - w = lapi.RecordResponse(t, "GET", "/v1/decisions/stream?startup=true", emptyBody, APIKEY) + w = lapi.RecordResponse(t, ctx, "GET", "/v1/decisions/stream?startup=true", emptyBody, APIKEY) decisions, code = readDecisionsStreamResp(t, w) assert.Equal(t, 200, code) assert.Empty(t, decisions["deleted"]) @@ -296,11 +305,11 @@ func 
TestStreamStartDecisionDedup(t *testing.T) { assert.Equal(t, "127.0.0.1", *decisions["new"][0].Value) // We delete the last decision, we receive the delete order - w = lapi.RecordResponse(t, "DELETE", "/v1/decisions/1", emptyBody, PASSWORD) + w = lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions/1", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) // and now we only get a deleted decision - w = lapi.RecordResponse(t, "GET", "/v1/decisions/stream?startup=true", emptyBody, APIKEY) + w = lapi.RecordResponse(t, ctx, "GET", "/v1/decisions/stream?startup=true", emptyBody, APIKEY) decisions, code = readDecisionsStreamResp(t, w) assert.Equal(t, 200, code) assert.Len(t, decisions["deleted"], 1) diff --git a/pkg/apiserver/heartbeat_test.go b/pkg/apiserver/heartbeat_test.go index fbf01c7fb8e..db051566f75 100644 --- a/pkg/apiserver/heartbeat_test.go +++ b/pkg/apiserver/heartbeat_test.go @@ -1,6 +1,7 @@ package apiserver import ( + "context" "net/http" "testing" @@ -8,11 +9,12 @@ import ( ) func TestHeartBeat(t *testing.T) { - lapi := SetupLAPITest(t) + ctx := context.Background() + lapi := SetupLAPITest(t, ctx) - w := lapi.RecordResponse(t, http.MethodGet, "/v1/heartbeat", emptyBody, "password") + w := lapi.RecordResponse(t, ctx, http.MethodGet, "/v1/heartbeat", emptyBody, "password") assert.Equal(t, 200, w.Code) - w = lapi.RecordResponse(t, "POST", "/v1/heartbeat", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "POST", "/v1/heartbeat", emptyBody, "password") assert.Equal(t, 405, w.Code) } diff --git a/pkg/apiserver/usage_metrics_test.go b/pkg/apiserver/usage_metrics_test.go index 4d17b0d6ed8..32aeb7d9a5a 100644 --- a/pkg/apiserver/usage_metrics_test.go +++ b/pkg/apiserver/usage_metrics_test.go @@ -187,14 +187,14 @@ func TestLPMetrics(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - lapi := SetupLAPITest(t) + lapi := SetupLAPITest(t, ctx) - dbClient, err := database.NewClient(context.Background(), lapi.DBConfig) + dbClient, err := 
database.NewClient(ctx, lapi.DBConfig) if err != nil { t.Fatalf("unable to create database client: %s", err) } - w := lapi.RecordResponse(t, http.MethodPost, "/v1/usage-metrics", strings.NewReader(tt.body), tt.authType) + w := lapi.RecordResponse(t, ctx, http.MethodPost, "/v1/usage-metrics", strings.NewReader(tt.body), tt.authType) assert.Equal(t, tt.expectedStatusCode, w.Code) assert.Contains(t, w.Body.String(), tt.expectedResponse) @@ -359,14 +359,14 @@ func TestRCMetrics(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - lapi := SetupLAPITest(t) + lapi := SetupLAPITest(t, ctx) - dbClient, err := database.NewClient(context.Background(), lapi.DBConfig) + dbClient, err := database.NewClient(ctx, lapi.DBConfig) if err != nil { t.Fatalf("unable to create database client: %s", err) } - w := lapi.RecordResponse(t, http.MethodPost, "/v1/usage-metrics", strings.NewReader(tt.body), tt.authType) + w := lapi.RecordResponse(t, ctx, http.MethodPost, "/v1/usage-metrics", strings.NewReader(tt.body), tt.authType) assert.Equal(t, tt.expectedStatusCode, w.Code) assert.Contains(t, w.Body.String(), tt.expectedResponse) From 4e3495dbba38d65705c79f946b8da8fb91ac6252 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 3 Oct 2024 15:27:48 +0200 Subject: [PATCH 309/318] lint/revive: check tags on non-exported struct fields (#3257) * lint/revive: check tags on non-exported struct fields * update .golangci.yaml; remove full list of enabled linters * lint --- .golangci.yml | 69 +-------------------------- cmd/crowdsec-cli/climetrics/show.go | 3 +- cmd/crowdsec-cli/dashboard.go | 3 +- pkg/appsec/appsec_rules_collection.go | 10 ++-- pkg/leakybucket/manager_load.go | 22 ++++----- 5 files changed, 20 insertions(+), 87 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index a9d962610df..786bb18d8e7 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -135,14 +135,10 @@ linters-settings: arguments: [7] - name: 
max-public-structs disabled: true - - name: optimize-operands-order - disabled: true - name: nested-structs disabled: true - name: package-comments disabled: true - - name: struct-tag - disabled: true - name: redundant-import-alias disabled: true - name: time-equal @@ -217,6 +213,7 @@ linters: # - execinquery - exportloopref + - gomnd # # Redundant @@ -232,69 +229,6 @@ linters: - intrange # intrange is a linter to find places where for loops could make use of an integer range. - # - # Enabled - # - - # - asasalint # check for pass []any as any in variadic func(...any) - # - asciicheck # checks that all code identifiers does not have non-ASCII symbols in the name - # - bidichk # Checks for dangerous unicode character sequences - # - bodyclose # checks whether HTTP response body is closed successfully - # - copyloopvar # copyloopvar is a linter detects places where loop variables are copied - # - decorder # check declaration order and count of types, constants, variables and functions - # - depguard # Go linter that checks if package imports are in a list of acceptable packages - # - dupword # checks for duplicate words in the source code - # - durationcheck # check for two durations multiplied together - # - errcheck # errcheck is a program for checking for unchecked errors in Go code. These unchecked errors can be critical bugs in some cases - # - errorlint # errorlint is a linter for that can be used to find code that will cause problems with the error wrapping scheme introduced in Go 1.13. - # - ginkgolinter # enforces standards of using ginkgo and gomega - # - gocheckcompilerdirectives # Checks that go compiler directive comments (//go:) are valid. - # - gochecknoinits # Checks that no init functions are present in Go code - # - gochecksumtype # Run exhaustiveness checks on Go "sum types" - # - gocritic # Provides diagnostics that check for bugs, performance and style issues. 
- # - goheader # Checks is file header matches to pattern - # - gomoddirectives # Manage the use of 'replace', 'retract', and 'excludes' directives in go.mod. - # - gomodguard # Allow and block list linter for direct Go module dependencies. This is different from depguard where there are different block types for example version constraints and module recommendations. - # - goprintffuncname # Checks that printf-like functions are named with `f` at the end - # - gosimple # (megacheck): Linter for Go source code that specializes in simplifying code - # - gosmopolitan # Report certain i18n/l10n anti-patterns in your Go codebase - # - govet # (vet, vetshadow): Vet examines Go source code and reports suspicious constructs. It is roughly the same as 'go vet' and uses its passes. - # - grouper # Analyze expression groups. - # - importas # Enforces consistent import aliases - # - ineffassign # Detects when assignments to existing variables are not used - # - interfacebloat # A linter that checks the number of methods inside an interface. - # - loggercheck # (logrlint): Checks key value pairs for common logger libraries (kitlog,klog,logr,zap). - # - logrlint # Check logr arguments. - # - maintidx # maintidx measures the maintainability index of each function. - # - makezero # Finds slice declarations with non-zero initial length - # - mirror # reports wrong mirror patterns of bytes/strings usage - # - misspell # Finds commonly misspelled English words - # - nakedret # Checks that functions with naked returns are not longer than a maximum size (can be zero). - # - nestif # Reports deeply nested if statements - # - nilerr # Finds the code that returns nil even if it checks that the error is not nil. - # - nolintlint # Reports ill-formed or insufficient nolint directives - # - nonamedreturns # Reports all named returns - # - nosprintfhostport # Checks for misuse of Sprintf to construct a host with port in a URL. 
- # - perfsprint # Checks that fmt.Sprintf can be replaced with a faster alternative. - # - predeclared # find code that shadows one of Go's predeclared identifiers - # - reassign # Checks that package variables are not reassigned - # - revive # Fast, configurable, extensible, flexible, and beautiful linter for Go. Drop-in replacement of golint. - # - rowserrcheck # checks whether Rows.Err of rows is checked successfully - # - sloglint # ensure consistent code style when using log/slog - # - spancheck # Checks for mistakes with OpenTelemetry/Census spans. - # - sqlclosecheck # Checks that sql.Rows, sql.Stmt, sqlx.NamedStmt, pgx.Query are closed. - # - staticcheck # (megacheck): It's a set of rules from staticcheck. It's not the same thing as the staticcheck binary. The author of staticcheck doesn't support or approve the use of staticcheck as a library inside golangci-lint. - # - stylecheck # Stylecheck is a replacement for golint - # - tenv # tenv is analyzer that detects using os.Setenv instead of t.Setenv since Go1.17 - # - testableexamples # linter checks if examples are testable (have an expected output) - # - testifylint # Checks usage of github.com/stretchr/testify. - # - tparallel # tparallel detects inappropriate usage of t.Parallel() method in your Go test codes - # - unconvert # Remove unnecessary type conversions - # - unused # (megacheck): Checks Go code for unused constants, variables, functions and types - # - usestdlibvars # A linter that detect the possibility to use variables/constants from the Go standard library. - # - wastedassign # Finds wasted assignment statements - # - zerologlint # Detects the wrong usage of `zerolog` that a user forgets to dispatch with `Send` or `Msg` - # # Recommended? 
(easy) # @@ -322,7 +256,6 @@ linters: - containedctx # containedctx is a linter that detects struct contained context.Context field - contextcheck # check whether the function uses a non-inherited context - errname # Checks that sentinel errors are prefixed with the `Err` and error types are suffixed with the `Error`. - - gomnd # An analyzer to detect magic numbers. - ireturn # Accept Interfaces, Return Concrete Types - mnd # An analyzer to detect magic numbers. - nilnil # Checks that there is no simultaneous return of `nil` error and an invalid value. diff --git a/cmd/crowdsec-cli/climetrics/show.go b/cmd/crowdsec-cli/climetrics/show.go index 7559463b66b..045959048f6 100644 --- a/cmd/crowdsec-cli/climetrics/show.go +++ b/cmd/crowdsec-cli/climetrics/show.go @@ -5,9 +5,8 @@ import ( "errors" "fmt" - log "github.com/sirupsen/logrus" - "github.com/fatih/color" + log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" diff --git a/cmd/crowdsec-cli/dashboard.go b/cmd/crowdsec-cli/dashboard.go index 13cebe3dbd5..41db9e6cbf2 100644 --- a/cmd/crowdsec-cli/dashboard.go +++ b/cmd/crowdsec-cli/dashboard.go @@ -20,10 +20,11 @@ import ( log "github.com/sirupsen/logrus" "github.com/spf13/cobra" + "github.com/crowdsecurity/go-cs-lib/version" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/idgen" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/metabase" - "github.com/crowdsecurity/go-cs-lib/version" ) var ( diff --git a/pkg/appsec/appsec_rules_collection.go b/pkg/appsec/appsec_rules_collection.go index 09c1670de70..d283f95cb19 100644 --- a/pkg/appsec/appsec_rules_collection.go +++ b/pkg/appsec/appsec_rules_collection.go @@ -29,11 +29,11 @@ type AppsecCollectionConfig struct { SecLangRules []string `yaml:"seclang_rules"` Rules []appsec_rule.CustomRule `yaml:"rules"` - Labels map[string]interface{} `yaml:"labels"` //Labels is K:V list aiming at providing context 
the overflow + Labels map[string]interface{} `yaml:"labels"` // Labels is K:V list aiming at providing context the overflow - Data interface{} `yaml:"data"` //Ignore it - hash string `yaml:"-"` - version string `yaml:"-"` + Data interface{} `yaml:"data"` // Ignore it + hash string + version string } type RulesDetails struct { @@ -108,7 +108,7 @@ func LoadCollection(pattern string, logger *log.Entry) ([]AppsecCollection, erro logger.Debugf("Adding rule %s", strRule) appsecCol.Rules = append(appsecCol.Rules, strRule) - //We only take the first id, as it's the one of the "main" rule + // We only take the first id, as it's the one of the "main" rule if _, ok := AppsecRulesDetails[int(rulesId[0])]; !ok { AppsecRulesDetails[int(rulesId[0])] = RulesDetails{ LogLevel: log.InfoLevel, diff --git a/pkg/leakybucket/manager_load.go b/pkg/leakybucket/manager_load.go index 1b62b29dc3c..b8310b8cb17 100644 --- a/pkg/leakybucket/manager_load.go +++ b/pkg/leakybucket/manager_load.go @@ -45,12 +45,12 @@ type BucketFactory struct { Debug bool `yaml:"debug"` // Debug, when set to true, will enable debugging for _this_ scenario specifically Labels map[string]interface{} `yaml:"labels"` // Labels is K:V list aiming at providing context the overflow Blackhole string `yaml:"blackhole,omitempty"` // Blackhole is a duration that, if present, will prevent same bucket partition to overflow more often than $duration - logger *log.Entry `yaml:"-"` // logger is bucket-specific logger (used by Debug as well) - Reprocess bool `yaml:"reprocess"` // Reprocess, if true, will for the bucket to be re-injected into processing chain - CacheSize int `yaml:"cache_size"` // CacheSize, if > 0, limits the size of in-memory cache of the bucket - Profiling bool `yaml:"profiling"` // Profiling, if true, will make the bucket record pours/overflows/etc. 
- OverflowFilter string `yaml:"overflow_filter"` // OverflowFilter if present, is a filter that must return true for the overflow to go through - ConditionalOverflow string `yaml:"condition"` // condition if present, is an expression that must return true for the bucket to overflow + logger *log.Entry // logger is bucket-specific logger (used by Debug as well) + Reprocess bool `yaml:"reprocess"` // Reprocess, if true, will for the bucket to be re-injected into processing chain + CacheSize int `yaml:"cache_size"` // CacheSize, if > 0, limits the size of in-memory cache of the bucket + Profiling bool `yaml:"profiling"` // Profiling, if true, will make the bucket record pours/overflows/etc. + OverflowFilter string `yaml:"overflow_filter"` // OverflowFilter if present, is a filter that must return true for the overflow to go through + ConditionalOverflow string `yaml:"condition"` // condition if present, is an expression that must return true for the bucket to overflow BayesianPrior float32 `yaml:"bayesian_prior"` BayesianThreshold float32 `yaml:"bayesian_threshold"` BayesianConditions []RawBayesianCondition `yaml:"bayesian_conditions"` // conditions for the bayesian bucket @@ -68,11 +68,11 @@ type BucketFactory struct { processors []Processor // processors is the list of hooks for pour/overflow/create (cf. uniq, blackhole etc.) output bool // ?? 
ScenarioVersion string `yaml:"version,omitempty"` - hash string `yaml:"-"` - Simulated bool `yaml:"simulated"` // Set to true if the scenario instantiating the bucket was in the exclusion list - tomb *tomb.Tomb `yaml:"-"` - wgPour *sync.WaitGroup `yaml:"-"` - wgDumpState *sync.WaitGroup `yaml:"-"` + hash string + Simulated bool `yaml:"simulated"` // Set to true if the scenario instantiating the bucket was in the exclusion list + tomb *tomb.Tomb + wgPour *sync.WaitGroup + wgDumpState *sync.WaitGroup orderEvent bool } From f6af791b835991d9c77deee6817eb544700016fc Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 4 Oct 2024 11:28:18 +0200 Subject: [PATCH 310/318] CI: update test dependencies (#3267) --- docker/test/Pipfile.lock | 331 ++++++++++++++++++++------------------- 1 file changed, 174 insertions(+), 157 deletions(-) diff --git a/docker/test/Pipfile.lock b/docker/test/Pipfile.lock index 2cb587b6b88..99184d9f2a2 100644 --- a/docker/test/Pipfile.lock +++ b/docker/test/Pipfile.lock @@ -18,69 +18,84 @@ "default": { "certifi": { "hashes": [ - "sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b", - "sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90" + "sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8", + "sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9" ], "markers": "python_version >= '3.6'", - "version": "==2024.7.4" + "version": "==2024.8.30" }, "cffi": { "hashes": [ - "sha256:0c9ef6ff37e974b73c25eecc13952c55bceed9112be2d9d938ded8e856138bcc", - "sha256:131fd094d1065b19540c3d72594260f118b231090295d8c34e19a7bbcf2e860a", - "sha256:1b8ebc27c014c59692bb2664c7d13ce7a6e9a629be20e54e7271fa696ff2b417", - "sha256:2c56b361916f390cd758a57f2e16233eb4f64bcbeee88a4881ea90fca14dc6ab", - "sha256:2d92b25dbf6cae33f65005baf472d2c245c050b1ce709cc4588cdcdd5495b520", - "sha256:31d13b0f99e0836b7ff893d37af07366ebc90b678b6664c955b54561fc36ef36", - 
"sha256:32c68ef735dbe5857c810328cb2481e24722a59a2003018885514d4c09af9743", - "sha256:3686dffb02459559c74dd3d81748269ffb0eb027c39a6fc99502de37d501faa8", - "sha256:582215a0e9adbe0e379761260553ba11c58943e4bbe9c36430c4ca6ac74b15ed", - "sha256:5b50bf3f55561dac5438f8e70bfcdfd74543fd60df5fa5f62d94e5867deca684", - "sha256:5bf44d66cdf9e893637896c7faa22298baebcd18d1ddb6d2626a6e39793a1d56", - "sha256:6602bc8dc6f3a9e02b6c22c4fc1e47aa50f8f8e6d3f78a5e16ac33ef5fefa324", - "sha256:673739cb539f8cdaa07d92d02efa93c9ccf87e345b9a0b556e3ecc666718468d", - "sha256:68678abf380b42ce21a5f2abde8efee05c114c2fdb2e9eef2efdb0257fba1235", - "sha256:68e7c44931cc171c54ccb702482e9fc723192e88d25a0e133edd7aff8fcd1f6e", - "sha256:6b3d6606d369fc1da4fd8c357d026317fbb9c9b75d36dc16e90e84c26854b088", - "sha256:748dcd1e3d3d7cd5443ef03ce8685043294ad6bd7c02a38d1bd367cfd968e000", - "sha256:7651c50c8c5ef7bdb41108b7b8c5a83013bfaa8a935590c5d74627c047a583c7", - "sha256:7b78010e7b97fef4bee1e896df8a4bbb6712b7f05b7ef630f9d1da00f6444d2e", - "sha256:7e61e3e4fa664a8588aa25c883eab612a188c725755afff6289454d6362b9673", - "sha256:80876338e19c951fdfed6198e70bc88f1c9758b94578d5a7c4c91a87af3cf31c", - "sha256:8895613bcc094d4a1b2dbe179d88d7fb4a15cee43c052e8885783fac397d91fe", - "sha256:88e2b3c14bdb32e440be531ade29d3c50a1a59cd4e51b1dd8b0865c54ea5d2e2", - "sha256:8f8e709127c6c77446a8c0a8c8bf3c8ee706a06cd44b1e827c3e6a2ee6b8c098", - "sha256:9cb4a35b3642fc5c005a6755a5d17c6c8b6bcb6981baf81cea8bfbc8903e8ba8", - "sha256:9f90389693731ff1f659e55c7d1640e2ec43ff725cc61b04b2f9c6d8d017df6a", - "sha256:a09582f178759ee8128d9270cd1344154fd473bb77d94ce0aeb2a93ebf0feaf0", - "sha256:a6a14b17d7e17fa0d207ac08642c8820f84f25ce17a442fd15e27ea18d67c59b", - "sha256:a72e8961a86d19bdb45851d8f1f08b041ea37d2bd8d4fd19903bc3083d80c896", - "sha256:abd808f9c129ba2beda4cfc53bde801e5bcf9d6e0f22f095e45327c038bfe68e", - "sha256:ac0f5edd2360eea2f1daa9e26a41db02dd4b0451b48f7c318e217ee092a213e9", - "sha256:b29ebffcf550f9da55bec9e02ad430c992a87e5f512cd63388abb76f1036d8d2", 
- "sha256:b2ca4e77f9f47c55c194982e10f058db063937845bb2b7a86c84a6cfe0aefa8b", - "sha256:b7be2d771cdba2942e13215c4e340bfd76398e9227ad10402a8767ab1865d2e6", - "sha256:b84834d0cf97e7d27dd5b7f3aca7b6e9263c56308ab9dc8aae9784abb774d404", - "sha256:b86851a328eedc692acf81fb05444bdf1891747c25af7529e39ddafaf68a4f3f", - "sha256:bcb3ef43e58665bbda2fb198698fcae6776483e0c4a631aa5647806c25e02cc0", - "sha256:c0f31130ebc2d37cdd8e44605fb5fa7ad59049298b3f745c74fa74c62fbfcfc4", - "sha256:c6a164aa47843fb1b01e941d385aab7215563bb8816d80ff3a363a9f8448a8dc", - "sha256:d8a9d3ebe49f084ad71f9269834ceccbf398253c9fac910c4fd7053ff1386936", - "sha256:db8e577c19c0fda0beb7e0d4e09e0ba74b1e4c092e0e40bfa12fe05b6f6d75ba", - "sha256:dc9b18bf40cc75f66f40a7379f6a9513244fe33c0e8aa72e2d56b0196a7ef872", - "sha256:e09f3ff613345df5e8c3667da1d918f9149bd623cd9070c983c013792a9a62eb", - "sha256:e4108df7fe9b707191e55f33efbcb2d81928e10cea45527879a4749cbe472614", - "sha256:e6024675e67af929088fda399b2094574609396b1decb609c55fa58b028a32a1", - "sha256:e70f54f1796669ef691ca07d046cd81a29cb4deb1e5f942003f401c0c4a2695d", - "sha256:e715596e683d2ce000574bae5d07bd522c781a822866c20495e52520564f0969", - "sha256:e760191dd42581e023a68b758769e2da259b5d52e3103c6060ddc02c9edb8d7b", - "sha256:ed86a35631f7bfbb28e108dd96773b9d5a6ce4811cf6ea468bb6a359b256b1e4", - "sha256:ee07e47c12890ef248766a6e55bd38ebfb2bb8edd4142d56db91b21ea68b7627", - "sha256:fa3a0128b152627161ce47201262d3140edb5a5c3da88d73a1b790a959126956", - "sha256:fcc8eb6d5902bb1cf6dc4f187ee3ea80a1eba0a89aba40a5cb20a5087d961357" + "sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8", + "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2", + "sha256:0e2b1fac190ae3ebfe37b979cc1ce69c81f4e4fe5746bb401dca63a9062cdaf1", + "sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15", + "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36", + 
"sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824", + "sha256:1d599671f396c4723d016dbddb72fe8e0397082b0a77a4fab8028923bec050e8", + "sha256:28b16024becceed8c6dfbc75629e27788d8a3f9030691a1dbf9821a128b22c36", + "sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17", + "sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf", + "sha256:31000ec67d4221a71bd3f67df918b1f88f676f1c3b535a7eb473255fdc0b83fc", + "sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3", + "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed", + "sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702", + "sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1", + "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8", + "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903", + "sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6", + "sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d", + "sha256:636062ea65bd0195bc012fea9321aca499c0504409f413dc88af450b57ffd03b", + "sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e", + "sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be", + "sha256:6f17be4345073b0a7b8ea599688f692ac3ef23ce28e5df79c04de519dbc4912c", + "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683", + "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9", + "sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c", + "sha256:7596d6620d3fa590f677e9ee430df2958d2d6d6de2feeae5b20e82c00b76fbf8", + "sha256:78122be759c3f8a014ce010908ae03364d00a1f81ab5c7f4a7a5120607ea56e1", + "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4", + "sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655", + "sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67", 
+ "sha256:9755e4345d1ec879e3849e62222a18c7174d65a6a92d5b346b1863912168b595", + "sha256:98e3969bcff97cae1b2def8ba499ea3d6f31ddfdb7635374834cf89a1a08ecf0", + "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65", + "sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41", + "sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6", + "sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401", + "sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6", + "sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3", + "sha256:b2ab587605f4ba0bf81dc0cb08a41bd1c0a5906bd59243d56bad7668a6fc6c16", + "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93", + "sha256:c03e868a0b3bc35839ba98e74211ed2b05d2119be4e8a0f224fba9384f1fe02e", + "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4", + "sha256:c7eac2ef9b63c79431bc4b25f1cd649d7f061a28808cbc6c47b534bd789ef964", + "sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c", + "sha256:ca74b8dbe6e8e8263c0ffd60277de77dcee6c837a3d0881d8c1ead7268c9e576", + "sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0", + "sha256:cdf5ce3acdfd1661132f2a9c19cac174758dc2352bfe37d98aa7512c6b7178b3", + "sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662", + "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3", + "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff", + "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5", + "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd", + "sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f", + "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5", + "sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14", + 
"sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d", + "sha256:e221cf152cff04059d011ee126477f0d9588303eb57e88923578ace7baad17f9", + "sha256:e31ae45bc2e29f6b2abd0de1cc3b9d5205aa847cafaecb8af1476a609a2f6eb7", + "sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382", + "sha256:f1e22e8c4419538cb197e4dd60acc919d7696e5ef98ee4da4e01d3f8cfa4cc5a", + "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e", + "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a", + "sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4", + "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99", + "sha256:f7f5baafcc48261359e14bcd6d9bff6d4b28d9103847c9e136694cb0501aef87", + "sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b" ], "markers": "platform_python_implementation != 'PyPy'", - "version": "==1.16.0" + "version": "==1.17.1" }, "charset-normalizer": { "hashes": [ @@ -180,36 +195,36 @@ }, "cryptography": { "hashes": [ - "sha256:0663585d02f76929792470451a5ba64424acc3cd5227b03921dab0e2f27b1709", - "sha256:08a24a7070b2b6804c1940ff0f910ff728932a9d0e80e7814234269f9d46d069", - "sha256:232ce02943a579095a339ac4b390fbbe97f5b5d5d107f8a08260ea2768be8cc2", - "sha256:2905ccf93a8a2a416f3ec01b1a7911c3fe4073ef35640e7ee5296754e30b762b", - "sha256:299d3da8e00b7e2b54bb02ef58d73cd5f55fb31f33ebbf33bd00d9aa6807df7e", - "sha256:2c6d112bf61c5ef44042c253e4859b3cbbb50df2f78fa8fae6747a7814484a70", - "sha256:31e44a986ceccec3d0498e16f3d27b2ee5fdf69ce2ab89b52eaad1d2f33d8778", - "sha256:3d9a1eca329405219b605fac09ecfc09ac09e595d6def650a437523fcd08dd22", - "sha256:3dcdedae5c7710b9f97ac6bba7e1052b95c7083c9d0e9df96e02a1932e777895", - "sha256:47ca71115e545954e6c1d207dd13461ab81f4eccfcb1345eac874828b5e3eaaf", - "sha256:4a997df8c1c2aae1e1e5ac49c2e4f610ad037fc5a3aadc7b64e39dea42249431", - "sha256:51956cf8730665e2bdf8ddb8da0056f699c1a5715648c1b0144670c1ba00b48f", - 
"sha256:5bcb8a5620008a8034d39bce21dc3e23735dfdb6a33a06974739bfa04f853947", - "sha256:64c3f16e2a4fc51c0d06af28441881f98c5d91009b8caaff40cf3548089e9c74", - "sha256:6e2b11c55d260d03a8cf29ac9b5e0608d35f08077d8c087be96287f43af3ccdc", - "sha256:7b3f5fe74a5ca32d4d0f302ffe6680fcc5c28f8ef0dc0ae8f40c0f3a1b4fca66", - "sha256:844b6d608374e7d08f4f6e6f9f7b951f9256db41421917dfb2d003dde4cd6b66", - "sha256:9a8d6802e0825767476f62aafed40532bd435e8a5f7d23bd8b4f5fd04cc80ecf", - "sha256:aae4d918f6b180a8ab8bf6511a419473d107df4dbb4225c7b48c5c9602c38c7f", - "sha256:ac1955ce000cb29ab40def14fd1bbfa7af2017cca696ee696925615cafd0dce5", - "sha256:b88075ada2d51aa9f18283532c9f60e72170041bba88d7f37e49cbb10275299e", - "sha256:cb013933d4c127349b3948aa8aaf2f12c0353ad0eccd715ca789c8a0f671646f", - "sha256:cc70b4b581f28d0a254d006f26949245e3657d40d8857066c2ae22a61222ef55", - "sha256:e9c5266c432a1e23738d178e51c2c7a5e2ddf790f248be939448c0ba2021f9d1", - "sha256:ea9e57f8ea880eeea38ab5abf9fbe39f923544d7884228ec67d666abd60f5a47", - "sha256:ee0c405832ade84d4de74b9029bedb7b31200600fa524d218fc29bfa371e97f5", - "sha256:fdcb265de28585de5b859ae13e3846a8e805268a823a12a4da2597f1f5afc9f0" + "sha256:014f58110f53237ace6a408b5beb6c427b64e084eb451ef25a28308270086494", + "sha256:1bbcce1a551e262dfbafb6e6252f1ae36a248e615ca44ba302df077a846a8806", + "sha256:203e92a75716d8cfb491dc47c79e17d0d9207ccffcbcb35f598fbe463ae3444d", + "sha256:27e613d7077ac613e399270253259d9d53872aaf657471473ebfc9a52935c062", + "sha256:2bd51274dcd59f09dd952afb696bf9c61a7a49dfc764c04dd33ef7a6b502a1e2", + "sha256:38926c50cff6f533f8a2dae3d7f19541432610d114a70808f0926d5aaa7121e4", + "sha256:511f4273808ab590912a93ddb4e3914dfd8a388fed883361b02dea3791f292e1", + "sha256:58d4e9129985185a06d849aa6df265bdd5a74ca6e1b736a77959b498e0505b85", + "sha256:5b43d1ea6b378b54a1dc99dd8a2b5be47658fe9a7ce0a58ff0b55f4b43ef2b84", + "sha256:61ec41068b7b74268fa86e3e9e12b9f0c21fcf65434571dbb13d954bceb08042", + "sha256:666ae11966643886c2987b3b721899d250855718d6d9ce41b521252a17985f4d", 
+ "sha256:68aaecc4178e90719e95298515979814bda0cbada1256a4485414860bd7ab962", + "sha256:7c05650fe8023c5ed0d46793d4b7d7e6cd9c04e68eabe5b0aeea836e37bdcec2", + "sha256:80eda8b3e173f0f247f711eef62be51b599b5d425c429b5d4ca6a05e9e856baa", + "sha256:8385d98f6a3bf8bb2d65a73e17ed87a3ba84f6991c155691c51112075f9ffc5d", + "sha256:88cce104c36870d70c49c7c8fd22885875d950d9ee6ab54df2745f83ba0dc365", + "sha256:9d3cdb25fa98afdd3d0892d132b8d7139e2c087da1712041f6b762e4f807cc96", + "sha256:a575913fb06e05e6b4b814d7f7468c2c660e8bb16d8d5a1faf9b33ccc569dd47", + "sha256:ac119bb76b9faa00f48128b7f5679e1d8d437365c5d26f1c2c3f0da4ce1b553d", + "sha256:c1332724be35d23a854994ff0b66530119500b6053d0bd3363265f7e5e77288d", + "sha256:d03a475165f3134f773d1388aeb19c2d25ba88b6a9733c5c590b9ff7bbfa2e0c", + "sha256:d75601ad10b059ec832e78823b348bfa1a59f6b8d545db3a24fd44362a1564cb", + "sha256:de41fd81a41e53267cb020bb3a7212861da53a7d39f863585d13ea11049cf277", + "sha256:e710bf40870f4db63c3d7d929aa9e09e4e7ee219e703f949ec4073b4294f6172", + "sha256:ea25acb556320250756e53f9e20a4177515f012c9eaea17eb7587a8c4d8ae034", + "sha256:f98bf604c82c416bc829e490c700ca1553eafdf2912a91e23a79d97d9801372a", + "sha256:fba1007b3ef89946dbbb515aeeb41e30203b004f0b4b00e5e16078b518563289" ], "markers": "python_version >= '3.7'", - "version": "==43.0.0" + "version": "==43.0.1" }, "docker": { "hashes": [ @@ -229,11 +244,11 @@ }, "idna": { "hashes": [ - "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc", - "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0" + "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", + "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3" ], - "markers": "python_version >= '3.5'", - "version": "==3.7" + "markers": "python_version >= '3.6'", + "version": "==3.10" }, "iniconfig": { "hashes": [ @@ -292,11 +307,11 @@ }, "pytest": { "hashes": [ - "sha256:7e8e5c5abd6e93cb1cc151f23e57adc31fcf8cfd2a3ff2da63e23f732de35db6", - 
"sha256:e9600ccf4f563976e2c99fa02c7624ab938296551f280835ee6516df8bc4ae8c" + "sha256:70b98107bd648308a7952b06e6ca9a50bc660be218d53c257cc1fc94fda10181", + "sha256:a6853c7375b2663155079443d2e45de913a911a11d669df02a50814944db57b2" ], "markers": "python_version >= '3.8'", - "version": "==8.3.1" + "version": "==8.3.3" }, "pytest-cs": { "git": "https://github.com/crowdsecurity/pytest-cs.git", @@ -337,60 +352,62 @@ }, "pyyaml": { "hashes": [ - "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5", - "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc", - "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df", - "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741", - "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206", - "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27", - "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595", - "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62", - "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98", - "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696", - "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290", - "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9", - "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d", - "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6", - "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867", - "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47", - "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486", - "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6", - "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3", - "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007", - 
"sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938", - "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0", - "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c", - "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735", - "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d", - "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28", - "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4", - "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba", - "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8", - "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef", - "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5", - "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd", - "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3", - "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0", - "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515", - "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c", - "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c", - "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924", - "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34", - "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43", - "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859", - "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673", - "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54", - "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a", - "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b", - "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab", 
- "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa", - "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c", - "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585", - "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d", - "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f" + "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff", + "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48", + "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086", + "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e", + "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133", + "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5", + "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484", + "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee", + "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5", + "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68", + "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a", + "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf", + "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99", + "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8", + "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85", + "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19", + "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc", + "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a", + "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1", + "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317", + 
"sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c", + "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631", + "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d", + "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652", + "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5", + "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e", + "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b", + "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8", + "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476", + "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706", + "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563", + "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237", + "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b", + "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083", + "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180", + "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425", + "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e", + "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f", + "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725", + "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183", + "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab", + "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774", + "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725", + "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e", + "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5", + "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d", 
+ "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290", + "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44", + "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed", + "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4", + "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba", + "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12", + "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4" ], - "markers": "python_version >= '3.6'", - "version": "==6.0.1" + "markers": "python_version >= '3.8'", + "version": "==6.0.2" }, "requests": { "hashes": [ @@ -410,11 +427,11 @@ }, "urllib3": { "hashes": [ - "sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472", - "sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168" + "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac", + "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9" ], "markers": "python_version >= '3.8'", - "version": "==2.2.2" + "version": "==2.2.3" } }, "develop": { @@ -435,11 +452,11 @@ }, "executing": { "hashes": [ - "sha256:35afe2ce3affba8ee97f2d69927fa823b08b472b7b994e36a52a964b93d16147", - "sha256:eac49ca94516ccc753f9fb5ce82603156e590b27525a8bc32cce8ae302eb61bc" + "sha256:8d63781349375b5ebccc3142f4b30350c0cd9c79f921cde38be2be4637e98eaf", + "sha256:8ea27ddd260da8150fa5a708269c4a10e76161e2496ec3e587da9e3c0fe4b9ab" ], - "markers": "python_version >= '3.5'", - "version": "==2.0.1" + "markers": "python_version >= '3.8'", + "version": "==2.1.0" }, "gnureadline": { "hashes": [ @@ -485,11 +502,11 @@ }, "ipython": { "hashes": [ - "sha256:1cec0fbba8404af13facebe83d04436a7434c7400e59f47acf467c64abd0956c", - "sha256:e6b347c27bdf9c32ee9d31ae85defc525755a1869f14057e900675b9e8d6e6ff" + "sha256:0d0d15ca1e01faeb868ef56bc7ee5a0de5bd66885735682e8a322ae289a13d1a", + 
"sha256:530ef1e7bb693724d3cdc37287c80b07ad9b25986c007a53aa1857272dac3f35" ], "markers": "python_version >= '3.11'", - "version": "==8.26.0" + "version": "==8.28.0" }, "jedi": { "hashes": [ @@ -525,11 +542,11 @@ }, "prompt-toolkit": { "hashes": [ - "sha256:0d7bfa67001d5e39d02c224b663abc33687405033a8c422d0d675a5a13361d10", - "sha256:1e1b29cb58080b1e69f207c893a1a7bf16d127a5c30c9d17a25a5d77792e5360" + "sha256:d6623ab0477a80df74e646bdbc93621143f5caf104206aa29294d53de1a03d90", + "sha256:f49a827f90062e411f1ce1f854f2aedb3c23353244f8108b89283587397ac10e" ], "markers": "python_full_version >= '3.7.0'", - "version": "==3.0.47" + "version": "==3.0.48" }, "ptyprocess": { "hashes": [ From 40021b6bcf40cb724c4444c79239223c0419ec71 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 4 Oct 2024 13:15:16 +0200 Subject: [PATCH 311/318] CI: update coverage ignore list for generated code (#3262) * CI: update coverage ignore list for generated code * CI: generate codecov.yml automatically --- .github/codecov-ignore-generated.sh | 17 +++ .github/codecov.yml | 139 --------------------- .github/workflows/bats-sqlite-coverage.yml | 4 + .github/workflows/go-tests-windows.yml | 4 + .github/workflows/go-tests.yml | 4 + .gitignore | 3 + 6 files changed, 32 insertions(+), 139 deletions(-) delete mode 100644 .github/codecov.yml diff --git a/.github/codecov-ignore-generated.sh b/.github/codecov-ignore-generated.sh index 3c896d47be7..0f59b1de4a0 100755 --- a/.github/codecov-ignore-generated.sh +++ b/.github/codecov-ignore-generated.sh @@ -4,6 +4,23 @@ # # .github/codecov-ignore-generated.sh >> .github/codecov.yml +cat <>>>> $file"; cat $file; echo; done if: ${{ always() }} + - name: Ignore-list of generated files for codecov + run: | + .github/codecov-ignore-generated.sh >> .github/codecov.yml + - name: Upload bats coverage to codecov uses: codecov/codecov-action@v4 with: diff --git a/.github/workflows/go-tests-windows.yml 
b/.github/workflows/go-tests-windows.yml index a31e42cf702..488756d0059 100644 --- a/.github/workflows/go-tests-windows.yml +++ b/.github/workflows/go-tests-windows.yml @@ -47,6 +47,10 @@ jobs: if(!$?) { cat out.txt | sed 's/ *coverage:.*of statements in.*//' | richgo testfilter; Exit 1 } cat out.txt | sed 's/ *coverage:.*of statements in.*//' | richgo testfilter + - name: Ignore-list of generated files for codecov + run: | + .github/codecov-ignore-generated.sh >> .github/codecov.yml + - name: Upload unit coverage to Codecov uses: codecov/codecov-action@v4 with: diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index a6c72a91af6..52de434ad9b 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -165,6 +165,10 @@ jobs: set -o pipefail make go-acc | sed 's/ *coverage:.*of statements in.*//' | richgo testfilter + - name: Ignore-list of generated files for codecov + run: | + .github/codecov-ignore-generated.sh >> .github/codecov.yml + - name: Upload unit coverage to Codecov uses: codecov/codecov-action@v4 with: diff --git a/.gitignore b/.gitignore index 6e6624fd282..d76efcbfc48 100644 --- a/.gitignore +++ b/.gitignore @@ -60,3 +60,6 @@ msi __pycache__ *.py[cod] *.egg-info + +# automatically generated before running codecov +.github/codecov.yml From b9bccfa56f3393dccf19ca97b4a2673efc0feaff Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 9 Oct 2024 13:06:03 +0200 Subject: [PATCH 312/318] context propagation: pkg/apiserver (#3272) * context propagation: apic.Push() * context propagation: NewServer() * lint --- .golangci.yml | 2 +- cmd/crowdsec-cli/clipapi/papi.go | 2 +- cmd/crowdsec/api.go | 10 ++++++-- cmd/crowdsec/serve.go | 10 ++++---- pkg/apiserver/alerts_test.go | 6 ++--- pkg/apiserver/api_key_test.go | 3 +-- pkg/apiserver/apic.go | 12 +++++----- pkg/apiserver/apic_test.go | 2 +- pkg/apiserver/apiserver.go | 10 ++++---- pkg/apiserver/apiserver_test.go | 40 
++++++++++++++++---------------- pkg/apiserver/jwt_test.go | 3 +-- pkg/apiserver/machines_test.go | 20 ++++++---------- 12 files changed, 59 insertions(+), 61 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 786bb18d8e7..4909d3e60c0 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -321,7 +321,7 @@ issues: # `err` is often shadowed, we may continue to do it - linters: - govet - text: "shadow: declaration of \"err\" shadows declaration" + text: "shadow: declaration of \"(err|ctx)\" shadows declaration" - linters: - errcheck diff --git a/cmd/crowdsec-cli/clipapi/papi.go b/cmd/crowdsec-cli/clipapi/papi.go index b8101a0fb34..461215c3a39 100644 --- a/cmd/crowdsec-cli/clipapi/papi.go +++ b/cmd/crowdsec-cli/clipapi/papi.go @@ -127,7 +127,7 @@ func (cli *cliPapi) sync(ctx context.Context, out io.Writer, db *database.Client return fmt.Errorf("unable to initialize API client: %w", err) } - t.Go(apic.Push) + t.Go(func() error { return apic.Push(ctx) }) papi, err := apiserver.NewPAPI(apic, db, cfg.API.Server.ConsoleConfig, log.GetLevel()) if err != nil { diff --git a/cmd/crowdsec/api.go b/cmd/crowdsec/api.go index c57b8d87cff..6ab41def16f 100644 --- a/cmd/crowdsec/api.go +++ b/cmd/crowdsec/api.go @@ -1,6 +1,7 @@ package main import ( + "context" "errors" "fmt" "runtime" @@ -14,12 +15,12 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/csconfig" ) -func initAPIServer(cConfig *csconfig.Config) (*apiserver.APIServer, error) { +func initAPIServer(ctx context.Context, cConfig *csconfig.Config) (*apiserver.APIServer, error) { if cConfig.API.Server.OnlineClient == nil || cConfig.API.Server.OnlineClient.Credentials == nil { log.Info("push and pull to Central API disabled") } - apiServer, err := apiserver.NewServer(cConfig.API.Server) + apiServer, err := apiserver.NewServer(ctx, cConfig.API.Server) if err != nil { return nil, fmt.Errorf("unable to run local API: %w", err) } @@ -58,11 +59,14 @@ func initAPIServer(cConfig *csconfig.Config) (*apiserver.APIServer, error) { 
func serveAPIServer(apiServer *apiserver.APIServer) { apiReady := make(chan bool, 1) + apiTomb.Go(func() error { defer trace.CatchPanic("crowdsec/serveAPIServer") + go func() { defer trace.CatchPanic("crowdsec/runAPIServer") log.Debugf("serving API after %s ms", time.Since(crowdsecT0)) + if err := apiServer.Run(apiReady); err != nil { log.Fatal(err) } @@ -76,6 +80,7 @@ func serveAPIServer(apiServer *apiserver.APIServer) { <-apiTomb.Dying() // lock until go routine is dying pluginTomb.Kill(nil) log.Infof("serve: shutting down api server") + return apiServer.Shutdown() }) <-apiReady @@ -87,5 +92,6 @@ func hasPlugins(profiles []*csconfig.ProfileCfg) bool { return true } } + return false } diff --git a/cmd/crowdsec/serve.go b/cmd/crowdsec/serve.go index f1a658e9512..14602c425fe 100644 --- a/cmd/crowdsec/serve.go +++ b/cmd/crowdsec/serve.go @@ -52,6 +52,8 @@ func debugHandler(sig os.Signal, cConfig *csconfig.Config) error { func reloadHandler(sig os.Signal) (*csconfig.Config, error) { var tmpFile string + ctx := context.TODO() + // re-initialize tombs acquisTomb = tomb.Tomb{} parsersTomb = tomb.Tomb{} @@ -74,7 +76,7 @@ func reloadHandler(sig os.Signal) (*csconfig.Config, error) { cConfig.API.Server.OnlineClient = nil } - apiServer, err := initAPIServer(cConfig) + apiServer, err := initAPIServer(ctx, cConfig) if err != nil { return nil, fmt.Errorf("unable to init api server: %w", err) } @@ -88,7 +90,7 @@ func reloadHandler(sig os.Signal) (*csconfig.Config, error) { return nil, err } - if err := hub.Load(); err != nil { + if err = hub.Load(); err != nil { return nil, err } @@ -374,7 +376,7 @@ func Serve(cConfig *csconfig.Config, agentReady chan bool) error { cConfig.API.Server.OnlineClient = nil } - apiServer, err := initAPIServer(cConfig) + apiServer, err := initAPIServer(ctx, cConfig) if err != nil { return fmt.Errorf("api server init: %w", err) } @@ -390,7 +392,7 @@ func Serve(cConfig *csconfig.Config, agentReady chan bool) error { return err } - if err := hub.Load(); 
err != nil { + if err = hub.Load(); err != nil { return err } diff --git a/pkg/apiserver/alerts_test.go b/pkg/apiserver/alerts_test.go index 0e89ddb2137..cd981f76542 100644 --- a/pkg/apiserver/alerts_test.go +++ b/pkg/apiserver/alerts_test.go @@ -65,7 +65,7 @@ func (l *LAPI) RecordResponse(t *testing.T, ctx context.Context, verb string, ur } func InitMachineTest(t *testing.T, ctx context.Context) (*gin.Engine, models.WatcherAuthResponse, csconfig.Config) { - router, config := NewAPITest(t) + router, config := NewAPITest(t, ctx) loginResp := LoginToTestAPI(t, ctx, router, config) return router, loginResp, config @@ -137,7 +137,7 @@ func TestCreateAlert(t *testing.T) { func TestCreateAlertChannels(t *testing.T) { ctx := context.Background() - apiServer, config := NewAPIServer(t) + apiServer, config := NewAPIServer(t, ctx) apiServer.controller.PluginChannel = make(chan csplugin.ProfileAlert) apiServer.InitController() @@ -437,7 +437,7 @@ func TestDeleteAlertTrustedIPS(t *testing.T) { // cfg.API.Server.TrustedIPs = []string{"1.2.3.4", "1.2.4.0/24", "::"} cfg.API.Server.TrustedIPs = []string{"1.2.3.4", "1.2.4.0/24"} cfg.API.Server.ListenURI = "::8080" - server, err := NewServer(cfg.API.Server) + server, err := NewServer(ctx, cfg.API.Server) require.NoError(t, err) err = server.InitController() diff --git a/pkg/apiserver/api_key_test.go b/pkg/apiserver/api_key_test.go index 014f255b892..e6ed68a6e0d 100644 --- a/pkg/apiserver/api_key_test.go +++ b/pkg/apiserver/api_key_test.go @@ -11,9 +11,8 @@ import ( ) func TestAPIKey(t *testing.T) { - router, config := NewAPITest(t) - ctx := context.Background() + router, config := NewAPITest(t, ctx) APIKey := CreateTestBouncer(t, ctx, config.API.Server.DbConfig) diff --git a/pkg/apiserver/apic.go b/pkg/apiserver/apic.go index e62bc663c16..a2fb0e85749 100644 --- a/pkg/apiserver/apic.go +++ b/pkg/apiserver/apic.go @@ -256,7 +256,7 @@ func NewAPIC(ctx context.Context, config *csconfig.OnlineApiClientCfg, dbClient } // keep track of all 
alerts in cache and push it to CAPI every PushInterval. -func (a *apic) Push() error { +func (a *apic) Push(ctx context.Context) error { defer trace.CatchPanic("lapi/pushToAPIC") var cache models.AddSignalsRequest @@ -276,7 +276,7 @@ func (a *apic) Push() error { return nil } - go a.Send(&cache) + go a.Send(ctx, &cache) return nil case <-ticker.C: @@ -289,7 +289,7 @@ func (a *apic) Push() error { a.mu.Unlock() log.Infof("Signal push: %d signals to push", len(cacheCopy)) - go a.Send(&cacheCopy) + go a.Send(ctx, &cacheCopy) } case alerts := <-a.AlertsAddChan: var signals []*models.AddSignalsRequestItem @@ -351,7 +351,7 @@ func shouldShareAlert(alert *models.Alert, consoleConfig *csconfig.ConsoleConfig return true } -func (a *apic) Send(cacheOrig *models.AddSignalsRequest) { +func (a *apic) Send(ctx context.Context, cacheOrig *models.AddSignalsRequest) { /*we do have a problem with this : The apic.Push background routine reads from alertToPush chan. This chan is filled by Controller.CreateAlert @@ -375,7 +375,7 @@ func (a *apic) Send(cacheOrig *models.AddSignalsRequest) { for { if pageEnd >= len(cache) { send = cache[pageStart:] - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) defer cancel() @@ -389,7 +389,7 @@ func (a *apic) Send(cacheOrig *models.AddSignalsRequest) { } send = cache[pageStart:pageEnd] - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) defer cancel() diff --git a/pkg/apiserver/apic_test.go b/pkg/apiserver/apic_test.go index 51b1f43c707..b52dc9e44cc 100644 --- a/pkg/apiserver/apic_test.go +++ b/pkg/apiserver/apic_test.go @@ -1134,7 +1134,7 @@ func TestAPICPush(t *testing.T) { api.Shutdown() }() - err = api.Push() + err = api.Push(ctx) require.NoError(t, err) assert.Equal(t, tc.expectedCalls, httpmock.GetTotalCallCount()) }) diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go 
index 8fe500c7f52..bdf2d4148cc 100644 --- a/pkg/apiserver/apiserver.go +++ b/pkg/apiserver/apiserver.go @@ -159,11 +159,9 @@ func newGinLogger(config *csconfig.LocalApiServerCfg) (*log.Logger, string, erro // NewServer creates a LAPI server. // It sets up a gin router, a database client, and a controller. -func NewServer(config *csconfig.LocalApiServerCfg) (*APIServer, error) { +func NewServer(ctx context.Context, config *csconfig.LocalApiServerCfg) (*APIServer, error) { var flushScheduler *gocron.Scheduler - ctx := context.TODO() - dbClient, err := database.NewClient(ctx, config.DbConfig) if err != nil { return nil, fmt.Errorf("unable to init database client: %w", err) @@ -300,8 +298,8 @@ func (s *APIServer) Router() (*gin.Engine, error) { return s.router, nil } -func (s *APIServer) apicPush() error { - if err := s.apic.Push(); err != nil { +func (s *APIServer) apicPush(ctx context.Context) error { + if err := s.apic.Push(ctx); err != nil { log.Errorf("capi push: %s", err) return err } @@ -337,7 +335,7 @@ func (s *APIServer) papiSync() error { } func (s *APIServer) initAPIC(ctx context.Context) { - s.apic.pushTomb.Go(s.apicPush) + s.apic.pushTomb.Go(func() error { return s.apicPush(ctx) }) s.apic.pullTomb.Go(func() error { return s.apicPull(ctx) }) // csConfig.API.Server.ConsoleConfig.ShareCustomScenarios diff --git a/pkg/apiserver/apiserver_test.go b/pkg/apiserver/apiserver_test.go index c3f69c5c365..b04ad687e4e 100644 --- a/pkg/apiserver/apiserver_test.go +++ b/pkg/apiserver/apiserver_test.go @@ -3,7 +3,6 @@ package apiserver import ( "context" "encoding/json" - "fmt" "net/http" "net/http/httptest" "os" @@ -41,7 +40,7 @@ var ( MachineID: &testMachineID, Password: &testPassword, } - UserAgent = fmt.Sprintf("crowdsec-test/%s", version.Version) + UserAgent = "crowdsec-test/" + version.Version emptyBody = strings.NewReader("") ) @@ -135,12 +134,12 @@ func LoadTestConfigForwardedFor(t *testing.T) csconfig.Config { return config } -func NewAPIServer(t *testing.T) 
(*APIServer, csconfig.Config) { +func NewAPIServer(t *testing.T, ctx context.Context) (*APIServer, csconfig.Config) { config := LoadTestConfig(t) os.Remove("./ent") - apiServer, err := NewServer(config.API.Server) + apiServer, err := NewServer(ctx, config.API.Server) require.NoError(t, err) log.Printf("Creating new API server") @@ -149,8 +148,8 @@ func NewAPIServer(t *testing.T) (*APIServer, csconfig.Config) { return apiServer, config } -func NewAPITest(t *testing.T) (*gin.Engine, csconfig.Config) { - apiServer, config := NewAPIServer(t) +func NewAPITest(t *testing.T, ctx context.Context) (*gin.Engine, csconfig.Config) { + apiServer, config := NewAPIServer(t, ctx) err := apiServer.InitController() require.NoError(t, err) @@ -161,12 +160,12 @@ func NewAPITest(t *testing.T) (*gin.Engine, csconfig.Config) { return router, config } -func NewAPITestForwardedFor(t *testing.T) (*gin.Engine, csconfig.Config) { +func NewAPITestForwardedFor(t *testing.T, ctx context.Context) (*gin.Engine, csconfig.Config) { config := LoadTestConfigForwardedFor(t) os.Remove("./ent") - apiServer, err := NewServer(config.API.Server) + apiServer, err := NewServer(ctx, config.API.Server) require.NoError(t, err) err = apiServer.InitController() @@ -302,28 +301,29 @@ func CreateTestBouncer(t *testing.T, ctx context.Context, config *csconfig.Datab } func TestWithWrongDBConfig(t *testing.T) { + ctx := context.Background() config := LoadTestConfig(t) config.API.Server.DbConfig.Type = "test" - apiServer, err := NewServer(config.API.Server) + apiServer, err := NewServer(ctx, config.API.Server) cstest.RequireErrorContains(t, err, "unable to init database client: unknown database type 'test'") assert.Nil(t, apiServer) } func TestWithWrongFlushConfig(t *testing.T) { + ctx := context.Background() config := LoadTestConfig(t) maxItems := -1 config.API.Server.DbConfig.Flush.MaxItems = &maxItems - apiServer, err := NewServer(config.API.Server) + apiServer, err := NewServer(ctx, config.API.Server) 
cstest.RequireErrorContains(t, err, "max_items can't be zero or negative") assert.Nil(t, apiServer) } func TestUnknownPath(t *testing.T) { - router, _ := NewAPITest(t) - ctx := context.Background() + router, _ := NewAPITest(t, ctx) w := httptest.NewRecorder() req, _ := http.NewRequestWithContext(ctx, http.MethodGet, "/test", nil) @@ -349,6 +349,8 @@ ListenURI string `yaml:"listen_uri,omitempty"` //127.0 */ func TestLoggingDebugToFileConfig(t *testing.T) { + ctx := context.Background() + /*declare settings*/ maxAge := "1h" flushConfig := csconfig.FlushDBCfg{ @@ -370,7 +372,7 @@ func TestLoggingDebugToFileConfig(t *testing.T) { LogDir: tempDir, DbConfig: &dbconfig, } - expectedFile := fmt.Sprintf("%s/crowdsec_api.log", tempDir) + expectedFile := filepath.Join(tempDir, "crowdsec_api.log") expectedLines := []string{"/test42"} cfg.LogLevel = ptr.Of(log.DebugLevel) @@ -378,12 +380,10 @@ func TestLoggingDebugToFileConfig(t *testing.T) { err := types.SetDefaultLoggerConfig(cfg.LogMedia, cfg.LogDir, *cfg.LogLevel, cfg.LogMaxSize, cfg.LogMaxFiles, cfg.LogMaxAge, cfg.CompressLogs, false) require.NoError(t, err) - api, err := NewServer(&cfg) + api, err := NewServer(ctx, &cfg) require.NoError(t, err) require.NotNil(t, api) - ctx := context.Background() - w := httptest.NewRecorder() req, _ := http.NewRequestWithContext(ctx, http.MethodGet, "/test42", nil) req.Header.Set("User-Agent", UserAgent) @@ -402,6 +402,8 @@ func TestLoggingDebugToFileConfig(t *testing.T) { } func TestLoggingErrorToFileConfig(t *testing.T) { + ctx := context.Background() + /*declare settings*/ maxAge := "1h" flushConfig := csconfig.FlushDBCfg{ @@ -423,19 +425,17 @@ func TestLoggingErrorToFileConfig(t *testing.T) { LogDir: tempDir, DbConfig: &dbconfig, } - expectedFile := fmt.Sprintf("%s/crowdsec_api.log", tempDir) + expectedFile := filepath.Join(tempDir, "crowdsec_api.log") cfg.LogLevel = ptr.Of(log.ErrorLevel) // Configure logging err := types.SetDefaultLoggerConfig(cfg.LogMedia, cfg.LogDir, 
*cfg.LogLevel, cfg.LogMaxSize, cfg.LogMaxFiles, cfg.LogMaxAge, cfg.CompressLogs, false) require.NoError(t, err) - api, err := NewServer(&cfg) + api, err := NewServer(ctx, &cfg) require.NoError(t, err) require.NotNil(t, api) - ctx := context.Background() - w := httptest.NewRecorder() req, _ := http.NewRequestWithContext(ctx, http.MethodGet, "/test42", nil) req.Header.Set("User-Agent", UserAgent) diff --git a/pkg/apiserver/jwt_test.go b/pkg/apiserver/jwt_test.go index 293cc38bd2c..710cf82ad00 100644 --- a/pkg/apiserver/jwt_test.go +++ b/pkg/apiserver/jwt_test.go @@ -11,9 +11,8 @@ import ( ) func TestLogin(t *testing.T) { - router, config := NewAPITest(t) - ctx := context.Background() + router, config := NewAPITest(t, ctx) body := CreateTestMachine(t, router, "") diff --git a/pkg/apiserver/machines_test.go b/pkg/apiserver/machines_test.go index 44c370732c7..e60cec30e54 100644 --- a/pkg/apiserver/machines_test.go +++ b/pkg/apiserver/machines_test.go @@ -15,9 +15,8 @@ import ( ) func TestCreateMachine(t *testing.T) { - router, _ := NewAPITest(t) - ctx := context.Background() + router, _ := NewAPITest(t, ctx) // Create machine with invalid format w := httptest.NewRecorder() @@ -53,10 +52,9 @@ func TestCreateMachine(t *testing.T) { } func TestCreateMachineWithForwardedFor(t *testing.T) { - router, config := NewAPITestForwardedFor(t) - router.TrustedPlatform = "X-Real-IP" - ctx := context.Background() + router, config := NewAPITestForwardedFor(t, ctx) + router.TrustedPlatform = "X-Real-IP" // Create machine b, err := json.Marshal(MachineTest) @@ -79,9 +77,8 @@ func TestCreateMachineWithForwardedFor(t *testing.T) { } func TestCreateMachineWithForwardedForNoConfig(t *testing.T) { - router, config := NewAPITest(t) - ctx := context.Background() + router, config := NewAPITest(t, ctx) // Create machine b, err := json.Marshal(MachineTest) @@ -106,9 +103,8 @@ func TestCreateMachineWithForwardedForNoConfig(t *testing.T) { } func TestCreateMachineWithoutForwardedFor(t *testing.T) { 
- router, config := NewAPITestForwardedFor(t) - ctx := context.Background() + router, config := NewAPITestForwardedFor(t, ctx) // Create machine b, err := json.Marshal(MachineTest) @@ -132,9 +128,8 @@ func TestCreateMachineWithoutForwardedFor(t *testing.T) { } func TestCreateMachineAlreadyExist(t *testing.T) { - router, _ := NewAPITest(t) - ctx := context.Background() + router, _ := NewAPITest(t, ctx) body := CreateTestMachine(t, router, "") @@ -153,9 +148,8 @@ func TestCreateMachineAlreadyExist(t *testing.T) { } func TestAutoRegistration(t *testing.T) { - router, _ := NewAPITest(t) - ctx := context.Background() + router, _ := NewAPITest(t, ctx) // Invalid registration token / valid source IP regReq := MachineTest From 4ea0537d0bda7b56de2b0d7712cf1611965ad213 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 10 Oct 2024 11:09:26 +0200 Subject: [PATCH 313/318] CI: generate codecov.yml before tests (#3280) --- ...odecov-ignore-generated.sh => generate-codecov-yml.sh} | 2 +- .github/workflows/bats-sqlite-coverage.yml | 8 ++++---- .github/workflows/go-tests-windows.yml | 8 ++++---- .github/workflows/go-tests.yml | 8 ++++++-- 4 files changed, 15 insertions(+), 11 deletions(-) rename .github/{codecov-ignore-generated.sh => generate-codecov-yml.sh} (89%) diff --git a/.github/codecov-ignore-generated.sh b/.github/generate-codecov-yml.sh similarity index 89% rename from .github/codecov-ignore-generated.sh rename to .github/generate-codecov-yml.sh index 0f59b1de4a0..cc2d652e339 100755 --- a/.github/codecov-ignore-generated.sh +++ b/.github/generate-codecov-yml.sh @@ -2,7 +2,7 @@ # Run this from the repository root: # -# .github/codecov-ignore-generated.sh >> .github/codecov.yml +# .github/generate-codecov-yml.sh >> .github/codecov.yml cat <> .github/codecov.yml + - name: "Run tests" run: ./test/run-tests ./test/bats --formatter $(pwd)/test/lib/color-formatter @@ -79,10 +83,6 @@ jobs: run: for file in $(find ./test/local/var/log -type 
f); do echo ">>>>> $file"; cat $file; echo; done if: ${{ always() }} - - name: Ignore-list of generated files for codecov - run: | - .github/codecov-ignore-generated.sh >> .github/codecov.yml - - name: Upload bats coverage to codecov uses: codecov/codecov-action@v4 with: diff --git a/.github/workflows/go-tests-windows.yml b/.github/workflows/go-tests-windows.yml index 488756d0059..ba283f3890a 100644 --- a/.github/workflows/go-tests-windows.yml +++ b/.github/workflows/go-tests-windows.yml @@ -40,6 +40,10 @@ jobs: run: | make build BUILD_RE2_WASM=1 + - name: Generate codecov configuration + run: | + .github/generate-codecov-yml.sh >> .github/codecov.yml + - name: Run tests run: | go install github.com/kyoh86/richgo@v0.3.10 @@ -47,10 +51,6 @@ jobs: if(!$?) { cat out.txt | sed 's/ *coverage:.*of statements in.*//' | richgo testfilter; Exit 1 } cat out.txt | sed 's/ *coverage:.*of statements in.*//' | richgo testfilter - - name: Ignore-list of generated files for codecov - run: | - .github/codecov-ignore-generated.sh >> .github/codecov.yml - - name: Upload unit coverage to Codecov uses: codecov/codecov-action@v4 with: diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index 52de434ad9b..225c1b57628 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -145,6 +145,10 @@ jobs: aws --endpoint-url=http://127.0.0.1:4566 --region us-east-1 kinesis create-stream --stream-name stream-1-shard --shard-count 1 aws --endpoint-url=http://127.0.0.1:4566 --region us-east-1 kinesis create-stream --stream-name stream-2-shards --shard-count 2 + - name: Generate codecov configuration + run: | + .github/generate-codecov-yml.sh >> .github/codecov.yml + - name: Build and run tests, static run: | sudo apt -qq -y -o=Dpkg::Use-Pty=0 install build-essential libre2-dev @@ -165,9 +169,9 @@ jobs: set -o pipefail make go-acc | sed 's/ *coverage:.*of statements in.*//' | richgo testfilter - - name: Ignore-list of generated files for codecov + - 
name: Generate codecov configuration run: | - .github/codecov-ignore-generated.sh >> .github/codecov.yml + .github/generate-codecov-yml.sh >> .github/codecov.yml - name: Upload unit coverage to Codecov uses: codecov/codecov-action@v4 From 50d115b9146beeaae9788d81f5e2c5fae0520de3 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 10 Oct 2024 15:54:25 +0200 Subject: [PATCH 314/318] Update protobufs (#3276) * deps: update protobufs; add pkg/protobufs/generate.go * generate protobuf in CI * make: remove generate target * pin protoc --- .github/workflows/go-tests.yml | 15 ++- Makefile | 6 -- cmd/notification-dummy/main.go | 4 +- cmd/notification-email/main.go | 4 +- cmd/notification-file/main.go | 4 +- cmd/notification-http/main.go | 4 +- cmd/notification-sentinel/main.go | 4 +- cmd/notification-slack/main.go | 4 +- cmd/notification-splunk/main.go | 4 +- go.mod | 24 ++--- go.sum | 48 ++++----- pkg/csplugin/broker.go | 8 +- pkg/csplugin/notifier.go | 14 ++- pkg/protobufs/generate.go | 14 +++ pkg/protobufs/notifier.pb.go | 132 ++----------------------- pkg/protobufs/notifier_grpc.pb.go | 159 ++++++++++++++++++++++++++++++ pkg/protobufs/plugin_interface.go | 47 --------- 17 files changed, 259 insertions(+), 236 deletions(-) create mode 100644 pkg/protobufs/generate.go create mode 100644 pkg/protobufs/notifier_grpc.pb.go delete mode 100644 pkg/protobufs/plugin_interface.go diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index 225c1b57628..bc718414677 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -128,10 +128,21 @@ jobs: with: go-version: "1.22" - - name: Run "make generate" and check for changes + - name: Run "go generate" and check for changes run: | set -e - make generate 2>/dev/null + # ensure the version of 'protoc' matches the one that generated the files + PROTOBUF_VERSION="21.12" + # don't pollute the repo + pushd $HOME + curl -OL 
https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOBUF_VERSION}/protoc-${PROTOBUF_VERSION}-linux-x86_64.zip + unzip protoc-${PROTOBUF_VERSION}-linux-x86_64.zip -d $HOME/.protoc + popd + export PATH="$HOME/.protoc/bin:$PATH" + go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.34.2 + go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@v1.5.1 + go generate ./... + protoc --version if [[ $(git status --porcelain) ]]; then echo "Error: Uncommitted changes found after running 'make generate'. Please commit all generated code." git diff diff --git a/Makefile b/Makefile index cb0d5ebaf60..bbfa4bbee94 100644 --- a/Makefile +++ b/Makefile @@ -263,12 +263,6 @@ cscli: ## Build cscli crowdsec: ## Build crowdsec @$(MAKE) -C $(CROWDSEC_FOLDER) build $(MAKE_FLAGS) -.PHONY: generate -generate: ## Generate code for the database and APIs - $(GO) generate ./pkg/database/ent - $(GO) generate ./pkg/models - $(GO) generate ./pkg/modelscapi - .PHONY: testclean testclean: bats-clean ## Remove test artifacts @$(RM) pkg/apiserver/ent $(WIN_IGNORE_ERR) diff --git a/cmd/notification-dummy/main.go b/cmd/notification-dummy/main.go index 024a1eb81ba..7fbb10d4fca 100644 --- a/cmd/notification-dummy/main.go +++ b/cmd/notification-dummy/main.go @@ -9,6 +9,7 @@ import ( plugin "github.com/hashicorp/go-plugin" "gopkg.in/yaml.v3" + "github.com/crowdsecurity/crowdsec/pkg/csplugin" "github.com/crowdsecurity/crowdsec/pkg/protobufs" ) @@ -19,6 +20,7 @@ type PluginConfig struct { } type DummyPlugin struct { + protobufs.UnimplementedNotifierServer PluginConfigByName map[string]PluginConfig } @@ -84,7 +86,7 @@ func main() { plugin.Serve(&plugin.ServeConfig{ HandshakeConfig: handshake, Plugins: map[string]plugin.Plugin{ - "dummy": &protobufs.NotifierPlugin{ + "dummy": &csplugin.NotifierPlugin{ Impl: sp, }, }, diff --git a/cmd/notification-email/main.go b/cmd/notification-email/main.go index 2707b7fe1af..5fc02cdd1d7 100644 --- a/cmd/notification-email/main.go +++ 
b/cmd/notification-email/main.go @@ -12,6 +12,7 @@ import ( mail "github.com/xhit/go-simple-mail/v2" "gopkg.in/yaml.v3" + "github.com/crowdsecurity/crowdsec/pkg/csplugin" "github.com/crowdsecurity/crowdsec/pkg/protobufs" ) @@ -55,6 +56,7 @@ type PluginConfig struct { } type EmailPlugin struct { + protobufs.UnimplementedNotifierServer ConfigByName map[string]PluginConfig } @@ -170,7 +172,7 @@ func main() { plugin.Serve(&plugin.ServeConfig{ HandshakeConfig: handshake, Plugins: map[string]plugin.Plugin{ - "email": &protobufs.NotifierPlugin{ + "email": &csplugin.NotifierPlugin{ Impl: &EmailPlugin{ConfigByName: make(map[string]PluginConfig)}, }, }, diff --git a/cmd/notification-file/main.go b/cmd/notification-file/main.go index f6649b1f395..a4dbb8ee5db 100644 --- a/cmd/notification-file/main.go +++ b/cmd/notification-file/main.go @@ -15,6 +15,7 @@ import ( plugin "github.com/hashicorp/go-plugin" "gopkg.in/yaml.v3" + "github.com/crowdsecurity/crowdsec/pkg/csplugin" "github.com/crowdsecurity/crowdsec/pkg/protobufs" ) @@ -52,6 +53,7 @@ type LogRotate struct { } type FilePlugin struct { + protobufs.UnimplementedNotifierServer PluginConfigByName map[string]PluginConfig } @@ -241,7 +243,7 @@ func main() { plugin.Serve(&plugin.ServeConfig{ HandshakeConfig: handshake, Plugins: map[string]plugin.Plugin{ - "file": &protobufs.NotifierPlugin{ + "file": &csplugin.NotifierPlugin{ Impl: sp, }, }, diff --git a/cmd/notification-http/main.go b/cmd/notification-http/main.go index 6b11a78ef86..3f84984315b 100644 --- a/cmd/notification-http/main.go +++ b/cmd/notification-http/main.go @@ -16,6 +16,7 @@ import ( plugin "github.com/hashicorp/go-plugin" "gopkg.in/yaml.v3" + "github.com/crowdsecurity/crowdsec/pkg/csplugin" "github.com/crowdsecurity/crowdsec/pkg/protobufs" ) @@ -34,6 +35,7 @@ type PluginConfig struct { } type HTTPPlugin struct { + protobufs.UnimplementedNotifierServer PluginConfigByName map[string]PluginConfig } @@ -190,7 +192,7 @@ func main() { plugin.Serve(&plugin.ServeConfig{ 
HandshakeConfig: handshake, Plugins: map[string]plugin.Plugin{ - "http": &protobufs.NotifierPlugin{ + "http": &csplugin.NotifierPlugin{ Impl: sp, }, }, diff --git a/cmd/notification-sentinel/main.go b/cmd/notification-sentinel/main.go index a29e941f80c..0293d45b0a4 100644 --- a/cmd/notification-sentinel/main.go +++ b/cmd/notification-sentinel/main.go @@ -15,6 +15,7 @@ import ( "github.com/hashicorp/go-plugin" "gopkg.in/yaml.v3" + "github.com/crowdsecurity/crowdsec/pkg/csplugin" "github.com/crowdsecurity/crowdsec/pkg/protobufs" ) @@ -27,6 +28,7 @@ type PluginConfig struct { } type SentinelPlugin struct { + protobufs.UnimplementedNotifierServer PluginConfigByName map[string]PluginConfig } @@ -122,7 +124,7 @@ func main() { plugin.Serve(&plugin.ServeConfig{ HandshakeConfig: handshake, Plugins: map[string]plugin.Plugin{ - "sentinel": &protobufs.NotifierPlugin{ + "sentinel": &csplugin.NotifierPlugin{ Impl: sp, }, }, diff --git a/cmd/notification-slack/main.go b/cmd/notification-slack/main.go index fba1b33e334..34c7c0df361 100644 --- a/cmd/notification-slack/main.go +++ b/cmd/notification-slack/main.go @@ -10,6 +10,7 @@ import ( "github.com/slack-go/slack" "gopkg.in/yaml.v3" + "github.com/crowdsecurity/crowdsec/pkg/csplugin" "github.com/crowdsecurity/crowdsec/pkg/protobufs" ) @@ -23,6 +24,7 @@ type PluginConfig struct { LogLevel *string `yaml:"log_level"` } type Notify struct { + protobufs.UnimplementedNotifierServer ConfigByName map[string]PluginConfig } @@ -84,7 +86,7 @@ func main() { plugin.Serve(&plugin.ServeConfig{ HandshakeConfig: handshake, Plugins: map[string]plugin.Plugin{ - "slack": &protobufs.NotifierPlugin{ + "slack": &csplugin.NotifierPlugin{ Impl: &Notify{ConfigByName: make(map[string]PluginConfig)}, }, }, diff --git a/cmd/notification-splunk/main.go b/cmd/notification-splunk/main.go index 26190c58a89..e18f416c14a 100644 --- a/cmd/notification-splunk/main.go +++ b/cmd/notification-splunk/main.go @@ -14,6 +14,7 @@ import ( plugin 
"github.com/hashicorp/go-plugin" "gopkg.in/yaml.v3" + "github.com/crowdsecurity/crowdsec/pkg/csplugin" "github.com/crowdsecurity/crowdsec/pkg/protobufs" ) @@ -32,6 +33,7 @@ type PluginConfig struct { } type Splunk struct { + protobufs.UnimplementedNotifierServer PluginConfigByName map[string]PluginConfig Client http.Client } @@ -117,7 +119,7 @@ func main() { plugin.Serve(&plugin.ServeConfig{ HandshakeConfig: handshake, Plugins: map[string]plugin.Plugin{ - "splunk": &protobufs.NotifierPlugin{ + "splunk": &csplugin.NotifierPlugin{ Impl: sp, }, }, diff --git a/go.mod b/go.mod index ec8566db84a..b02d3b76840 100644 --- a/go.mod +++ b/go.mod @@ -21,7 +21,7 @@ require ( github.com/bluele/gcache v0.0.2 github.com/buger/jsonparser v1.1.1 github.com/c-robinson/iplib v1.0.8 - github.com/cespare/xxhash/v2 v2.2.0 + github.com/cespare/xxhash/v2 v2.3.0 github.com/corazawaf/libinjection-go v0.1.2 github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607 github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 @@ -82,12 +82,12 @@ require ( github.com/umahmood/haversine v0.0.0-20151105152445-808ab04add26 github.com/wasilibs/go-re2 v1.7.0 github.com/xhit/go-simple-mail/v2 v2.16.0 - golang.org/x/crypto v0.22.0 - golang.org/x/mod v0.15.0 + golang.org/x/crypto v0.26.0 + golang.org/x/mod v0.17.0 golang.org/x/sys v0.24.0 - golang.org/x/text v0.14.0 - google.golang.org/grpc v1.56.3 - google.golang.org/protobuf v1.33.0 + golang.org/x/text v0.17.0 + google.golang.org/grpc v1.67.1 + google.golang.org/protobuf v1.34.2 gopkg.in/natefinch/lumberjack.v2 v2.2.1 gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637 gopkg.in/yaml.v2 v2.4.0 @@ -128,7 +128,7 @@ require ( github.com/go-stack/stack v1.8.0 // indirect github.com/goccy/go-json v0.10.2 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/glog v1.1.0 // indirect + github.com/golang/glog v1.2.2 // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/google/go-cmp v0.6.0 // indirect 
github.com/google/gofuzz v1.2.0 // indirect @@ -201,14 +201,14 @@ require ( go.mongodb.org/mongo-driver v1.9.4 // indirect go.uber.org/atomic v1.10.0 // indirect golang.org/x/arch v0.7.0 // indirect - golang.org/x/net v0.24.0 // indirect - golang.org/x/sync v0.6.0 // indirect - golang.org/x/term v0.19.0 // indirect + golang.org/x/net v0.28.0 // indirect + golang.org/x/sync v0.8.0 // indirect + golang.org/x/term v0.23.0 // indirect golang.org/x/time v0.3.0 // indirect - golang.org/x/tools v0.18.0 // indirect + golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect gotest.tools/v3 v3.5.0 // indirect diff --git a/go.sum b/go.sum index ff73dc56332..7aaea1587b8 100644 --- a/go.sum +++ b/go.sum @@ -74,8 +74,8 @@ github.com/bytedance/sonic v1.10.2 h1:GQebETVBxYB7JGWJtLBi07OVzWwt+8dWA00gEVW2ZF github.com/bytedance/sonic v1.10.2/go.mod h1:iZcSUejdk5aukTND/Eu/ivjQuEL0Cu9/rf50Hi0u/g4= github.com/c-robinson/iplib v1.0.8 h1:exDRViDyL9UBLcfmlxxkY5odWX5092nPsQIykHXhIn4= github.com/c-robinson/iplib v1.0.8/go.mod h1:i3LuuFL1hRT5gFpBRnEydzw8R6yhGkF4szNDIbF8pgo= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY= github.com/chenzhuoyu/base64x 
v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk= github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d h1:77cEq6EriyTZ0g/qfRdp61a3Uu/AWrgIq2s0ClJV1g0= @@ -294,8 +294,8 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= -github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= +github.com/golang/glog v1.2.2 h1:1+mZ9upx1Dh6FmUTFR1naJ77miKiXgALjWOZ3NVFPmY= +github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -763,8 +763,8 @@ golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= -golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= -golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= +golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= +golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod 
v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= @@ -772,8 +772,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= -golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -797,8 +797,8 @@ golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= -golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= -golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= +golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= +golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -808,8 +808,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -854,8 +854,8 @@ golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= -golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q= -golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= +golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= +golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod 
h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -868,8 +868,8 @@ golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= +golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -893,8 +893,8 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= -golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -906,14 +906,14 @@ google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCID google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= -google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc= -google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 h1:e7S5W7MGGLaSu8j3YjdezkZ+m1/Nm0uRVRMEMGk26Xs= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= +google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod 
h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/pkg/csplugin/broker.go b/pkg/csplugin/broker.go index f6629b2609e..31d7ac82fb2 100644 --- a/pkg/csplugin/broker.go +++ b/pkg/csplugin/broker.go @@ -45,7 +45,7 @@ type PluginBroker struct { pluginConfigByName map[string]PluginConfig pluginMap map[string]plugin.Plugin notificationConfigsByPluginType map[string][][]byte // "slack" -> []{config1, config2} - notificationPluginByName map[string]Notifier + notificationPluginByName map[string]protobufs.NotifierServer watcher PluginWatcher pluginKillMethods []func() pluginProcConfig *csconfig.PluginCfg @@ -75,7 +75,7 @@ type ProfileAlert struct { func (pb *PluginBroker) Init(pluginCfg *csconfig.PluginCfg, profileConfigs []*csconfig.ProfileCfg, configPaths *csconfig.ConfigurationPaths) error { pb.PluginChannel = make(chan ProfileAlert) pb.notificationConfigsByPluginType = make(map[string][][]byte) - pb.notificationPluginByName = make(map[string]Notifier) + pb.notificationPluginByName = make(map[string]protobufs.NotifierServer) pb.pluginMap = make(map[string]plugin.Plugin) pb.pluginConfigByName = make(map[string]PluginConfig) pb.alertsByPluginName = make(map[string][]*models.Alert) @@ -276,7 +276,7 @@ func (pb *PluginBroker) loadPlugins(path string) error { return pb.verifyPluginBinaryWithProfile() } -func (pb *PluginBroker) loadNotificationPlugin(name string, binaryPath string) (Notifier, error) { +func (pb *PluginBroker) loadNotificationPlugin(name string, binaryPath string) (protobufs.NotifierServer, error) { handshake, err := getHandshake() if err != nil { @@ -313,7 +313,7 @@ func (pb *PluginBroker) loadNotificationPlugin(name string, binaryPath string) ( return nil, err } pb.pluginKillMethods = append(pb.pluginKillMethods, c.Kill) - return 
raw.(Notifier), nil + return raw.(protobufs.NotifierServer), nil } func (pb *PluginBroker) pushNotificationsToPlugin(pluginName string, alerts []*models.Alert) error { diff --git a/pkg/csplugin/notifier.go b/pkg/csplugin/notifier.go index ed4a4cc4149..615322ac0c3 100644 --- a/pkg/csplugin/notifier.go +++ b/pkg/csplugin/notifier.go @@ -10,17 +10,15 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/protobufs" ) -type Notifier interface { - Notify(ctx context.Context, notification *protobufs.Notification) (*protobufs.Empty, error) - Configure(ctx context.Context, cfg *protobufs.Config) (*protobufs.Empty, error) -} - type NotifierPlugin struct { plugin.Plugin - Impl Notifier + Impl protobufs.NotifierServer } -type GRPCClient struct{ client protobufs.NotifierClient } +type GRPCClient struct{ + protobufs.UnimplementedNotifierServer + client protobufs.NotifierClient +} func (m *GRPCClient) Notify(ctx context.Context, notification *protobufs.Notification) (*protobufs.Empty, error) { done := make(chan error) @@ -45,7 +43,7 @@ func (m *GRPCClient) Configure(ctx context.Context, config *protobufs.Config) (* } type GRPCServer struct { - Impl Notifier + Impl protobufs.NotifierServer } func (p *NotifierPlugin) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error { diff --git a/pkg/protobufs/generate.go b/pkg/protobufs/generate.go new file mode 100644 index 00000000000..0e90d65b643 --- /dev/null +++ b/pkg/protobufs/generate.go @@ -0,0 +1,14 @@ +package protobufs + +// Dependencies: +// +// apt install protobuf-compiler +// +// keep this in sync with go.mod +// go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.34.2 +// +// Not the same versions as google.golang.org/grpc +// go list -m -versions google.golang.org/grpc/cmd/protoc-gen-go-grpc +// go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@v1.5.1 + +//go:generate protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. 
--go-grpc_opt=paths=source_relative notifier.proto diff --git a/pkg/protobufs/notifier.pb.go b/pkg/protobufs/notifier.pb.go index b5dc8113568..8c4754da773 100644 --- a/pkg/protobufs/notifier.pb.go +++ b/pkg/protobufs/notifier.pb.go @@ -1,16 +1,12 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.27.1 -// protoc v3.12.4 +// protoc-gen-go v1.34.2 +// protoc v3.21.12 // source: notifier.proto package protobufs import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -198,7 +194,7 @@ func file_notifier_proto_rawDescGZIP() []byte { } var file_notifier_proto_msgTypes = make([]protoimpl.MessageInfo, 3) -var file_notifier_proto_goTypes = []interface{}{ +var file_notifier_proto_goTypes = []any{ (*Notification)(nil), // 0: proto.Notification (*Config)(nil), // 1: proto.Config (*Empty)(nil), // 2: proto.Empty @@ -221,7 +217,7 @@ func file_notifier_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_notifier_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_notifier_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Notification); i { case 0: return &v.state @@ -233,7 +229,7 @@ func file_notifier_proto_init() { return nil } } - file_notifier_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_notifier_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*Config); i { case 0: return &v.state @@ -245,7 +241,7 @@ func file_notifier_proto_init() { return nil } } - file_notifier_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_notifier_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*Empty); i { case 0: return &v.state @@ -277,119 +273,3 @@ func file_notifier_proto_init() { 
file_notifier_proto_goTypes = nil file_notifier_proto_depIdxs = nil } - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConnInterface - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion6 - -// NotifierClient is the client API for Notifier service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type NotifierClient interface { - Notify(ctx context.Context, in *Notification, opts ...grpc.CallOption) (*Empty, error) - Configure(ctx context.Context, in *Config, opts ...grpc.CallOption) (*Empty, error) -} - -type notifierClient struct { - cc grpc.ClientConnInterface -} - -func NewNotifierClient(cc grpc.ClientConnInterface) NotifierClient { - return ¬ifierClient{cc} -} - -func (c *notifierClient) Notify(ctx context.Context, in *Notification, opts ...grpc.CallOption) (*Empty, error) { - out := new(Empty) - err := c.cc.Invoke(ctx, "/proto.Notifier/Notify", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *notifierClient) Configure(ctx context.Context, in *Config, opts ...grpc.CallOption) (*Empty, error) { - out := new(Empty) - err := c.cc.Invoke(ctx, "/proto.Notifier/Configure", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// NotifierServer is the server API for Notifier service. -type NotifierServer interface { - Notify(context.Context, *Notification) (*Empty, error) - Configure(context.Context, *Config) (*Empty, error) -} - -// UnimplementedNotifierServer can be embedded to have forward compatible implementations. 
-type UnimplementedNotifierServer struct { -} - -func (*UnimplementedNotifierServer) Notify(context.Context, *Notification) (*Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Notify not implemented") -} -func (*UnimplementedNotifierServer) Configure(context.Context, *Config) (*Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Configure not implemented") -} - -func RegisterNotifierServer(s *grpc.Server, srv NotifierServer) { - s.RegisterService(&_Notifier_serviceDesc, srv) -} - -func _Notifier_Notify_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(Notification) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(NotifierServer).Notify(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/proto.Notifier/Notify", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(NotifierServer).Notify(ctx, req.(*Notification)) - } - return interceptor(ctx, in, info, handler) -} - -func _Notifier_Configure_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(Config) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(NotifierServer).Configure(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/proto.Notifier/Configure", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(NotifierServer).Configure(ctx, req.(*Config)) - } - return interceptor(ctx, in, info, handler) -} - -var _Notifier_serviceDesc = grpc.ServiceDesc{ - ServiceName: "proto.Notifier", - HandlerType: (*NotifierServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Notify", - Handler: _Notifier_Notify_Handler, - }, - { - MethodName: "Configure", - 
Handler: _Notifier_Configure_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "notifier.proto", -} diff --git a/pkg/protobufs/notifier_grpc.pb.go b/pkg/protobufs/notifier_grpc.pb.go new file mode 100644 index 00000000000..5141e83f98b --- /dev/null +++ b/pkg/protobufs/notifier_grpc.pb.go @@ -0,0 +1,159 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.5.1 +// - protoc v3.21.12 +// source: notifier.proto + +package protobufs + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 + +const ( + Notifier_Notify_FullMethodName = "/proto.Notifier/Notify" + Notifier_Configure_FullMethodName = "/proto.Notifier/Configure" +) + +// NotifierClient is the client API for Notifier service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type NotifierClient interface { + Notify(ctx context.Context, in *Notification, opts ...grpc.CallOption) (*Empty, error) + Configure(ctx context.Context, in *Config, opts ...grpc.CallOption) (*Empty, error) +} + +type notifierClient struct { + cc grpc.ClientConnInterface +} + +func NewNotifierClient(cc grpc.ClientConnInterface) NotifierClient { + return ¬ifierClient{cc} +} + +func (c *notifierClient) Notify(ctx context.Context, in *Notification, opts ...grpc.CallOption) (*Empty, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(Empty) + err := c.cc.Invoke(ctx, Notifier_Notify_FullMethodName, in, out, cOpts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *notifierClient) Configure(ctx context.Context, in *Config, opts ...grpc.CallOption) (*Empty, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(Empty) + err := c.cc.Invoke(ctx, Notifier_Configure_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +// NotifierServer is the server API for Notifier service. +// All implementations must embed UnimplementedNotifierServer +// for forward compatibility. +type NotifierServer interface { + Notify(context.Context, *Notification) (*Empty, error) + Configure(context.Context, *Config) (*Empty, error) + mustEmbedUnimplementedNotifierServer() +} + +// UnimplementedNotifierServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedNotifierServer struct{} + +func (UnimplementedNotifierServer) Notify(context.Context, *Notification) (*Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Notify not implemented") +} +func (UnimplementedNotifierServer) Configure(context.Context, *Config) (*Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Configure not implemented") +} +func (UnimplementedNotifierServer) mustEmbedUnimplementedNotifierServer() {} +func (UnimplementedNotifierServer) testEmbeddedByValue() {} + +// UnsafeNotifierServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to NotifierServer will +// result in compilation errors. 
+type UnsafeNotifierServer interface { + mustEmbedUnimplementedNotifierServer() +} + +func RegisterNotifierServer(s grpc.ServiceRegistrar, srv NotifierServer) { + // If the following call pancis, it indicates UnimplementedNotifierServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } + s.RegisterService(&Notifier_ServiceDesc, srv) +} + +func _Notifier_Notify_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Notification) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotifierServer).Notify(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Notifier_Notify_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotifierServer).Notify(ctx, req.(*Notification)) + } + return interceptor(ctx, in, info, handler) +} + +func _Notifier_Configure_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Config) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotifierServer).Configure(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Notifier_Configure_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotifierServer).Configure(ctx, req.(*Config)) + } + return interceptor(ctx, in, info, handler) +} + +// Notifier_ServiceDesc is the grpc.ServiceDesc for Notifier service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Notifier_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "proto.Notifier", + HandlerType: (*NotifierServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Notify", + Handler: _Notifier_Notify_Handler, + }, + { + MethodName: "Configure", + Handler: _Notifier_Configure_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "notifier.proto", +} diff --git a/pkg/protobufs/plugin_interface.go b/pkg/protobufs/plugin_interface.go deleted file mode 100644 index baa76c8941c..00000000000 --- a/pkg/protobufs/plugin_interface.go +++ /dev/null @@ -1,47 +0,0 @@ -package protobufs - -import ( - "context" - - plugin "github.com/hashicorp/go-plugin" - "google.golang.org/grpc" -) - -type Notifier interface { - Notify(ctx context.Context, notification *Notification) (*Empty, error) - Configure(ctx context.Context, config *Config) (*Empty, error) -} - -// This is the implementation of plugin.NotifierPlugin so we can serve/consume this. -type NotifierPlugin struct { - // GRPCPlugin must still implement the Plugin interface - plugin.Plugin - // Concrete implementation, written in Go. This is only used for plugins - // that are written in Go. 
- Impl Notifier -} - -type GRPCClient struct{ client NotifierClient } - -func (m *GRPCClient) Notify(ctx context.Context, notification *Notification) (*Empty, error) { - _, err := m.client.Notify(ctx, notification) - return &Empty{}, err -} - -func (m *GRPCClient) Configure(ctx context.Context, config *Config) (*Empty, error) { - _, err := m.client.Configure(ctx, config) - return &Empty{}, err -} - -type GRPCServer struct { - Impl Notifier -} - -func (p *NotifierPlugin) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error { - RegisterNotifierServer(s, p.Impl) - return nil -} - -func (p *NotifierPlugin) GRPCClient(ctx context.Context, broker *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) { - return &GRPCClient{client: NewNotifierClient(c)}, nil -} From 8ff58ee74eb9a471c9d1eb4d5de5f20ca68cbbf0 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 10 Oct 2024 17:18:59 +0200 Subject: [PATCH 315/318] context propagation: pkg/csplugin (#3273) --- .../clinotifications/notifications.go | 10 ++++---- cmd/crowdsec/api.go | 2 +- pkg/apiserver/alerts_test.go | 4 ++-- pkg/apiserver/apiserver_test.go | 8 ++----- pkg/apiserver/jwt_test.go | 4 ++-- pkg/apiserver/machines_test.go | 2 +- pkg/csplugin/broker.go | 8 +++---- pkg/csplugin/broker_suite_test.go | 12 ++++++++-- pkg/csplugin/broker_test.go | 24 +++++++++++++------ pkg/csplugin/broker_win_test.go | 7 ++++-- pkg/csplugin/watcher_test.go | 18 ++++++++++++-- 11 files changed, 66 insertions(+), 33 deletions(-) diff --git a/cmd/crowdsec-cli/clinotifications/notifications.go b/cmd/crowdsec-cli/clinotifications/notifications.go index 0641dd1a7d4..5489faa37c8 100644 --- a/cmd/crowdsec-cli/clinotifications/notifications.go +++ b/cmd/crowdsec-cli/clinotifications/notifications.go @@ -275,7 +275,8 @@ func (cli cliNotifications) newTestCmd() *cobra.Command { Args: cobra.ExactArgs(1), DisableAutoGenTag: true, ValidArgsFunction: cli.notificationConfigFilter, - PreRunE: func(_ 
*cobra.Command, args []string) error { + PreRunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() cfg := cli.cfg() pconfigs, err := cli.getPluginConfigs() if err != nil { @@ -286,7 +287,7 @@ func (cli cliNotifications) newTestCmd() *cobra.Command { return fmt.Errorf("plugin name: '%s' does not exist", args[0]) } // Create a single profile with plugin name as notification name - return pluginBroker.Init(cfg.PluginConfig, []*csconfig.ProfileCfg{ + return pluginBroker.Init(ctx, cfg.PluginConfig, []*csconfig.ProfileCfg{ { Notifications: []string{ pcfg.Name, @@ -377,12 +378,13 @@ cscli notifications reinject -a '{"remediation": true,"scenario":"not return nil }, - RunE: func(_ *cobra.Command, _ []string) error { + RunE: func(cmd *cobra.Command, _ []string) error { var ( pluginBroker csplugin.PluginBroker pluginTomb tomb.Tomb ) + ctx := cmd.Context() cfg := cli.cfg() if alertOverride != "" { @@ -391,7 +393,7 @@ cscli notifications reinject -a '{"remediation": true,"scenario":"not } } - err := pluginBroker.Init(cfg.PluginConfig, cfg.API.Server.Profiles, cfg.ConfigPaths) + err := pluginBroker.Init(ctx, cfg.PluginConfig, cfg.API.Server.Profiles, cfg.ConfigPaths) if err != nil { return fmt.Errorf("can't initialize plugins: %w", err) } diff --git a/cmd/crowdsec/api.go b/cmd/crowdsec/api.go index 6ab41def16f..ccb0acf0209 100644 --- a/cmd/crowdsec/api.go +++ b/cmd/crowdsec/api.go @@ -40,7 +40,7 @@ func initAPIServer(ctx context.Context, cConfig *csconfig.Config) (*apiserver.AP return nil, errors.New("plugins are enabled, but config_paths.plugin_dir is not defined") } - err = pluginBroker.Init(cConfig.PluginConfig, cConfig.API.Server.Profiles, cConfig.ConfigPaths) + err = pluginBroker.Init(ctx, cConfig.PluginConfig, cConfig.API.Server.Profiles, cConfig.ConfigPaths) if err != nil { return nil, fmt.Errorf("unable to run plugin broker: %w", err) } diff --git a/pkg/apiserver/alerts_test.go b/pkg/apiserver/alerts_test.go index cd981f76542..4cc215c344f 100644 
--- a/pkg/apiserver/alerts_test.go +++ b/pkg/apiserver/alerts_test.go @@ -72,8 +72,8 @@ func InitMachineTest(t *testing.T, ctx context.Context) (*gin.Engine, models.Wat } func LoginToTestAPI(t *testing.T, ctx context.Context, router *gin.Engine, config csconfig.Config) models.WatcherAuthResponse { - body := CreateTestMachine(t, router, "") - ValidateMachine(t, "test", config.API.Server.DbConfig) + body := CreateTestMachine(t, ctx, router, "") + ValidateMachine(t, ctx, "test", config.API.Server.DbConfig) w := httptest.NewRecorder() req, _ := http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers/login", strings.NewReader(body)) diff --git a/pkg/apiserver/apiserver_test.go b/pkg/apiserver/apiserver_test.go index b04ad687e4e..cdf99462c35 100644 --- a/pkg/apiserver/apiserver_test.go +++ b/pkg/apiserver/apiserver_test.go @@ -180,9 +180,7 @@ func NewAPITestForwardedFor(t *testing.T, ctx context.Context) (*gin.Engine, csc return router, config } -func ValidateMachine(t *testing.T, machineID string, config *csconfig.DatabaseCfg) { - ctx := context.TODO() - +func ValidateMachine(t *testing.T, ctx context.Context, machineID string, config *csconfig.DatabaseCfg) { dbClient, err := database.NewClient(ctx, config) require.NoError(t, err) @@ -269,7 +267,7 @@ func readDecisionsStreamResp(t *testing.T, resp *httptest.ResponseRecorder) (map return response, resp.Code } -func CreateTestMachine(t *testing.T, router *gin.Engine, token string) string { +func CreateTestMachine(t *testing.T, ctx context.Context, router *gin.Engine, token string) string { regReq := MachineTest regReq.RegistrationToken = token b, err := json.Marshal(regReq) @@ -277,8 +275,6 @@ func CreateTestMachine(t *testing.T, router *gin.Engine, token string) string { body := string(b) - ctx := context.Background() - w := httptest.NewRecorder() req, _ := http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers", strings.NewReader(body)) req.Header.Set("User-Agent", UserAgent) diff --git 
a/pkg/apiserver/jwt_test.go b/pkg/apiserver/jwt_test.go index 710cf82ad00..f6f51763975 100644 --- a/pkg/apiserver/jwt_test.go +++ b/pkg/apiserver/jwt_test.go @@ -14,7 +14,7 @@ func TestLogin(t *testing.T) { ctx := context.Background() router, config := NewAPITest(t, ctx) - body := CreateTestMachine(t, router, "") + body := CreateTestMachine(t, ctx, router, "") // Login with machine not validated yet w := httptest.NewRecorder() @@ -53,7 +53,7 @@ func TestLogin(t *testing.T) { assert.Equal(t, `{"code":401,"message":"validation failure list:\npassword in body is required"}`, w.Body.String()) // Validate machine - ValidateMachine(t, "test", config.API.Server.DbConfig) + ValidateMachine(t, ctx, "test", config.API.Server.DbConfig) // Login with invalid password w = httptest.NewRecorder() diff --git a/pkg/apiserver/machines_test.go b/pkg/apiserver/machines_test.go index e60cec30e54..969f75707d6 100644 --- a/pkg/apiserver/machines_test.go +++ b/pkg/apiserver/machines_test.go @@ -131,7 +131,7 @@ func TestCreateMachineAlreadyExist(t *testing.T) { ctx := context.Background() router, _ := NewAPITest(t, ctx) - body := CreateTestMachine(t, router, "") + body := CreateTestMachine(t, ctx, router, "") w := httptest.NewRecorder() req, _ := http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers", strings.NewReader(body)) diff --git a/pkg/csplugin/broker.go b/pkg/csplugin/broker.go index 31d7ac82fb2..e996fa9b68c 100644 --- a/pkg/csplugin/broker.go +++ b/pkg/csplugin/broker.go @@ -72,7 +72,7 @@ type ProfileAlert struct { Alert *models.Alert } -func (pb *PluginBroker) Init(pluginCfg *csconfig.PluginCfg, profileConfigs []*csconfig.ProfileCfg, configPaths *csconfig.ConfigurationPaths) error { +func (pb *PluginBroker) Init(ctx context.Context, pluginCfg *csconfig.PluginCfg, profileConfigs []*csconfig.ProfileCfg, configPaths *csconfig.ConfigurationPaths) error { pb.PluginChannel = make(chan ProfileAlert) pb.notificationConfigsByPluginType = make(map[string][][]byte) 
pb.notificationPluginByName = make(map[string]protobufs.NotifierServer) @@ -85,7 +85,7 @@ func (pb *PluginBroker) Init(pluginCfg *csconfig.PluginCfg, profileConfigs []*cs if err := pb.loadConfig(configPaths.NotificationDir); err != nil { return fmt.Errorf("while loading plugin config: %w", err) } - if err := pb.loadPlugins(configPaths.PluginDir); err != nil { + if err := pb.loadPlugins(ctx, configPaths.PluginDir); err != nil { return fmt.Errorf("while loading plugin: %w", err) } pb.watcher = PluginWatcher{} @@ -230,7 +230,7 @@ func (pb *PluginBroker) verifyPluginBinaryWithProfile() error { return nil } -func (pb *PluginBroker) loadPlugins(path string) error { +func (pb *PluginBroker) loadPlugins(ctx context.Context, path string) error { binaryPaths, err := listFilesAtPath(path) if err != nil { return err @@ -265,7 +265,7 @@ func (pb *PluginBroker) loadPlugins(path string) error { return err } data = []byte(csstring.StrictExpand(string(data), os.LookupEnv)) - _, err = pluginClient.Configure(context.Background(), &protobufs.Config{Config: data}) + _, err = pluginClient.Configure(ctx, &protobufs.Config{Config: data}) if err != nil { return fmt.Errorf("while configuring %s: %w", pc.Name, err) } diff --git a/pkg/csplugin/broker_suite_test.go b/pkg/csplugin/broker_suite_test.go index 778bb2dfe2e..1210c67058a 100644 --- a/pkg/csplugin/broker_suite_test.go +++ b/pkg/csplugin/broker_suite_test.go @@ -1,6 +1,7 @@ package csplugin import ( + "context" "io" "os" "os/exec" @@ -96,6 +97,7 @@ func (s *PluginSuite) TearDownTest() { func (s *PluginSuite) SetupSubTest() { var err error + t := s.T() s.runDir, err = os.MkdirTemp("", "cs_plugin_test") @@ -127,6 +129,7 @@ func (s *PluginSuite) SetupSubTest() { func (s *PluginSuite) TearDownSubTest() { t := s.T() + if s.pluginBroker != nil { s.pluginBroker.Kill() s.pluginBroker = nil @@ -140,19 +143,24 @@ func (s *PluginSuite) TearDownSubTest() { os.Remove("./out") } -func (s *PluginSuite) InitBroker(procCfg *csconfig.PluginCfg) 
(*PluginBroker, error) { +func (s *PluginSuite) InitBroker(ctx context.Context, procCfg *csconfig.PluginCfg) (*PluginBroker, error) { pb := PluginBroker{} + if procCfg == nil { procCfg = &csconfig.PluginCfg{} } + profiles := csconfig.NewDefaultConfig().API.Server.Profiles profiles = append(profiles, &csconfig.ProfileCfg{ Notifications: []string{"dummy_default"}, }) - err := pb.Init(procCfg, profiles, &csconfig.ConfigurationPaths{ + + err := pb.Init(ctx, procCfg, profiles, &csconfig.ConfigurationPaths{ PluginDir: s.pluginDir, NotificationDir: s.notifDir, }) + s.pluginBroker = &pb + return s.pluginBroker, err } diff --git a/pkg/csplugin/broker_test.go b/pkg/csplugin/broker_test.go index 48f5a71f773..ae5a615b489 100644 --- a/pkg/csplugin/broker_test.go +++ b/pkg/csplugin/broker_test.go @@ -4,6 +4,7 @@ package csplugin import ( "bytes" + "context" "encoding/json" "io" "os" @@ -53,6 +54,7 @@ func (s *PluginSuite) writeconfig(config PluginConfig) { } func (s *PluginSuite) TestBrokerInit() { + ctx := context.Background() tests := []struct { name string action func(*testing.T) @@ -135,20 +137,22 @@ func (s *PluginSuite) TestBrokerInit() { tc.action(t) } - _, err := s.InitBroker(&tc.procCfg) + _, err := s.InitBroker(ctx, &tc.procCfg) cstest.RequireErrorContains(t, err, tc.expectedErr) }) } } func (s *PluginSuite) TestBrokerNoThreshold() { + ctx := context.Background() + var alerts []models.Alert DefaultEmptyTicker = 50 * time.Millisecond t := s.T() - pb, err := s.InitBroker(nil) + pb, err := s.InitBroker(ctx, nil) require.NoError(t, err) tomb := tomb.Tomb{} @@ -187,6 +191,8 @@ func (s *PluginSuite) TestBrokerNoThreshold() { } func (s *PluginSuite) TestBrokerRunGroupAndTimeThreshold_TimeFirst() { + ctx := context.Background() + // test grouping by "time" DefaultEmptyTicker = 50 * time.Millisecond @@ -198,7 +204,7 @@ func (s *PluginSuite) TestBrokerRunGroupAndTimeThreshold_TimeFirst() { cfg.GroupWait = 1 * time.Second s.writeconfig(cfg) - pb, err := s.InitBroker(nil) + pb, 
err := s.InitBroker(ctx, nil) require.NoError(t, err) tomb := tomb.Tomb{} @@ -224,6 +230,7 @@ func (s *PluginSuite) TestBrokerRunGroupAndTimeThreshold_TimeFirst() { } func (s *PluginSuite) TestBrokerRunGroupAndTimeThreshold_CountFirst() { + ctx := context.Background() DefaultEmptyTicker = 50 * time.Millisecond t := s.T() @@ -234,7 +241,7 @@ func (s *PluginSuite) TestBrokerRunGroupAndTimeThreshold_CountFirst() { cfg.GroupWait = 4 * time.Second s.writeconfig(cfg) - pb, err := s.InitBroker(nil) + pb, err := s.InitBroker(ctx, nil) require.NoError(t, err) tomb := tomb.Tomb{} @@ -264,6 +271,7 @@ func (s *PluginSuite) TestBrokerRunGroupAndTimeThreshold_CountFirst() { } func (s *PluginSuite) TestBrokerRunGroupThreshold() { + ctx := context.Background() // test grouping by "size" DefaultEmptyTicker = 50 * time.Millisecond @@ -274,7 +282,7 @@ func (s *PluginSuite) TestBrokerRunGroupThreshold() { cfg.GroupThreshold = 4 s.writeconfig(cfg) - pb, err := s.InitBroker(nil) + pb, err := s.InitBroker(ctx, nil) require.NoError(t, err) tomb := tomb.Tomb{} @@ -318,6 +326,7 @@ func (s *PluginSuite) TestBrokerRunGroupThreshold() { } func (s *PluginSuite) TestBrokerRunTimeThreshold() { + ctx := context.Background() DefaultEmptyTicker = 50 * time.Millisecond t := s.T() @@ -327,7 +336,7 @@ func (s *PluginSuite) TestBrokerRunTimeThreshold() { cfg.GroupWait = 1 * time.Second s.writeconfig(cfg) - pb, err := s.InitBroker(nil) + pb, err := s.InitBroker(ctx, nil) require.NoError(t, err) tomb := tomb.Tomb{} @@ -353,11 +362,12 @@ func (s *PluginSuite) TestBrokerRunTimeThreshold() { } func (s *PluginSuite) TestBrokerRunSimple() { + ctx := context.Background() DefaultEmptyTicker = 50 * time.Millisecond t := s.T() - pb, err := s.InitBroker(nil) + pb, err := s.InitBroker(ctx, nil) require.NoError(t, err) tomb := tomb.Tomb{} diff --git a/pkg/csplugin/broker_win_test.go b/pkg/csplugin/broker_win_test.go index b7956bdcc0a..570f23e5015 100644 --- a/pkg/csplugin/broker_win_test.go +++ 
b/pkg/csplugin/broker_win_test.go @@ -4,6 +4,7 @@ package csplugin import ( "bytes" + "context" "encoding/json" "io" "os" @@ -26,6 +27,7 @@ not if it will actually reject plugins with invalid permissions */ func (s *PluginSuite) TestBrokerInit() { + ctx := context.Background() tests := []struct { name string action func(*testing.T) @@ -59,16 +61,17 @@ func (s *PluginSuite) TestBrokerInit() { if tc.action != nil { tc.action(t) } - _, err := s.InitBroker(&tc.procCfg) + _, err := s.InitBroker(ctx, &tc.procCfg) cstest.RequireErrorContains(t, err, tc.expectedErr) }) } } func (s *PluginSuite) TestBrokerRun() { + ctx := context.Background() t := s.T() - pb, err := s.InitBroker(nil) + pb, err := s.InitBroker(ctx, nil) require.NoError(t, err) tomb := tomb.Tomb{} diff --git a/pkg/csplugin/watcher_test.go b/pkg/csplugin/watcher_test.go index b76c3c4eadd..84e63ec6493 100644 --- a/pkg/csplugin/watcher_test.go +++ b/pkg/csplugin/watcher_test.go @@ -15,11 +15,10 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/models" ) -var ctx = context.Background() - func resetTestTomb(testTomb *tomb.Tomb, pw *PluginWatcher) { testTomb.Kill(nil) <-pw.PluginEvents + if err := testTomb.Wait(); err != nil { log.Fatal(err) } @@ -46,13 +45,17 @@ func listenChannelWithTimeout(ctx context.Context, channel chan string) error { case <-ctx.Done(): return ctx.Err() } + return nil } func TestPluginWatcherInterval(t *testing.T) { + ctx := context.Background() + if runtime.GOOS == "windows" { t.Skip("Skipping test on windows because timing is not reliable") } + pw := PluginWatcher{} alertsByPluginName := make(map[string][]*models.Alert) testTomb := tomb.Tomb{} @@ -66,6 +69,7 @@ func TestPluginWatcherInterval(t *testing.T) { ct, cancel := context.WithTimeout(ctx, time.Microsecond) defer cancel() + err := listenChannelWithTimeout(ct, pw.PluginEvents) cstest.RequireErrorContains(t, err, "context deadline exceeded") resetTestTomb(&testTomb, &pw) @@ -74,6 +78,7 @@ func TestPluginWatcherInterval(t *testing.T) { 
ct, cancel = context.WithTimeout(ctx, time.Millisecond*5) defer cancel() + err = listenChannelWithTimeout(ct, pw.PluginEvents) require.NoError(t, err) resetTestTomb(&testTomb, &pw) @@ -81,9 +86,12 @@ func TestPluginWatcherInterval(t *testing.T) { } func TestPluginAlertCountWatcher(t *testing.T) { + ctx := context.Background() + if runtime.GOOS == "windows" { t.Skip("Skipping test on windows because timing is not reliable") } + pw := PluginWatcher{} alertsByPluginName := make(map[string][]*models.Alert) configs := map[string]PluginConfig{ @@ -92,28 +100,34 @@ func TestPluginAlertCountWatcher(t *testing.T) { }, } testTomb := tomb.Tomb{} + pw.Init(configs, alertsByPluginName) pw.Start(&testTomb) // Channel won't contain any events since threshold is not crossed. ct, cancel := context.WithTimeout(ctx, time.Second) defer cancel() + err := listenChannelWithTimeout(ct, pw.PluginEvents) cstest.RequireErrorContains(t, err, "context deadline exceeded") // Channel won't contain any events since threshold is not crossed. resetWatcherAlertCounter(&pw) insertNAlertsToPlugin(&pw, 4, "testPlugin") + ct, cancel = context.WithTimeout(ctx, time.Second) defer cancel() + err = listenChannelWithTimeout(ct, pw.PluginEvents) cstest.RequireErrorContains(t, err, "context deadline exceeded") // Channel will contain an event since threshold is crossed. 
resetWatcherAlertCounter(&pw) insertNAlertsToPlugin(&pw, 5, "testPlugin") + ct, cancel = context.WithTimeout(ctx, time.Second) defer cancel() + err = listenChannelWithTimeout(ct, pw.PluginEvents) require.NoError(t, err) resetTestTomb(&testTomb, &pw) From 9976616773313bb56d052996be3f0d5fcee99d4a Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 11 Oct 2024 15:59:10 +0200 Subject: [PATCH 316/318] context propagation: StreamingAcquisition() (#3274) * context propagation: StreamingAcquisition() * lint * ship with codecov.yml --- .github/generate-codecov-yml.sh | 3 + .github/workflows/go-tests.yml | 4 -- cmd/crowdsec/crowdsec.go | 2 +- pkg/acquisition/acquisition.go | 56 ++++++++++++++----- pkg/acquisition/acquisition_test.go | 52 +++++++++-------- pkg/acquisition/modules/appsec/appsec.go | 12 ++-- .../modules/cloudwatch/cloudwatch.go | 46 ++++++++++----- .../modules/cloudwatch/cloudwatch_test.go | 13 +++-- pkg/acquisition/modules/docker/docker.go | 8 +-- pkg/acquisition/modules/docker/docker_test.go | 5 +- pkg/acquisition/modules/file/file.go | 3 +- pkg/acquisition/modules/file/file_test.go | 4 +- .../modules/journalctl/journalctl.go | 8 +-- .../modules/journalctl/journalctl_test.go | 4 +- pkg/acquisition/modules/kafka/kafka.go | 6 +- pkg/acquisition/modules/kafka/kafka_test.go | 14 +++-- pkg/acquisition/modules/kinesis/kinesis.go | 34 +++++------ .../modules/kinesis/kinesis_test.go | 26 +++++---- .../modules/kubernetesaudit/k8s_audit.go | 3 +- .../modules/kubernetesaudit/k8s_audit_test.go | 9 ++- pkg/acquisition/modules/loki/loki.go | 6 +- pkg/acquisition/modules/loki/loki_test.go | 9 ++- pkg/acquisition/modules/s3/s3.go | 21 ++++--- pkg/acquisition/modules/s3/s3_test.go | 6 +- pkg/acquisition/modules/syslog/syslog.go | 9 +-- pkg/acquisition/modules/syslog/syslog_test.go | 16 ++++-- .../modules/wineventlog/wineventlog.go | 3 +- .../modules/wineventlog/wineventlog_test.go | 4 +- .../wineventlog/wineventlog_windows.go | 3 +- 29 
files changed, 235 insertions(+), 154 deletions(-) diff --git a/.github/generate-codecov-yml.sh b/.github/generate-codecov-yml.sh index cc2d652e339..ddb60d0ce80 100755 --- a/.github/generate-codecov-yml.sh +++ b/.github/generate-codecov-yml.sh @@ -7,6 +7,9 @@ cat <> .github/codecov.yml - - name: Upload unit coverage to Codecov uses: codecov/codecov-action@v4 with: diff --git a/cmd/crowdsec/crowdsec.go b/cmd/crowdsec/crowdsec.go index 460e8ab4328..c44d71d2093 100644 --- a/cmd/crowdsec/crowdsec.go +++ b/cmd/crowdsec/crowdsec.go @@ -169,7 +169,7 @@ func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.H log.Info("Starting processing data") - if err := acquisition.StartAcquisition(dataSources, inputLineChan, &acquisTomb); err != nil { + if err := acquisition.StartAcquisition(context.TODO(), dataSources, inputLineChan, &acquisTomb); err != nil { return fmt.Errorf("starting acquisition error: %w", err) } diff --git a/pkg/acquisition/acquisition.go b/pkg/acquisition/acquisition.go index 4a5226a2981..4519ea7392b 100644 --- a/pkg/acquisition/acquisition.go +++ b/pkg/acquisition/acquisition.go @@ -1,6 +1,7 @@ package acquisition import ( + "context" "errors" "fmt" "io" @@ -39,17 +40,17 @@ func (e *DataSourceUnavailableError) Unwrap() error { // The interface each datasource must implement type DataSource interface { - GetMetrics() []prometheus.Collector // Returns pointers to metrics that are managed by the module - GetAggregMetrics() []prometheus.Collector // Returns pointers to metrics that are managed by the module (aggregated mode, limits cardinality) - UnmarshalConfig([]byte) error // Decode and pre-validate the YAML datasource - anything that can be checked before runtime - Configure([]byte, *log.Entry, int) error // Complete the YAML datasource configuration and perform runtime checks. 
- ConfigureByDSN(string, map[string]string, *log.Entry, string) error // Configure the datasource - GetMode() string // Get the mode (TAIL, CAT or SERVER) - GetName() string // Get the name of the module - OneShotAcquisition(chan types.Event, *tomb.Tomb) error // Start one shot acquisition(eg, cat a file) - StreamingAcquisition(chan types.Event, *tomb.Tomb) error // Start live acquisition (eg, tail a file) - CanRun() error // Whether the datasource can run or not (eg, journalctl on BSD is a non-sense) - GetUuid() string // Get the unique identifier of the datasource + GetMetrics() []prometheus.Collector // Returns pointers to metrics that are managed by the module + GetAggregMetrics() []prometheus.Collector // Returns pointers to metrics that are managed by the module (aggregated mode, limits cardinality) + UnmarshalConfig([]byte) error // Decode and pre-validate the YAML datasource - anything that can be checked before runtime + Configure([]byte, *log.Entry, int) error // Complete the YAML datasource configuration and perform runtime checks. 
+ ConfigureByDSN(string, map[string]string, *log.Entry, string) error // Configure the datasource + GetMode() string // Get the mode (TAIL, CAT or SERVER) + GetName() string // Get the name of the module + OneShotAcquisition(chan types.Event, *tomb.Tomb) error // Start one shot acquisition(eg, cat a file) + StreamingAcquisition(context.Context, chan types.Event, *tomb.Tomb) error // Start live acquisition (eg, tail a file) + CanRun() error // Whether the datasource can run or not (eg, journalctl on BSD is a non-sense) + GetUuid() string // Get the unique identifier of the datasource Dump() interface{} } @@ -242,8 +243,10 @@ func LoadAcquisitionFromFile(config *csconfig.CrowdsecServiceCfg, prom *csconfig for { var sub configuration.DataSourceCommonCfg - err = dec.Decode(&sub) + idx += 1 + + err = dec.Decode(&sub) if err != nil { if !errors.Is(err, io.EOF) { return nil, fmt.Errorf("failed to yaml decode %s: %w", acquisFile, err) @@ -283,6 +286,7 @@ func LoadAcquisitionFromFile(config *csconfig.CrowdsecServiceCfg, prom *csconfig uniqueId := uuid.NewString() sub.UniqueId = uniqueId + src, err := DataSourceConfigure(sub, metrics_level) if err != nil { var dserr *DataSourceUnavailableError @@ -290,29 +294,36 @@ func LoadAcquisitionFromFile(config *csconfig.CrowdsecServiceCfg, prom *csconfig log.Error(err) continue } + return nil, fmt.Errorf("while configuring datasource of type %s from %s (position: %d): %w", sub.Source, acquisFile, idx, err) } + if sub.TransformExpr != "" { vm, err := expr.Compile(sub.TransformExpr, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...) 
if err != nil { return nil, fmt.Errorf("while compiling transform expression '%s' for datasource %s in %s (position: %d): %w", sub.TransformExpr, sub.Source, acquisFile, idx, err) } + transformRuntimes[uniqueId] = vm } + sources = append(sources, *src) } } + return sources, nil } func GetMetrics(sources []DataSource, aggregated bool) error { var metrics []prometheus.Collector + for i := range sources { if aggregated { metrics = sources[i].GetMetrics() } else { metrics = sources[i].GetAggregMetrics() } + for _, metric := range metrics { if err := prometheus.Register(metric); err != nil { if _, ok := err.(prometheus.AlreadyRegisteredError); !ok { @@ -322,12 +333,14 @@ func GetMetrics(sources []DataSource, aggregated bool) error { } } } + return nil } func transform(transformChan chan types.Event, output chan types.Event, AcquisTomb *tomb.Tomb, transformRuntime *vm.Program, logger *log.Entry) { defer trace.CatchPanic("crowdsec/acquis") logger.Infof("transformer started") + for { select { case <-AcquisTomb.Dying(): @@ -335,15 +348,18 @@ func transform(transformChan chan types.Event, output chan types.Event, AcquisTo return case evt := <-transformChan: logger.Tracef("Received event %s", evt.Line.Raw) + out, err := expr.Run(transformRuntime, map[string]interface{}{"evt": &evt}) if err != nil { logger.Errorf("while running transform expression: %s, sending event as-is", err) output <- evt } + if out == nil { logger.Errorf("transform expression returned nil, sending event as-is") output <- evt } + switch v := out.(type) { case string: logger.Tracef("transform expression returned %s", v) @@ -351,18 +367,22 @@ func transform(transformChan chan types.Event, output chan types.Event, AcquisTo output <- evt case []interface{}: logger.Tracef("transform expression returned %v", v) //nolint:asasalint // We actually want to log the slice content + for _, line := range v { l, ok := line.(string) if !ok { logger.Errorf("transform expression returned []interface{}, but cannot assert an 
element to string") output <- evt + continue } + evt.Line.Raw = l output <- evt } case []string: logger.Tracef("transform expression returned %v", v) + for _, line := range v { evt.Line.Raw = line output <- evt @@ -375,7 +395,7 @@ func transform(transformChan chan types.Event, output chan types.Event, AcquisTo } } -func StartAcquisition(sources []DataSource, output chan types.Event, AcquisTomb *tomb.Tomb) error { +func StartAcquisition(ctx context.Context, sources []DataSource, output chan types.Event, AcquisTomb *tomb.Tomb) error { // Don't wait if we have no sources, as it will hang forever if len(sources) == 0 { return nil @@ -387,32 +407,40 @@ func StartAcquisition(sources []DataSource, output chan types.Event, AcquisTomb AcquisTomb.Go(func() error { defer trace.CatchPanic("crowdsec/acquis") + var err error outChan := output + log.Debugf("datasource %s UUID: %s", subsrc.GetName(), subsrc.GetUuid()) + if transformRuntime, ok := transformRuntimes[subsrc.GetUuid()]; ok { log.Infof("transform expression found for datasource %s", subsrc.GetName()) + transformChan := make(chan types.Event) outChan = transformChan transformLogger := log.WithFields(log.Fields{ "component": "transform", "datasource": subsrc.GetName(), }) + AcquisTomb.Go(func() error { transform(outChan, output, AcquisTomb, transformRuntime, transformLogger) return nil }) } + if subsrc.GetMode() == configuration.TAIL_MODE { - err = subsrc.StreamingAcquisition(outChan, AcquisTomb) + err = subsrc.StreamingAcquisition(ctx, outChan, AcquisTomb) } else { err = subsrc.OneShotAcquisition(outChan, AcquisTomb) } + if err != nil { // if one of the acqusition returns an error, we kill the others to properly shutdown AcquisTomb.Kill(err) } + return nil }) } diff --git a/pkg/acquisition/acquisition_test.go b/pkg/acquisition/acquisition_test.go index e39199f9cdb..e82b3df54c2 100644 --- a/pkg/acquisition/acquisition_test.go +++ b/pkg/acquisition/acquisition_test.go @@ -1,6 +1,7 @@ package acquisition import ( + 
"context" "errors" "fmt" "strings" @@ -56,14 +57,16 @@ func (f *MockSource) Configure(cfg []byte, logger *log.Entry, metricsLevel int) return nil } -func (f *MockSource) GetMode() string { return f.Mode } -func (f *MockSource) OneShotAcquisition(chan types.Event, *tomb.Tomb) error { return nil } -func (f *MockSource) StreamingAcquisition(chan types.Event, *tomb.Tomb) error { return nil } -func (f *MockSource) CanRun() error { return nil } -func (f *MockSource) GetMetrics() []prometheus.Collector { return nil } -func (f *MockSource) GetAggregMetrics() []prometheus.Collector { return nil } -func (f *MockSource) Dump() interface{} { return f } -func (f *MockSource) GetName() string { return "mock" } +func (f *MockSource) GetMode() string { return f.Mode } +func (f *MockSource) OneShotAcquisition(chan types.Event, *tomb.Tomb) error { return nil } +func (f *MockSource) StreamingAcquisition(context.Context, chan types.Event, *tomb.Tomb) error { + return nil +} +func (f *MockSource) CanRun() error { return nil } +func (f *MockSource) GetMetrics() []prometheus.Collector { return nil } +func (f *MockSource) GetAggregMetrics() []prometheus.Collector { return nil } +func (f *MockSource) Dump() interface{} { return f } +func (f *MockSource) GetName() string { return "mock" } func (f *MockSource) ConfigureByDSN(string, map[string]string, *log.Entry, string) error { return errors.New("not supported") } @@ -327,7 +330,7 @@ func (f *MockCat) OneShotAcquisition(out chan types.Event, tomb *tomb.Tomb) erro return nil } -func (f *MockCat) StreamingAcquisition(chan types.Event, *tomb.Tomb) error { +func (f *MockCat) StreamingAcquisition(context.Context, chan types.Event, *tomb.Tomb) error { return errors.New("can't run in tail") } func (f *MockCat) CanRun() error { return nil } @@ -366,7 +369,7 @@ func (f *MockTail) OneShotAcquisition(out chan types.Event, tomb *tomb.Tomb) err return errors.New("can't run in cat mode") } -func (f *MockTail) StreamingAcquisition(out chan types.Event, t 
*tomb.Tomb) error { +func (f *MockTail) StreamingAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { for range 10 { evt := types.Event{} evt.Line.Src = "test" @@ -389,6 +392,7 @@ func (f *MockTail) GetUuid() string { return "" } // func StartAcquisition(sources []DataSource, output chan types.Event, AcquisTomb *tomb.Tomb) error { func TestStartAcquisitionCat(t *testing.T) { + ctx := context.Background() sources := []DataSource{ &MockCat{}, } @@ -396,7 +400,7 @@ func TestStartAcquisitionCat(t *testing.T) { acquisTomb := tomb.Tomb{} go func() { - if err := StartAcquisition(sources, out, &acquisTomb); err != nil { + if err := StartAcquisition(ctx, sources, out, &acquisTomb); err != nil { t.Errorf("unexpected error") } }() @@ -416,6 +420,7 @@ READLOOP: } func TestStartAcquisitionTail(t *testing.T) { + ctx := context.Background() sources := []DataSource{ &MockTail{}, } @@ -423,7 +428,7 @@ func TestStartAcquisitionTail(t *testing.T) { acquisTomb := tomb.Tomb{} go func() { - if err := StartAcquisition(sources, out, &acquisTomb); err != nil { + if err := StartAcquisition(ctx, sources, out, &acquisTomb); err != nil { t.Errorf("unexpected error") } }() @@ -450,7 +455,7 @@ type MockTailError struct { MockTail } -func (f *MockTailError) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (f *MockTailError) StreamingAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { for range 10 { evt := types.Event{} evt.Line.Src = "test" @@ -463,6 +468,7 @@ func (f *MockTailError) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) } func TestStartAcquisitionTailError(t *testing.T) { + ctx := context.Background() sources := []DataSource{ &MockTailError{}, } @@ -470,7 +476,7 @@ func TestStartAcquisitionTailError(t *testing.T) { acquisTomb := tomb.Tomb{} go func() { - if err := StartAcquisition(sources, out, &acquisTomb); err != nil && err.Error() != "got error (tomb)" { + if err := StartAcquisition(ctx, sources, out, 
&acquisTomb); err != nil && err.Error() != "got error (tomb)" { t.Errorf("expected error, got '%s'", err) } }() @@ -501,14 +507,16 @@ func (f *MockSourceByDSN) UnmarshalConfig(cfg []byte) error { return nil } func (f *MockSourceByDSN) Configure(cfg []byte, logger *log.Entry, metricsLevel int) error { return nil } -func (f *MockSourceByDSN) GetMode() string { return f.Mode } -func (f *MockSourceByDSN) OneShotAcquisition(chan types.Event, *tomb.Tomb) error { return nil } -func (f *MockSourceByDSN) StreamingAcquisition(chan types.Event, *tomb.Tomb) error { return nil } -func (f *MockSourceByDSN) CanRun() error { return nil } -func (f *MockSourceByDSN) GetMetrics() []prometheus.Collector { return nil } -func (f *MockSourceByDSN) GetAggregMetrics() []prometheus.Collector { return nil } -func (f *MockSourceByDSN) Dump() interface{} { return f } -func (f *MockSourceByDSN) GetName() string { return "mockdsn" } +func (f *MockSourceByDSN) GetMode() string { return f.Mode } +func (f *MockSourceByDSN) OneShotAcquisition(chan types.Event, *tomb.Tomb) error { return nil } +func (f *MockSourceByDSN) StreamingAcquisition(context.Context, chan types.Event, *tomb.Tomb) error { + return nil +} +func (f *MockSourceByDSN) CanRun() error { return nil } +func (f *MockSourceByDSN) GetMetrics() []prometheus.Collector { return nil } +func (f *MockSourceByDSN) GetAggregMetrics() []prometheus.Collector { return nil } +func (f *MockSourceByDSN) Dump() interface{} { return f } +func (f *MockSourceByDSN) GetName() string { return "mockdsn" } func (f *MockSourceByDSN) ConfigureByDSN(dsn string, labels map[string]string, logger *log.Entry, uuid string) error { dsn = strings.TrimPrefix(dsn, "mockdsn://") if dsn != "test_expect" { diff --git a/pkg/acquisition/modules/appsec/appsec.go b/pkg/acquisition/modules/appsec/appsec.go index 8a93326c7e3..5161b631c33 100644 --- a/pkg/acquisition/modules/appsec/appsec.go +++ b/pkg/acquisition/modules/appsec/appsec.go @@ -59,7 +59,7 @@ type AppsecSource struct { 
AppsecConfigs map[string]appsec.AppsecConfig lapiURL string AuthCache AuthCache - AppsecRunners []AppsecRunner //one for each go-routine + AppsecRunners []AppsecRunner // one for each go-routine } // Struct to handle cache of authentication @@ -172,7 +172,7 @@ func (w *AppsecSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLe w.InChan = make(chan appsec.ParsedRequest) appsecCfg := appsec.AppsecConfig{Logger: w.logger.WithField("component", "appsec_config")} - //let's load the associated appsec_config: + // let's load the associated appsec_config: if w.config.AppsecConfigPath != "" { err := appsecCfg.LoadByPath(w.config.AppsecConfigPath) if err != nil { @@ -201,7 +201,7 @@ func (w *AppsecSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLe for nbRoutine := range w.config.Routines { appsecRunnerUUID := uuid.New().String() - //we copy AppsecRutime for each runner + // we copy AppsecRutime for each runner wrt := *w.AppsecRuntime wrt.Logger = w.logger.Dup().WithField("runner_uuid", appsecRunnerUUID) runner := AppsecRunner{ @@ -220,7 +220,7 @@ func (w *AppsecSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLe w.logger.Infof("Created %d appsec runners", len(w.AppsecRunners)) - //We don´t use the wrapper provided by coraza because we want to fully control what happens when a rule match to send the information in crowdsec + // We don´t use the wrapper provided by coraza because we want to fully control what happens when a rule match to send the information in crowdsec w.mux.HandleFunc(w.config.Path, w.appsecHandler) return nil } @@ -241,7 +241,7 @@ func (w *AppsecSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) er return errors.New("AppSec datasource does not support command line acquisition") } -func (w *AppsecSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (w *AppsecSource) StreamingAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { w.outChan = out t.Go(func() 
error { defer trace.CatchPanic("crowdsec/acquis/appsec/live") @@ -292,7 +292,7 @@ func (w *AppsecSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) }) <-t.Dying() w.logger.Info("Shutting down Appsec server") - //xx let's clean up the appsec runners :) + // xx let's clean up the appsec runners :) appsec.AppsecRulesDetails = make(map[int]appsec.RulesDetails) w.server.Shutdown(context.TODO()) return nil diff --git a/pkg/acquisition/modules/cloudwatch/cloudwatch.go b/pkg/acquisition/modules/cloudwatch/cloudwatch.go index d6f33b68050..e4b6c95d77f 100644 --- a/pkg/acquisition/modules/cloudwatch/cloudwatch.go +++ b/pkg/acquisition/modules/cloudwatch/cloudwatch.go @@ -159,6 +159,7 @@ func (cw *CloudwatchSource) Configure(yamlConfig []byte, logger *log.Entry, Metr if err != nil { return err } + cw.metricsLevel = MetricsLevel cw.logger = logger.WithField("group", cw.Config.GroupName) @@ -175,16 +176,18 @@ func (cw *CloudwatchSource) Configure(yamlConfig []byte, logger *log.Entry, Metr if *cw.Config.MaxStreamAge > *cw.Config.StreamReadTimeout { cw.logger.Warningf("max_stream_age > stream_read_timeout, stream might keep being opened/closed") } + cw.logger.Tracef("aws_config_dir set to %s", *cw.Config.AwsConfigDir) if *cw.Config.AwsConfigDir != "" { _, err := os.Stat(*cw.Config.AwsConfigDir) if err != nil { cw.logger.Errorf("can't read aws_config_dir '%s' got err %s", *cw.Config.AwsConfigDir, err) - return fmt.Errorf("can't read aws_config_dir %s got err %s ", *cw.Config.AwsConfigDir, err) + return fmt.Errorf("can't read aws_config_dir %s got err %w ", *cw.Config.AwsConfigDir, err) } + os.Setenv("AWS_SDK_LOAD_CONFIG", "1") - //as aws sdk relies on $HOME, let's allow the user to override it :) + // as aws sdk relies on $HOME, let's allow the user to override it :) os.Setenv("AWS_CONFIG_FILE", fmt.Sprintf("%s/config", *cw.Config.AwsConfigDir)) os.Setenv("AWS_SHARED_CREDENTIALS_FILE", fmt.Sprintf("%s/credentials", *cw.Config.AwsConfigDir)) } else { @@ -192,25 +195,30 
@@ func (cw *CloudwatchSource) Configure(yamlConfig []byte, logger *log.Entry, Metr cw.logger.Errorf("aws_region is not specified, specify it or aws_config_dir") return errors.New("aws_region is not specified, specify it or aws_config_dir") } + os.Setenv("AWS_REGION", *cw.Config.AwsRegion) } if err := cw.newClient(); err != nil { return err } + cw.streamIndexes = make(map[string]string) targetStream := "*" + if cw.Config.StreamRegexp != nil { if _, err := regexp.Compile(*cw.Config.StreamRegexp); err != nil { return fmt.Errorf("while compiling regexp '%s': %w", *cw.Config.StreamRegexp, err) } + targetStream = *cw.Config.StreamRegexp } else if cw.Config.StreamName != nil { targetStream = *cw.Config.StreamName } cw.logger.Infof("Adding cloudwatch group '%s' (stream:%s) to datasources", cw.Config.GroupName, targetStream) + return nil } @@ -231,24 +239,29 @@ func (cw *CloudwatchSource) newClient() error { if sess == nil { return errors.New("failed to create aws session") } + if v := os.Getenv("AWS_ENDPOINT_FORCE"); v != "" { cw.logger.Debugf("[testing] overloading endpoint with %s", v) cw.cwClient = cloudwatchlogs.New(sess, aws.NewConfig().WithEndpoint(v)) } else { cw.cwClient = cloudwatchlogs.New(sess) } + if cw.cwClient == nil { return errors.New("failed to create cloudwatch client") } + return nil } -func (cw *CloudwatchSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (cw *CloudwatchSource) StreamingAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { cw.t = t monitChan := make(chan LogStreamTailConfig) + t.Go(func() error { return cw.LogStreamManager(monitChan, out) }) + return cw.WatchLogGroupForStreams(monitChan) } @@ -279,6 +292,7 @@ func (cw *CloudwatchSource) Dump() interface{} { func (cw *CloudwatchSource) WatchLogGroupForStreams(out chan LogStreamTailConfig) error { cw.logger.Debugf("Starting to watch group (interval:%s)", cw.Config.PollNewStreamInterval) ticker := 
time.NewTicker(*cw.Config.PollNewStreamInterval) + var startFrom *string for { @@ -289,11 +303,12 @@ func (cw *CloudwatchSource) WatchLogGroupForStreams(out chan LogStreamTailConfig case <-ticker.C: hasMoreStreams := true startFrom = nil + for hasMoreStreams { cw.logger.Tracef("doing the call to DescribeLogStreamsPagesWithContext") ctx := context.Background() - //there can be a lot of streams in a group, and we're only interested in those recently written to, so we sort by LastEventTime + // there can be a lot of streams in a group, and we're only interested in those recently written to, so we sort by LastEventTime err := cw.cwClient.DescribeLogStreamsPagesWithContext( ctx, &cloudwatchlogs.DescribeLogStreamsInput{ @@ -305,13 +320,14 @@ func (cw *CloudwatchSource) WatchLogGroupForStreams(out chan LogStreamTailConfig }, func(page *cloudwatchlogs.DescribeLogStreamsOutput, lastPage bool) bool { cw.logger.Tracef("in helper of DescribeLogStreamsPagesWithContext") + for _, event := range page.LogStreams { startFrom = page.NextToken - //we check if the stream has been written to recently enough to be monitored + // we check if the stream has been written to recently enough to be monitored if event.LastIngestionTime != nil { - //aws uses millisecond since the epoch + // aws uses millisecond since the epoch oldest := time.Now().UTC().Add(-*cw.Config.MaxStreamAge) - //TBD : verify that this is correct : Unix 2nd arg expects Nanoseconds, and have a code that is more explicit. + // TBD : verify that this is correct : Unix 2nd arg expects Nanoseconds, and have a code that is more explicit. 
LastIngestionTime := time.Unix(0, *event.LastIngestionTime*int64(time.Millisecond)) if LastIngestionTime.Before(oldest) { cw.logger.Tracef("stop iteration, %s reached oldest age, stop (%s < %s)", *event.LogStreamName, LastIngestionTime, time.Now().UTC().Add(-*cw.Config.MaxStreamAge)) @@ -319,7 +335,7 @@ func (cw *CloudwatchSource) WatchLogGroupForStreams(out chan LogStreamTailConfig return false } cw.logger.Tracef("stream %s is elligible for monitoring", *event.LogStreamName) - //the stream has been updated recently, check if we should monitor it + // the stream has been updated recently, check if we should monitor it var expectMode int if !cw.Config.UseTimeMachine { expectMode = types.LIVE @@ -383,7 +399,7 @@ func (cw *CloudwatchSource) LogStreamManager(in chan LogStreamTailConfig, outCha for idx, stream := range cw.monitoredStreams { if newStream.GroupName == stream.GroupName && newStream.StreamName == stream.StreamName { - //stream exists, but is dead, remove it from list + // stream exists, but is dead, remove it from list if !stream.t.Alive() { cw.logger.Debugf("stream %s already exists, but is dead", newStream.StreamName) cw.monitoredStreams = append(cw.monitoredStreams[:idx], cw.monitoredStreams[idx+1:]...) 
@@ -397,7 +413,7 @@ func (cw *CloudwatchSource) LogStreamManager(in chan LogStreamTailConfig, outCha } } - //let's start watching this stream + // let's start watching this stream if shouldCreate { if cw.metricsLevel != configuration.METRICS_NONE { openedStreams.With(prometheus.Labels{"group": newStream.GroupName}).Inc() @@ -445,7 +461,7 @@ func (cw *CloudwatchSource) TailLogStream(cfg *LogStreamTailConfig, outChan chan var startFrom *string lastReadMessage := time.Now().UTC() ticker := time.NewTicker(cfg.PollStreamInterval) - //resume at existing index if we already had + // resume at existing index if we already had streamIndexMutex.Lock() v := cw.streamIndexes[cfg.GroupName+"+"+cfg.StreamName] streamIndexMutex.Unlock() @@ -566,7 +582,7 @@ func (cw *CloudwatchSource) ConfigureByDSN(dsn string, labels map[string]string, if len(v) != 1 { return errors.New("expected zero or one argument for 'start_date'") } - //let's reuse our parser helper so that a ton of date formats are supported + // let's reuse our parser helper so that a ton of date formats are supported strdate, startDate := parser.GenDateParse(v[0]) cw.logger.Debugf("parsed '%s' as '%s'", v[0], strdate) cw.Config.StartTime = &startDate @@ -574,7 +590,7 @@ func (cw *CloudwatchSource) ConfigureByDSN(dsn string, labels map[string]string, if len(v) != 1 { return errors.New("expected zero or one argument for 'end_date'") } - //let's reuse our parser helper so that a ton of date formats are supported + // let's reuse our parser helper so that a ton of date formats are supported strdate, endDate := parser.GenDateParse(v[0]) cw.logger.Debugf("parsed '%s' as '%s'", v[0], strdate) cw.Config.EndTime = &endDate @@ -582,7 +598,7 @@ func (cw *CloudwatchSource) ConfigureByDSN(dsn string, labels map[string]string, if len(v) != 1 { return errors.New("expected zero or one argument for 'backlog'") } - //let's reuse our parser helper so that a ton of date formats are supported + // let's reuse our parser helper so that a ton 
of date formats are supported duration, err := time.ParseDuration(v[0]) if err != nil { return fmt.Errorf("unable to parse '%s' as duration: %w", v[0], err) @@ -618,7 +634,7 @@ func (cw *CloudwatchSource) ConfigureByDSN(dsn string, labels map[string]string, } func (cw *CloudwatchSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error { - //StreamName string, Start time.Time, End time.Time + // StreamName string, Start time.Time, End time.Time config := LogStreamTailConfig{ GroupName: cw.Config.GroupName, StreamName: *cw.Config.StreamName, diff --git a/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go b/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go index bab7593f26f..d62c3f6e3dd 100644 --- a/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go +++ b/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go @@ -1,6 +1,7 @@ package cloudwatchacquisition import ( + "context" "errors" "fmt" "net" @@ -34,6 +35,7 @@ func deleteAllLogGroups(t *testing.T, cw *CloudwatchSource) { input := &cloudwatchlogs.DescribeLogGroupsInput{} result, err := cw.cwClient.DescribeLogGroups(input) require.NoError(t, err) + for _, group := range result.LogGroups { _, err := cw.cwClient.DeleteLogGroup(&cloudwatchlogs.DeleteLogGroupInput{ LogGroupName: group.LogGroupName, @@ -62,18 +64,22 @@ func TestMain(m *testing.M) { if runtime.GOOS == "windows" { os.Exit(0) } + if err := checkForLocalStackAvailability(); err != nil { log.Fatalf("local stack error : %s", err) } + def_PollNewStreamInterval = 1 * time.Second def_PollStreamInterval = 1 * time.Second def_StreamReadTimeout = 10 * time.Second def_MaxStreamAge = 5 * time.Second def_PollDeadStreamInterval = 5 * time.Second + os.Exit(m.Run()) } func TestWatchLogGroupForStreams(t *testing.T) { + ctx := context.Background() if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } @@ -447,7 +453,7 @@ stream_name: test_stream`), dbgLogger.Infof("running StreamingAcquisition") actmb := tomb.Tomb{} actmb.Go(func() error { - 
err := cw.StreamingAcquisition(out, &actmb) + err := cw.StreamingAcquisition(ctx, out, &actmb) dbgLogger.Infof("acquis done") cstest.RequireErrorContains(t, err, tc.expectedStartErr) return nil @@ -503,7 +509,6 @@ stream_name: test_stream`), if len(res) != 0 { t.Fatalf("leftover unmatched results : %v", res) } - } if tc.teardown != nil { tc.teardown(t, &cw) @@ -513,6 +518,7 @@ stream_name: test_stream`), } func TestConfiguration(t *testing.T) { + ctx := context.Background() if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } @@ -571,7 +577,7 @@ stream_name: test_stream`), switch cw.GetMode() { case "tail": - err = cw.StreamingAcquisition(out, &tmb) + err = cw.StreamingAcquisition(ctx, out, &tmb) case "cat": err = cw.OneShotAcquisition(out, &tmb) } @@ -798,7 +804,6 @@ func TestOneShotAcquisition(t *testing.T) { if len(res) != 0 { t.Fatalf("leftover unmatched results : %v", res) } - } if tc.teardown != nil { tc.teardown(t, &cw) diff --git a/pkg/acquisition/modules/docker/docker.go b/pkg/acquisition/modules/docker/docker.go index 44fee0a99a2..874b1556fd5 100644 --- a/pkg/acquisition/modules/docker/docker.go +++ b/pkg/acquisition/modules/docker/docker.go @@ -518,7 +518,7 @@ func (d *DockerSource) WatchContainer(monitChan chan *ContainerConfig, deleteCha } } -func (d *DockerSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (d *DockerSource) StreamingAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { d.t = t monitChan := make(chan *ContainerConfig) deleteChan := make(chan *ContainerConfig) @@ -589,11 +589,11 @@ func (d *DockerSource) TailDocker(container *ContainerConfig, outChan chan types outChan <- evt d.logger.Debugf("Sent line to parsing: %+v", evt.Line.Raw) case <-readerTomb.Dying(): - //This case is to handle temporarily losing the connection to the docker socket - //The only known case currently is when using docker-socket-proxy (and maybe a docker daemon restart) + // This case is to handle 
temporarily losing the connection to the docker socket + // The only known case currently is when using docker-socket-proxy (and maybe a docker daemon restart) d.logger.Debugf("readerTomb dying for container %s, removing it from runningContainerState", container.Name) deleteChan <- container - //Also reset the Since to avoid re-reading logs + // Also reset the Since to avoid re-reading logs d.Config.Since = time.Now().UTC().Format(time.RFC3339) d.containerLogsOptions.Since = d.Config.Since return nil diff --git a/pkg/acquisition/modules/docker/docker_test.go b/pkg/acquisition/modules/docker/docker_test.go index e332569fb3a..e394c9cbe79 100644 --- a/pkg/acquisition/modules/docker/docker_test.go +++ b/pkg/acquisition/modules/docker/docker_test.go @@ -120,6 +120,7 @@ type mockDockerCli struct { } func TestStreamingAcquisition(t *testing.T) { + ctx := context.Background() log.SetOutput(os.Stdout) log.SetLevel(log.InfoLevel) log.Info("Test 'TestStreamingAcquisition'") @@ -185,7 +186,7 @@ container_name_regexp: readerTomb := &tomb.Tomb{} streamTomb := tomb.Tomb{} streamTomb.Go(func() error { - return dockerSource.StreamingAcquisition(out, &dockerTomb) + return dockerSource.StreamingAcquisition(ctx, out, &dockerTomb) }) readerTomb.Go(func() error { time.Sleep(1 * time.Second) @@ -245,7 +246,7 @@ func (cli *mockDockerCli) ContainerLogs(ctx context.Context, container string, o for _, line := range data { startLineByte := make([]byte, 8) - binary.LittleEndian.PutUint32(startLineByte, 1) //stdout stream + binary.LittleEndian.PutUint32(startLineByte, 1) // stdout stream binary.BigEndian.PutUint32(startLineByte[4:], uint32(len(line))) ret += fmt.Sprintf("%s%s", startLineByte, line) } diff --git a/pkg/acquisition/modules/file/file.go b/pkg/acquisition/modules/file/file.go index 85b4c1b5b32..2d2df3ff4d4 100644 --- a/pkg/acquisition/modules/file/file.go +++ b/pkg/acquisition/modules/file/file.go @@ -3,6 +3,7 @@ package fileacquisition import ( "bufio" "compress/gzip" + "context" 
"errors" "fmt" "io" @@ -320,7 +321,7 @@ func (f *FileSource) CanRun() error { return nil } -func (f *FileSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (f *FileSource) StreamingAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { f.logger.Debug("Starting live acquisition") t.Go(func() error { return f.monitorNewFiles(out, t) diff --git a/pkg/acquisition/modules/file/file_test.go b/pkg/acquisition/modules/file/file_test.go index 5d38552b3c5..3db0042ba2f 100644 --- a/pkg/acquisition/modules/file/file_test.go +++ b/pkg/acquisition/modules/file/file_test.go @@ -1,6 +1,7 @@ package fileacquisition_test import ( + "context" "fmt" "os" "runtime" @@ -243,6 +244,7 @@ filename: test_files/test_delete.log`, } func TestLiveAcquisition(t *testing.T) { + ctx := context.Background() permDeniedFile := "/etc/shadow" permDeniedError := "unable to read /etc/shadow : open /etc/shadow: permission denied" testPattern := "test_files/*.log" @@ -394,7 +396,7 @@ force_inotify: true`, testPattern), }() } - err = f.StreamingAcquisition(out, &tomb) + err = f.StreamingAcquisition(ctx, out, &tomb) cstest.RequireErrorContains(t, err, tc.expectedErr) if tc.expectedLines != 0 { diff --git a/pkg/acquisition/modules/journalctl/journalctl.go b/pkg/acquisition/modules/journalctl/journalctl.go index 1336fac4578..b9cda54a472 100644 --- a/pkg/acquisition/modules/journalctl/journalctl.go +++ b/pkg/acquisition/modules/journalctl/journalctl.go @@ -113,7 +113,7 @@ func (j *JournalCtlSource) runJournalCtl(out chan types.Event, t *tomb.Tomb) err return readLine(stdoutscanner, stdoutChan, errChan) }) t.Go(func() error { - //looks like journalctl closes stderr quite early, so ignore its status (but not its output) + // looks like journalctl closes stderr quite early, so ignore its status (but not its output) return readLine(stderrScanner, stderrChan, nil) }) @@ -122,7 +122,7 @@ func (j *JournalCtlSource) runJournalCtl(out chan types.Event, t *tomb.Tomb) err 
case <-t.Dying(): logger.Infof("journalctl datasource %s stopping", j.src) cancel() - cmd.Wait() //avoid zombie process + cmd.Wait() // avoid zombie process return nil case stdoutLine := <-stdoutChan: l := types.Line{} @@ -217,7 +217,7 @@ func (j *JournalCtlSource) ConfigureByDSN(dsn string, labels map[string]string, j.config.Labels = labels j.config.UniqueId = uuid - //format for the DSN is : journalctl://filters=FILTER1&filters=FILTER2 + // format for the DSN is : journalctl://filters=FILTER1&filters=FILTER2 if !strings.HasPrefix(dsn, "journalctl://") { return fmt.Errorf("invalid DSN %s for journalctl source, must start with journalctl://", dsn) } @@ -269,7 +269,7 @@ func (j *JournalCtlSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb return err } -func (j *JournalCtlSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (j *JournalCtlSource) StreamingAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { t.Go(func() error { defer trace.CatchPanic("crowdsec/acquis/journalctl/streaming") return j.runJournalCtl(out, t) diff --git a/pkg/acquisition/modules/journalctl/journalctl_test.go b/pkg/acquisition/modules/journalctl/journalctl_test.go index 53e2d0802ad..c416bb5d23e 100644 --- a/pkg/acquisition/modules/journalctl/journalctl_test.go +++ b/pkg/acquisition/modules/journalctl/journalctl_test.go @@ -1,6 +1,7 @@ package journalctlacquisition import ( + "context" "os" "os/exec" "path/filepath" @@ -187,6 +188,7 @@ journalctl_filter: } func TestStreaming(t *testing.T) { + ctx := context.Background() if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } @@ -250,7 +252,7 @@ journalctl_filter: }() } - err = j.StreamingAcquisition(out, &tomb) + err = j.StreamingAcquisition(ctx, out, &tomb) cstest.AssertErrorContains(t, err, ts.expectedErr) if err != nil { diff --git a/pkg/acquisition/modules/kafka/kafka.go b/pkg/acquisition/modules/kafka/kafka.go index a0d7fc39bcc..9fd5fc2a035 100644 --- 
a/pkg/acquisition/modules/kafka/kafka.go +++ b/pkg/acquisition/modules/kafka/kafka.go @@ -23,9 +23,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) -var ( - dataSourceName = "kafka" -) +var dataSourceName = "kafka" var linesRead = prometheus.NewCounterVec( prometheus.CounterOpts{ @@ -204,7 +202,7 @@ func (k *KafkaSource) RunReader(out chan types.Event, t *tomb.Tomb) error { } } -func (k *KafkaSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (k *KafkaSource) StreamingAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { k.logger.Infof("start reader on brokers '%+v' with topic '%s'", k.Config.Brokers, k.Config.Topic) t.Go(func() error { diff --git a/pkg/acquisition/modules/kafka/kafka_test.go b/pkg/acquisition/modules/kafka/kafka_test.go index 7b467142cc9..d796166a6ca 100644 --- a/pkg/acquisition/modules/kafka/kafka_test.go +++ b/pkg/acquisition/modules/kafka/kafka_test.go @@ -80,9 +80,9 @@ group_id: crowdsec`, } } -func writeToKafka(w *kafka.Writer, logs []string) { +func writeToKafka(ctx context.Context, w *kafka.Writer, logs []string) { for idx, log := range logs { - err := w.WriteMessages(context.Background(), kafka.Message{ + err := w.WriteMessages(ctx, kafka.Message{ Key: []byte(strconv.Itoa(idx)), // create an arbitrary message payload for the value Value: []byte(log), @@ -128,6 +128,7 @@ func createTopic(topic string, broker string) { } func TestStreamingAcquisition(t *testing.T) { + ctx := context.Background() if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } @@ -176,12 +177,12 @@ topic: crowdsecplaintext`), subLogger, configuration.METRICS_NONE) tomb := tomb.Tomb{} out := make(chan types.Event) - err = k.StreamingAcquisition(out, &tomb) + err = k.StreamingAcquisition(ctx, out, &tomb) cstest.AssertErrorContains(t, err, ts.expectedErr) actualLines := 0 - go writeToKafka(w, ts.logs) + go writeToKafka(ctx, w, ts.logs) READLOOP: for { select { @@ -199,6 +200,7 @@ topic: 
crowdsecplaintext`), subLogger, configuration.METRICS_NONE) } func TestStreamingAcquisitionWithSSL(t *testing.T) { + ctx := context.Background() if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } @@ -252,12 +254,12 @@ tls: tomb := tomb.Tomb{} out := make(chan types.Event) - err = k.StreamingAcquisition(out, &tomb) + err = k.StreamingAcquisition(ctx, out, &tomb) cstest.AssertErrorContains(t, err, ts.expectedErr) actualLines := 0 - go writeToKafka(w2, ts.logs) + go writeToKafka(ctx, w2, ts.logs) READLOOP: for { select { diff --git a/pkg/acquisition/modules/kinesis/kinesis.go b/pkg/acquisition/modules/kinesis/kinesis.go index 0e6c1980fa9..ca3a847dbfb 100644 --- a/pkg/acquisition/modules/kinesis/kinesis.go +++ b/pkg/acquisition/modules/kinesis/kinesis.go @@ -3,6 +3,7 @@ package kinesisacquisition import ( "bytes" "compress/gzip" + "context" "encoding/json" "errors" "fmt" @@ -29,7 +30,7 @@ type KinesisConfiguration struct { configuration.DataSourceCommonCfg `yaml:",inline"` StreamName string `yaml:"stream_name"` StreamARN string `yaml:"stream_arn"` - UseEnhancedFanOut bool `yaml:"use_enhanced_fanout"` //Use RegisterStreamConsumer and SubscribeToShard instead of GetRecords + UseEnhancedFanOut bool `yaml:"use_enhanced_fanout"` // Use RegisterStreamConsumer and SubscribeToShard instead of GetRecords AwsProfile *string `yaml:"aws_profile"` AwsRegion string `yaml:"aws_region"` AwsEndpoint string `yaml:"aws_endpoint"` @@ -114,8 +115,8 @@ func (k *KinesisSource) newClient() error { func (k *KinesisSource) GetMetrics() []prometheus.Collector { return []prometheus.Collector{linesRead, linesReadShards} - } + func (k *KinesisSource) GetAggregMetrics() []prometheus.Collector { return []prometheus.Collector{linesRead, linesReadShards} } @@ -188,7 +189,6 @@ func (k *KinesisSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) e func (k *KinesisSource) decodeFromSubscription(record []byte) ([]CloudwatchSubscriptionLogEvent, error) { b := 
bytes.NewBuffer(record) r, err := gzip.NewReader(b) - if err != nil { k.logger.Error(err) return nil, err @@ -299,8 +299,8 @@ func (k *KinesisSource) ParseAndPushRecords(records []*kinesis.Record, out chan var data []CloudwatchSubscriptionLogEvent var err error if k.Config.FromSubscription { - //The AWS docs says that the data is base64 encoded - //but apparently GetRecords decodes it for us ? + // The AWS docs says that the data is base64 encoded + // but apparently GetRecords decodes it for us ? data, err = k.decodeFromSubscription(record.Data) if err != nil { logger.Errorf("Cannot decode data: %s", err) @@ -335,9 +335,9 @@ func (k *KinesisSource) ParseAndPushRecords(records []*kinesis.Record, out chan func (k *KinesisSource) ReadFromSubscription(reader kinesis.SubscribeToShardEventStreamReader, out chan types.Event, shardId string, streamName string) error { logger := k.logger.WithField("shard_id", shardId) - //ghetto sync, kinesis allows to subscribe to a closed shard, which will make the goroutine exit immediately - //and we won't be able to start a new one if this is the first one started by the tomb - //TODO: look into parent shards to see if a shard is closed before starting to read it ? + // ghetto sync, kinesis allows to subscribe to a closed shard, which will make the goroutine exit immediately + // and we won't be able to start a new one if this is the first one started by the tomb + // TODO: look into parent shards to see if a shard is closed before starting to read it ? 
time.Sleep(time.Second) for { select { @@ -420,7 +420,7 @@ func (k *KinesisSource) EnhancedRead(out chan types.Event, t *tomb.Tomb) error { case <-t.Dying(): k.logger.Infof("Kinesis source is dying") k.shardReaderTomb.Kill(nil) - _ = k.shardReaderTomb.Wait() //we don't care about the error as we kill the tomb ourselves + _ = k.shardReaderTomb.Wait() // we don't care about the error as we kill the tomb ourselves err = k.DeregisterConsumer() if err != nil { return fmt.Errorf("cannot deregister consumer: %w", err) @@ -431,7 +431,7 @@ func (k *KinesisSource) EnhancedRead(out chan types.Event, t *tomb.Tomb) error { if k.shardReaderTomb.Err() != nil { return k.shardReaderTomb.Err() } - //All goroutines have exited without error, so a resharding event, start again + // All goroutines have exited without error, so a resharding event, start again k.logger.Debugf("All reader goroutines have exited, resharding event or periodic resubscribe") continue } @@ -441,15 +441,17 @@ func (k *KinesisSource) EnhancedRead(out chan types.Event, t *tomb.Tomb) error { func (k *KinesisSource) ReadFromShard(out chan types.Event, shardId string) error { logger := k.logger.WithField("shard", shardId) logger.Debugf("Starting to read shard") - sharIt, err := k.kClient.GetShardIterator(&kinesis.GetShardIteratorInput{ShardId: aws.String(shardId), + sharIt, err := k.kClient.GetShardIterator(&kinesis.GetShardIteratorInput{ + ShardId: aws.String(shardId), StreamName: &k.Config.StreamName, - ShardIteratorType: aws.String(kinesis.ShardIteratorTypeLatest)}) + ShardIteratorType: aws.String(kinesis.ShardIteratorTypeLatest), + }) if err != nil { logger.Errorf("Cannot get shard iterator: %s", err) return fmt.Errorf("cannot get shard iterator: %w", err) } it := sharIt.ShardIterator - //AWS recommends to wait for a second between calls to GetRecords for a given shard + // AWS recommends to wait for a second between calls to GetRecords for a given shard ticker := time.NewTicker(time.Second) for { select { @@ 
-460,7 +462,7 @@ func (k *KinesisSource) ReadFromShard(out chan types.Event, shardId string) erro switch err.(type) { case *kinesis.ProvisionedThroughputExceededException: logger.Warn("Provisioned throughput exceeded") - //TODO: implement exponential backoff + // TODO: implement exponential backoff continue case *kinesis.ExpiredIteratorException: logger.Warn("Expired iterator") @@ -506,7 +508,7 @@ func (k *KinesisSource) ReadFromStream(out chan types.Event, t *tomb.Tomb) error case <-t.Dying(): k.logger.Info("kinesis source is dying") k.shardReaderTomb.Kill(nil) - _ = k.shardReaderTomb.Wait() //we don't care about the error as we kill the tomb ourselves + _ = k.shardReaderTomb.Wait() // we don't care about the error as we kill the tomb ourselves return nil case <-k.shardReaderTomb.Dying(): reason := k.shardReaderTomb.Err() @@ -520,7 +522,7 @@ func (k *KinesisSource) ReadFromStream(out chan types.Event, t *tomb.Tomb) error } } -func (k *KinesisSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (k *KinesisSource) StreamingAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { t.Go(func() error { defer trace.CatchPanic("crowdsec/acquis/kinesis/streaming") if k.Config.UseEnhancedFanOut { diff --git a/pkg/acquisition/modules/kinesis/kinesis_test.go b/pkg/acquisition/modules/kinesis/kinesis_test.go index 46e404aa49b..027cbde9240 100644 --- a/pkg/acquisition/modules/kinesis/kinesis_test.go +++ b/pkg/acquisition/modules/kinesis/kinesis_test.go @@ -3,6 +3,7 @@ package kinesisacquisition import ( "bytes" "compress/gzip" + "context" "encoding/json" "fmt" "net" @@ -60,8 +61,8 @@ func GenSubObject(i int) []byte { gz := gzip.NewWriter(&b) gz.Write(body) gz.Close() - //AWS actually base64 encodes the data, but it looks like kinesis automatically decodes it at some point - //localstack does not do it, so let's just write a raw gzipped stream + // AWS actually base64 encodes the data, but it looks like kinesis automatically decodes 
it at some point + // localstack does not do it, so let's just write a raw gzipped stream return b.Bytes() } @@ -99,10 +100,10 @@ func TestMain(m *testing.M) { os.Setenv("AWS_ACCESS_KEY_ID", "foobar") os.Setenv("AWS_SECRET_ACCESS_KEY", "foobar") - //delete_streams() - //create_streams() + // delete_streams() + // create_streams() code := m.Run() - //delete_streams() + // delete_streams() os.Exit(code) } @@ -149,6 +150,7 @@ stream_arn: arn:aws:kinesis:eu-west-1:123456789012:stream/my-stream`, } func TestReadFromStream(t *testing.T) { + ctx := context.Background() if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } @@ -176,11 +178,11 @@ stream_name: stream-1-shard`, } tomb := &tomb.Tomb{} out := make(chan types.Event) - err = f.StreamingAcquisition(out, tomb) + err = f.StreamingAcquisition(ctx, out, tomb) if err != nil { t.Fatalf("Error starting source: %s", err) } - //Allow the datasource to start listening to the stream + // Allow the datasource to start listening to the stream time.Sleep(4 * time.Second) WriteToStream(f.Config.StreamName, test.count, test.shards, false) for i := range test.count { @@ -193,6 +195,7 @@ stream_name: stream-1-shard`, } func TestReadFromMultipleShards(t *testing.T) { + ctx := context.Background() if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } @@ -220,11 +223,11 @@ stream_name: stream-2-shards`, } tomb := &tomb.Tomb{} out := make(chan types.Event) - err = f.StreamingAcquisition(out, tomb) + err = f.StreamingAcquisition(ctx, out, tomb) if err != nil { t.Fatalf("Error starting source: %s", err) } - //Allow the datasource to start listening to the stream + // Allow the datasource to start listening to the stream time.Sleep(4 * time.Second) WriteToStream(f.Config.StreamName, test.count, test.shards, false) c := 0 @@ -239,6 +242,7 @@ stream_name: stream-2-shards`, } func TestFromSubscription(t *testing.T) { + ctx := context.Background() if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } 
@@ -267,11 +271,11 @@ from_subscription: true`, } tomb := &tomb.Tomb{} out := make(chan types.Event) - err = f.StreamingAcquisition(out, tomb) + err = f.StreamingAcquisition(ctx, out, tomb) if err != nil { t.Fatalf("Error starting source: %s", err) } - //Allow the datasource to start listening to the stream + // Allow the datasource to start listening to the stream time.Sleep(4 * time.Second) WriteToStream(f.Config.StreamName, test.count, test.shards, true) for i := range test.count { diff --git a/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go b/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go index 8ba5b2d06e0..f979b044dcc 100644 --- a/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go +++ b/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go @@ -135,7 +135,7 @@ func (ka *KubernetesAuditSource) OneShotAcquisition(out chan types.Event, t *tom return errors.New("k8s-audit datasource does not support one-shot acquisition") } -func (ka *KubernetesAuditSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (ka *KubernetesAuditSource) StreamingAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { ka.outChan = out t.Go(func() error { defer trace.CatchPanic("crowdsec/acquis/k8s-audit/live") @@ -164,7 +164,6 @@ func (ka *KubernetesAuditSource) Dump() interface{} { } func (ka *KubernetesAuditSource) webhookHandler(w http.ResponseWriter, r *http.Request) { - if ka.metricsLevel != configuration.METRICS_NONE { requestCount.WithLabelValues(ka.addr).Inc() } diff --git a/pkg/acquisition/modules/kubernetesaudit/k8s_audit_test.go b/pkg/acquisition/modules/kubernetesaudit/k8s_audit_test.go index 020bd4c91a0..a086a756e4a 100644 --- a/pkg/acquisition/modules/kubernetesaudit/k8s_audit_test.go +++ b/pkg/acquisition/modules/kubernetesaudit/k8s_audit_test.go @@ -1,6 +1,7 @@ package kubernetesauditacquisition import ( + "context" "net/http/httptest" "strings" "testing" @@ -52,6 +53,7 @@ listen_addr: 0.0.0.0`, } func 
TestInvalidConfig(t *testing.T) { + ctx := context.Background() tests := []struct { name string config string @@ -83,7 +85,7 @@ webhook_path: /k8s-audit`, err = f.Configure([]byte(test.config), subLogger, configuration.METRICS_NONE) require.NoError(t, err) - f.StreamingAcquisition(out, tb) + f.StreamingAcquisition(ctx, out, tb) time.Sleep(1 * time.Second) tb.Kill(nil) @@ -98,6 +100,7 @@ webhook_path: /k8s-audit`, } func TestHandler(t *testing.T) { + ctx := context.Background() tests := []struct { name string config string @@ -257,14 +260,14 @@ webhook_path: /k8s-audit`, req := httptest.NewRequest(test.method, "/k8s-audit", strings.NewReader(test.body)) w := httptest.NewRecorder() - f.StreamingAcquisition(out, tb) + f.StreamingAcquisition(ctx, out, tb) f.webhookHandler(w, req) res := w.Result() assert.Equal(t, test.expectedStatusCode, res.StatusCode) - //time.Sleep(1 * time.Second) + // time.Sleep(1 * time.Second) require.NoError(t, err) tb.Kill(nil) diff --git a/pkg/acquisition/modules/loki/loki.go b/pkg/acquisition/modules/loki/loki.go index 15c454723ee..f867feeb84b 100644 --- a/pkg/acquisition/modules/loki/loki.go +++ b/pkg/acquisition/modules/loki/loki.go @@ -319,9 +319,9 @@ func (l *LokiSource) readOneEntry(entry lokiclient.Entry, labels map[string]stri } } -func (l *LokiSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (l *LokiSource) StreamingAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { l.Client.SetTomb(t) - readyCtx, cancel := context.WithTimeout(context.Background(), l.Config.WaitForReady) + readyCtx, cancel := context.WithTimeout(ctx, l.Config.WaitForReady) defer cancel() err := l.Client.Ready(readyCtx) if err != nil { @@ -329,7 +329,7 @@ func (l *LokiSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) er } ll := l.logger.WithField("websocket_url", l.lokiWebsocket) t.Go(func() error { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(ctx) 
defer cancel() respChan := l.Client.QueryRange(ctx, true) if err != nil { diff --git a/pkg/acquisition/modules/loki/loki_test.go b/pkg/acquisition/modules/loki/loki_test.go index 2fd2b61e995..627200217f5 100644 --- a/pkg/acquisition/modules/loki/loki_test.go +++ b/pkg/acquisition/modules/loki/loki_test.go @@ -439,7 +439,7 @@ query: > t.Fatalf("Unexpected error : %s", err) } - err = lokiSource.StreamingAcquisition(out, &lokiTomb) + err = lokiSource.StreamingAcquisition(ctx, out, &lokiTomb) cstest.AssertErrorContains(t, err, ts.streamErr) if ts.streamErr != "" { @@ -449,7 +449,7 @@ query: > time.Sleep(time.Second * 2) // We need to give time to start reading from the WS readTomb := tomb.Tomb{} - readCtx, cancel := context.WithTimeout(context.Background(), time.Second*10) + readCtx, cancel := context.WithTimeout(ctx, time.Second*10) count := 0 readTomb.Go(func() error { @@ -492,6 +492,7 @@ query: > } func TestStopStreaming(t *testing.T) { + ctx := context.Background() if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } @@ -519,15 +520,13 @@ query: > lokiTomb := &tomb.Tomb{} - err = lokiSource.StreamingAcquisition(out, lokiTomb) + err = lokiSource.StreamingAcquisition(ctx, out, lokiTomb) if err != nil { t.Fatalf("Unexpected error : %s", err) } time.Sleep(time.Second * 2) - ctx := context.Background() - err = feedLoki(ctx, subLogger, 1, title) if err != nil { t.Fatalf("Unexpected error : %s", err) diff --git a/pkg/acquisition/modules/s3/s3.go b/pkg/acquisition/modules/s3/s3.go index a9835ab4974..ed1964edebf 100644 --- a/pkg/acquisition/modules/s3/s3.go +++ b/pkg/acquisition/modules/s3/s3.go @@ -38,7 +38,7 @@ type S3Configuration struct { AwsEndpoint string `yaml:"aws_endpoint"` BucketName string `yaml:"bucket_name"` Prefix string `yaml:"prefix"` - Key string `yaml:"-"` //Only for DSN acquisition + Key string `yaml:"-"` // Only for DSN acquisition PollingMethod string `yaml:"polling_method"` PollingInterval int `yaml:"polling_interval"` SQSName string 
`yaml:"sqs_name"` @@ -338,7 +338,7 @@ func (s *S3Source) sqsPoll() error { out, err := s.sqsClient.ReceiveMessageWithContext(s.ctx, &sqs.ReceiveMessageInput{ QueueUrl: aws.String(s.Config.SQSName), MaxNumberOfMessages: aws.Int64(10), - WaitTimeSeconds: aws.Int64(20), //Probably no need to make it configurable ? + WaitTimeSeconds: aws.Int64(20), // Probably no need to make it configurable ? }) if err != nil { logger.Errorf("Error while polling SQS: %s", err) @@ -353,7 +353,7 @@ func (s *S3Source) sqsPoll() error { bucket, key, err := s.extractBucketAndPrefix(message.Body) if err != nil { logger.Errorf("Error while parsing SQS message: %s", err) - //Always delete the message to avoid infinite loop + // Always delete the message to avoid infinite loop _, err = s.sqsClient.DeleteMessage(&sqs.DeleteMessageInput{ QueueUrl: aws.String(s.Config.SQSName), ReceiptHandle: message.ReceiptHandle, @@ -379,7 +379,7 @@ func (s *S3Source) sqsPoll() error { } func (s *S3Source) readFile(bucket string, key string) error { - //TODO: Handle SSE-C + // TODO: Handle SSE-C var scanner *bufio.Scanner logger := s.logger.WithFields(log.Fields{ @@ -392,14 +392,13 @@ func (s *S3Source) readFile(bucket string, key string) error { Bucket: aws.String(bucket), Key: aws.String(key), }) - if err != nil { return fmt.Errorf("failed to get object %s/%s: %w", bucket, key, err) } defer output.Body.Close() if strings.HasSuffix(key, ".gz") { - //This *might* be a gzipped file, but sometimes the SDK will decompress the data for us (it's not clear when it happens, only had the issue with cloudtrail logs) + // This *might* be a gzipped file, but sometimes the SDK will decompress the data for us (it's not clear when it happens, only had the issue with cloudtrail logs) header := make([]byte, 2) _, err := output.Body.Read(header) if err != nil { @@ -613,7 +612,7 @@ func (s *S3Source) ConfigureByDSN(dsn string, labels map[string]string, logger * pathParts := strings.Split(args[0], "/") s.logger.Debugf("pathParts: 
%v", pathParts) - //FIXME: handle s3://bucket/ + // FIXME: handle s3://bucket/ if len(pathParts) == 1 { s.Config.BucketName = pathParts[0] s.Config.Prefix = "" @@ -656,7 +655,7 @@ func (s *S3Source) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error return err } } else { - //No key, get everything in the bucket based on the prefix + // No key, get everything in the bucket based on the prefix objects, err := s.getBucketContent() if err != nil { return err @@ -672,11 +671,11 @@ func (s *S3Source) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error return nil } -func (s *S3Source) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (s *S3Source) StreamingAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { s.t = t s.out = out - s.readerChan = make(chan S3Object, 100) //FIXME: does this needs to be buffered? - s.ctx, s.cancel = context.WithCancel(context.Background()) + s.readerChan = make(chan S3Object, 100) // FIXME: does this needs to be buffered? 
+ s.ctx, s.cancel = context.WithCancel(ctx) s.logger.Infof("starting acquisition of %s/%s", s.Config.BucketName, s.Config.Prefix) t.Go(func() error { s.readManager() diff --git a/pkg/acquisition/modules/s3/s3_test.go b/pkg/acquisition/modules/s3/s3_test.go index 93e166dfec5..05a974517a0 100644 --- a/pkg/acquisition/modules/s3/s3_test.go +++ b/pkg/acquisition/modules/s3/s3_test.go @@ -272,6 +272,7 @@ func TestDSNAcquis(t *testing.T) { } func TestListPolling(t *testing.T) { + ctx := context.Background() tests := []struct { name string config string @@ -331,7 +332,7 @@ prefix: foo/ } }() - err = f.StreamingAcquisition(out, &tb) + err = f.StreamingAcquisition(ctx, out, &tb) if err != nil { t.Fatalf("unexpected error: %s", err.Error()) } @@ -348,6 +349,7 @@ prefix: foo/ } func TestSQSPoll(t *testing.T) { + ctx := context.Background() tests := []struct { name string config string @@ -411,7 +413,7 @@ sqs_name: test } }() - err = f.StreamingAcquisition(out, &tb) + err = f.StreamingAcquisition(ctx, out, &tb) if err != nil { t.Fatalf("unexpected error: %s", err.Error()) } diff --git a/pkg/acquisition/modules/syslog/syslog.go b/pkg/acquisition/modules/syslog/syslog.go index 06c32e62f77..5315096fb9b 100644 --- a/pkg/acquisition/modules/syslog/syslog.go +++ b/pkg/acquisition/modules/syslog/syslog.go @@ -1,6 +1,7 @@ package syslogacquisition import ( + "context" "errors" "fmt" "net" @@ -105,7 +106,7 @@ func (s *SyslogSource) UnmarshalConfig(yamlConfig []byte) error { } if s.config.Addr == "" { - s.config.Addr = "127.0.0.1" //do we want a usable or secure default ? + s.config.Addr = "127.0.0.1" // do we want a usable or secure default ? 
} if s.config.Port == 0 { s.config.Port = 514 @@ -135,7 +136,7 @@ func (s *SyslogSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLe return nil } -func (s *SyslogSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (s *SyslogSource) StreamingAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { c := make(chan syslogserver.SyslogMessage) s.server = &syslogserver.SyslogServer{Logger: s.logger.WithField("syslog", "internal"), MaxMessageLen: s.config.MaxMessageLen} s.server.SetChannel(c) @@ -152,7 +153,8 @@ func (s *SyslogSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) } func (s *SyslogSource) buildLogFromSyslog(ts time.Time, hostname string, - appname string, pid string, msg string) string { + appname string, pid string, msg string, +) string { ret := "" if !ts.IsZero() { ret += ts.Format("Jan 2 15:04:05") @@ -178,7 +180,6 @@ func (s *SyslogSource) buildLogFromSyslog(ts time.Time, hostname string, ret += msg } return ret - } func (s *SyslogSource) handleSyslogMsg(out chan types.Event, t *tomb.Tomb, c chan syslogserver.SyslogMessage) error { diff --git a/pkg/acquisition/modules/syslog/syslog_test.go b/pkg/acquisition/modules/syslog/syslog_test.go index 1750f375138..57fa3e8747b 100644 --- a/pkg/acquisition/modules/syslog/syslog_test.go +++ b/pkg/acquisition/modules/syslog/syslog_test.go @@ -1,6 +1,7 @@ package syslogacquisition import ( + "context" "fmt" "net" "runtime" @@ -80,6 +81,7 @@ func writeToSyslog(logs []string) { } func TestStreamingAcquisition(t *testing.T) { + ctx := context.Background() tests := []struct { name string config string @@ -100,8 +102,10 @@ listen_addr: 127.0.0.1`, listen_port: 4242 listen_addr: 127.0.0.1`, expectedLines: 2, - logs: []string{`<13>1 2021-05-18T11:58:40.828081+02:00 mantis sshd 49340 - [timeQuality isSynced="0" tzKnown="1"] blabla`, - `<13>1 2021-05-18T12:12:37.560695+02:00 mantis sshd 49340 - [timeQuality isSynced="0" tzKnown="1"] blabla2[foobar]`}, + 
logs: []string{ + `<13>1 2021-05-18T11:58:40.828081+02:00 mantis sshd 49340 - [timeQuality isSynced="0" tzKnown="1"] blabla`, + `<13>1 2021-05-18T12:12:37.560695+02:00 mantis sshd 49340 - [timeQuality isSynced="0" tzKnown="1"] blabla2[foobar]`, + }, }, { name: "RFC3164", @@ -109,10 +113,12 @@ listen_addr: 127.0.0.1`, listen_port: 4242 listen_addr: 127.0.0.1`, expectedLines: 3, - logs: []string{`<13>May 18 12:37:56 mantis sshd[49340]: blabla2[foobar]`, + logs: []string{ + `<13>May 18 12:37:56 mantis sshd[49340]: blabla2[foobar]`, `<13>May 18 12:37:56 mantis sshd[49340]: blabla2`, `<13>May 18 12:37:56 mantis sshd: blabla2`, - `<13>May 18 12:37:56 mantis sshd`}, + `<13>May 18 12:37:56 mantis sshd`, + }, }, } if runtime.GOOS != "windows" { @@ -139,7 +145,7 @@ listen_addr: 127.0.0.1`, } tomb := tomb.Tomb{} out := make(chan types.Event) - err = s.StreamingAcquisition(out, &tomb) + err = s.StreamingAcquisition(ctx, out, &tomb) cstest.AssertErrorContains(t, err, ts.expectedErr) if ts.expectedErr != "" { return diff --git a/pkg/acquisition/modules/wineventlog/wineventlog.go b/pkg/acquisition/modules/wineventlog/wineventlog.go index 44035d0a708..6d522d8d8cb 100644 --- a/pkg/acquisition/modules/wineventlog/wineventlog.go +++ b/pkg/acquisition/modules/wineventlog/wineventlog.go @@ -3,6 +3,7 @@ package wineventlogacquisition import ( + "context" "errors" "github.com/prometheus/client_golang/prometheus" @@ -59,7 +60,7 @@ func (w *WinEventLogSource) CanRun() error { return errors.New("windows event log acquisition is only supported on Windows") } -func (w *WinEventLogSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (w *WinEventLogSource) StreamingAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { return nil } diff --git a/pkg/acquisition/modules/wineventlog/wineventlog_test.go b/pkg/acquisition/modules/wineventlog/wineventlog_test.go index 2ea0e365be5..ae6cb776909 100644 --- 
a/pkg/acquisition/modules/wineventlog/wineventlog_test.go +++ b/pkg/acquisition/modules/wineventlog/wineventlog_test.go @@ -3,6 +3,7 @@ package wineventlogacquisition import ( + "context" "runtime" "testing" "time" @@ -129,6 +130,7 @@ event_level: bla`, } func TestLiveAcquisition(t *testing.T) { + ctx := context.Background() if runtime.GOOS != "windows" { t.Skip("Skipping test on non-windows OS") } @@ -190,7 +192,7 @@ event_ids: c := make(chan types.Event) f := WinEventLogSource{} f.Configure([]byte(test.config), subLogger, configuration.METRICS_NONE) - f.StreamingAcquisition(c, to) + f.StreamingAcquisition(ctx, c, to) time.Sleep(time.Second) lines := test.expectedLines go func() { diff --git a/pkg/acquisition/modules/wineventlog/wineventlog_windows.go b/pkg/acquisition/modules/wineventlog/wineventlog_windows.go index 4f2384d71db..087c20eb70e 100644 --- a/pkg/acquisition/modules/wineventlog/wineventlog_windows.go +++ b/pkg/acquisition/modules/wineventlog/wineventlog_windows.go @@ -1,6 +1,7 @@ package wineventlogacquisition import ( + "context" "encoding/xml" "errors" "fmt" @@ -325,7 +326,7 @@ func (w *WinEventLogSource) CanRun() error { return nil } -func (w *WinEventLogSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (w *WinEventLogSource) StreamingAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { t.Go(func() error { defer trace.CatchPanic("crowdsec/acquis/wineventlog/streaming") return w.getEvents(out, t) From b2ac65bfb6fd80435a6e64f43e645892fa31ca50 Mon Sep 17 00:00:00 2001 From: blotus Date: Wed, 16 Oct 2024 16:55:18 +0200 Subject: [PATCH 317/318] avoid deadlock when deleting decisions if PAPI is half configured (#3283) --- pkg/apiserver/apiserver.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go index bdf2d4148cc..35f9beaf635 100644 --- a/pkg/apiserver/apiserver.go +++ b/pkg/apiserver/apiserver.go @@ -255,7 +255,7 @@ func 
NewServer(ctx context.Context, config *csconfig.LocalApiServerCfg) (*APISer controller.AlertsAddChan = apiClient.AlertsAddChan - if config.ConsoleConfig.IsPAPIEnabled() { + if config.ConsoleConfig.IsPAPIEnabled() && config.OnlineClient.Credentials.PapiURL != "" { if apiClient.apiClient.IsEnrolled() { log.Info("Machine is enrolled in the console, Loading PAPI Client") @@ -340,7 +340,7 @@ func (s *APIServer) initAPIC(ctx context.Context) { // csConfig.API.Server.ConsoleConfig.ShareCustomScenarios if s.apic.apiClient.IsEnrolled() { - if s.consoleConfig.IsPAPIEnabled() { + if s.consoleConfig.IsPAPIEnabled() && s.papi != nil { if s.papi.URL != "" { log.Info("Starting PAPI decision receiver") s.papi.pullTomb.Go(func() error { return s.papiPull(ctx) }) From d8bc17b17009467945dd47c9b90d061bbf6de899 Mon Sep 17 00:00:00 2001 From: blotus Date: Wed, 16 Oct 2024 16:55:32 +0200 Subject: [PATCH 318/318] wineventlog: add support for replaying evtx files (#3278) --- go.mod | 2 +- go.sum | 2 + .../modules/wineventlog/test_files/Setup.evtx | Bin 0 -> 69632 bytes .../wineventlog/wineventlog_windows.go | 160 ++++++++++++++++-- ...og_test.go => wineventlog_windows_test.go} | 94 ++++++++-- pkg/exprhelpers/helpers.go | 2 +- pkg/exprhelpers/xml.go | 100 ++++++++--- 7 files changed, 313 insertions(+), 47 deletions(-) create mode 100644 pkg/acquisition/modules/wineventlog/test_files/Setup.evtx rename pkg/acquisition/modules/wineventlog/{wineventlog_test.go => wineventlog_windows_test.go} (73%) diff --git a/go.mod b/go.mod index b02d3b76840..f28f21c6eb4 100644 --- a/go.mod +++ b/go.mod @@ -16,7 +16,7 @@ require ( github.com/appleboy/gin-jwt/v2 v2.9.2 github.com/aws/aws-lambda-go v1.47.0 github.com/aws/aws-sdk-go v1.52.0 - github.com/beevik/etree v1.3.0 + github.com/beevik/etree v1.4.1 github.com/blackfireio/osinfo v1.0.5 github.com/bluele/gcache v0.0.2 github.com/buger/jsonparser v1.1.1 diff --git a/go.sum b/go.sum index 7aaea1587b8..b2bd77c9915 100644 --- a/go.sum +++ b/go.sum @@ -58,6 
+58,8 @@ github.com/aws/aws-sdk-go v1.52.0 h1:ptgek/4B2v/ljsjYSEvLQ8LTD+SQyrqhOOWvHc/VGPI github.com/aws/aws-sdk-go v1.52.0/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/beevik/etree v1.3.0 h1:hQTc+pylzIKDb23yYprodCWWTt+ojFfUZyzU09a/hmU= github.com/beevik/etree v1.3.0/go.mod h1:aiPf89g/1k3AShMVAzriilpcE4R/Vuor90y83zVZWFc= +github.com/beevik/etree v1.4.1 h1:PmQJDDYahBGNKDcpdX8uPy1xRCwoCGVUiW669MEirVI= +github.com/beevik/etree v1.4.1/go.mod h1:gPNJNaBGVZ9AwsidazFZyygnd+0pAU38N4D+WemwKNs= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= diff --git a/pkg/acquisition/modules/wineventlog/test_files/Setup.evtx b/pkg/acquisition/modules/wineventlog/test_files/Setup.evtx new file mode 100644 index 0000000000000000000000000000000000000000..2c4f8b0f680d0cf86be8d25825fc6a94576a5fe2 GIT binary patch literal 69632 zcmeI43yf6N8OOhwogHQ$%J2&c39?Ah6Pcbrlt@+rTV43oH9TccCb? 
zZm3#g+G-n3Oi8MtPZMiss|_XAXlrX~2@un?#zNYrG-~=turW3EvHkzg+yl#H=q||Y zEb0GD?%vnA_k8#F{qJ{P7glz6wsdu8ESxS14h&))u_Cj|NI1&}+UMQgSyOb&2fB$a z76B0u0TB=Z5fA|p5CIVo0TB=Z5f~MLmEE1oH}`HqA3vL>2kY?uAdWx2#O$r5W@~pJ zxF^VV&MV&kdvo7>aQF>o#?6jQF#CHk%83m>V-`!8EyXaz*`|XdTyrnVWSWUGnf?*v z-xD0?vE4Y<8M5ap?7tV~S+fs?%IFF1#dA*u+t(s*3+}l;bRXs!o<0)XbNDfCKL;-7 znl8Wm%va8zZ?+Hj_Wr)|jz=1jt6q;(L?Tumxp;Ew32za%7bN%p@Ur)he)WrAX}#?M zJPC6xtFpVX$p>WGUtVl0ZM$Wx*9Nfvc`WhUgOkr!6x$B#v2L8~$BE)dy=_Le0o!U< z+7sqV9FNLidYr@~miQ@ZD z*^5SohF*oQyx;n6jN4kf3wIuHC7MN-4GrB2ySzt_?@U;$D}6i4>OihLakc?Vix=bS z$71#g7~6xBX33;Q;G;Y_y zfpiNIvA78DRBUswOdZchi?%z_0gTBr+Cm`1Uzx8DvFdsEhl41OVPw(v~R!-X)-Ozl8+`!t(3Oh zkBfXj*z-=AEqB>yN4K-YV{JpemX_cRFFjsr%W%g57*4ee;7POhO-`_iID-P+UjMTy zW6{|=@tMwW(L~B+Xs8Ee_#m?Hxe`3U4rll_S5Vfyt7r$jX|$#EHzk2)HzH#{et{JU zOT-yk>Dv376Ck`fVCw-SODs_osdRE;FlPE3B)S|q(-2axvztz{pUxw9$I&E zDaj0g%vOZst6ux&CSMV7JGr|nwd)nTcvt4H|1O6m49D;ajbc5@v|<+}=b^qBq7Pp)o^|jU;ffJlZbI1|?o`ITl>^Xj#-bDP@O0(8;4YV79nW6F zBJaeHFBjh(Muq6ah$WbRVUYinGEU}S3lpe0;74V4Ad>MKmIL?2 zVsJJd5+vw!uAg_Vy2$1X8uYy!)lFN-2W{}f$;zM()k+(Dvkgbn2qPKy-p#h^6IXjd z_9LHHcpD(nqrG6%hwB*R!sRsJZj0gZ)poVjp~|YmuNkE^Xqr?eTH{)$P{mt+thTn{uq}hfkCoQwFd^5sc|H-)ZIvXtX zdkS9P4)4?VP4NB*zIV=s)9L>N)_6x4K`z_XvG7>!`fk>`8Reyk*jn>nZ+k9kKVY z9{#6J8lVCG+MRCF&<0za>~GY#o=FFCHM^}^mzR2?-ZtWj4*a-9buAvPj@TYS!_TEp z&Hwgom3fOtx!R+?#ITMYM?5+Lv%F;m)zSY&dz5jdB+$W2zrE(``O`tJddZg<*1-Ti zXwn0F^iz*xJko(2HCUQ)MGcl( zr@k~4HA)?|A|9bZ>e6HH?%bO{9WBk#PWB~+b@U=S#%o}fx2&K#nl`T5$!FG4e&W$6 z$6Tl*wwJj^+I!cd-$vcxyL%M3H4k9uIX^OMw|~C-d@6>t4nJi+57Ra z)sZhTtfTud2sH(rcyF0>lp`*Mb(F2=IF{1@y=2#Hc{f8DY&m7yMk$N)JXP@@{MD;1N_SH}T=i;kFGv2JHI18O^g*UqBlfvB7Y}|XS=g7}I zT6(H=ex7ta6wST?3ynS6uQXhIRA}Xy`jwyk!N|k>b)QzAf?N9PC$QzmhatwD#Fc z^QWg={Ze0ISWhWTGpL%YD7L)InQcoB(cj=jX_s!0qj&j8%Ut(BC z_hBOFQ7qoFg6e3z#-*{D4?2hT5l7}ir8w6q3`x2&Lg8n1C_ zZ1i*%acNpsM%L21<+A+gC|6waC5Cl$4C5ScVeys~R7V#|T*}Y<;#tNe)^q;aR{_2ZW0m}m4QZrxiPH7-9MEKS(U_Bs^aib)}FSwZzQWn9fO@?T-_y9bUiHM+lx zF*?NW@x!C}9J}WjDrcfoSUAeWu~?29ai9P2kY7UZ>pbSA9+{V;PyCl89Es&O6dQ4T 
zgy@u=U-aWl{@>&F{ql)Fj;)@2iD5lW$AnNDKI-+BNlz!n^T)BCf@)4H5fA|p5CIVo z0TB=Z5fA|p5CIVo0TB=Z5fA|p5CIVo0TB=Z5fA|p5CIVo0TB=Z5fA|p5CIVo0TB=Z z5fA|p5CIVo0TB=Z5fA|p5CIVo0TB=Z5fA|p5CIVo0TB=Z5fA|p5CIVo0TB=Z5fA|p z5CIVo0TB=Z5fA|p5CIVo0TB=Z5fA|p5CIVo0TB=Z5fA|p5CIVo0TB=Z5fA|p5CIVo z0TB=Z5fA|p5CIVo0TB=Z5fA|p5CIVo0TB=Z5fA|p5CIVo0TB=Z5fA|p5CIVo0TB=Z z5fA|p5CIVo0TB=Z5fA|p5CIVo0TB=Z5fA|p5CIVo0TB=Z5fA|p5CIVo0TB=Z5fA|p a5CIVo0TB=Z5fA|p5CIVo0TCE?1pW&`k>URU literal 0 HcmV?d00001 diff --git a/pkg/acquisition/modules/wineventlog/wineventlog_windows.go b/pkg/acquisition/modules/wineventlog/wineventlog_windows.go index 087c20eb70e..ca40363155b 100644 --- a/pkg/acquisition/modules/wineventlog/wineventlog_windows.go +++ b/pkg/acquisition/modules/wineventlog/wineventlog_windows.go @@ -5,7 +5,9 @@ import ( "encoding/xml" "errors" "fmt" + "net/url" "runtime" + "strconv" "strings" "syscall" "time" @@ -30,7 +32,7 @@ type WinEventLogConfiguration struct { EventLevel string `yaml:"event_level"` EventIDs []int `yaml:"event_ids"` XPathQuery string `yaml:"xpath_query"` - EventFile string `yaml:"event_file"` + EventFile string PrettyName string `yaml:"pretty_name"` } @@ -48,10 +50,13 @@ type QueryList struct { } type Select struct { - Path string `xml:"Path,attr"` + Path string `xml:"Path,attr,omitempty"` Query string `xml:",chardata"` } +// 0 identifies the local machine in windows APIs +const localMachine = 0 + var linesRead = prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "cs_winevtlogsource_hits_total", @@ -212,20 +217,28 @@ func (w *WinEventLogSource) getEvents(out chan types.Event, t *tomb.Tomb) error } } -func (w *WinEventLogSource) generateConfig(query string) (*winlog.SubscribeConfig, error) { +func (w *WinEventLogSource) generateConfig(query string, live bool) (*winlog.SubscribeConfig, error) { var config winlog.SubscribeConfig var err error - // Create a subscription signaler. - config.SignalEvent, err = windows.CreateEvent( - nil, // Default security descriptor. - 1, // Manual reset. 
- 1, // Initial state is signaled. - nil) // Optional name. - if err != nil { - return &config, fmt.Errorf("windows.CreateEvent failed: %v", err) + if live { + // Create a subscription signaler. + config.SignalEvent, err = windows.CreateEvent( + nil, // Default security descriptor. + 1, // Manual reset. + 1, // Initial state is signaled. + nil) // Optional name. + if err != nil { + return &config, fmt.Errorf("windows.CreateEvent failed: %v", err) + } + config.Flags = wevtapi.EvtSubscribeToFutureEvents + } else { + config.ChannelPath, err = syscall.UTF16PtrFromString(w.config.EventFile) + if err != nil { + return &config, fmt.Errorf("syscall.UTF16PtrFromString failed: %v", err) + } + config.Flags = wevtapi.EvtQueryFilePath | wevtapi.EvtQueryForwardDirection } - config.Flags = wevtapi.EvtSubscribeToFutureEvents config.Query, err = syscall.UTF16PtrFromString(query) if err != nil { return &config, fmt.Errorf("syscall.UTF16PtrFromString failed: %v", err) @@ -283,7 +296,7 @@ func (w *WinEventLogSource) Configure(yamlConfig []byte, logger *log.Entry, Metr return err } - w.evtConfig, err = w.generateConfig(w.query) + w.evtConfig, err = w.generateConfig(w.query, true) if err != nil { return err } @@ -292,6 +305,78 @@ func (w *WinEventLogSource) Configure(yamlConfig []byte, logger *log.Entry, Metr } func (w *WinEventLogSource) ConfigureByDSN(dsn string, labels map[string]string, logger *log.Entry, uuid string) error { + if !strings.HasPrefix(dsn, "wineventlog://") { + return fmt.Errorf("invalid DSN %s for wineventlog source, must start with wineventlog://", dsn) + } + + w.logger = logger + w.config = WinEventLogConfiguration{} + + dsn = strings.TrimPrefix(dsn, "wineventlog://") + + args := strings.Split(dsn, "?") + + if args[0] == "" { + return errors.New("empty wineventlog:// DSN") + } + + if len(args) > 2 { + return errors.New("too many arguments in DSN") + } + + w.config.EventFile = args[0] + + if len(args) == 2 && args[1] != "" { + params, err := url.ParseQuery(args[1]) 
+ if err != nil { + return fmt.Errorf("failed to parse DSN parameters: %w", err) + } + + for key, value := range params { + switch key { + case "log_level": + if len(value) != 1 { + return errors.New("log_level must be a single value") + } + lvl, err := log.ParseLevel(value[0]) + if err != nil { + return fmt.Errorf("failed to parse log_level: %s", err) + } + w.logger.Logger.SetLevel(lvl) + case "event_id": + for _, id := range value { + evtid, err := strconv.Atoi(id) + if err != nil { + return fmt.Errorf("failed to parse event_id: %s", err) + } + w.config.EventIDs = append(w.config.EventIDs, evtid) + } + case "event_level": + if len(value) != 1 { + return errors.New("event_level must be a single value") + } + w.config.EventLevel = value[0] + } + } + } + + var err error + + //FIXME: handle custom xpath query + w.query, err = w.buildXpathQuery() + + if err != nil { + return fmt.Errorf("buildXpathQuery failed: %w", err) + } + + w.logger.Debugf("query: %s\n", w.query) + + w.evtConfig, err = w.generateConfig(w.query, false) + + if err != nil { + return fmt.Errorf("generateConfig failed: %w", err) + } + return nil } @@ -300,10 +385,57 @@ func (w *WinEventLogSource) GetMode() string { } func (w *WinEventLogSource) SupportedModes() []string { - return []string{configuration.TAIL_MODE} + return []string{configuration.TAIL_MODE, configuration.CAT_MODE} } func (w *WinEventLogSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error { + + handle, err := wevtapi.EvtQuery(localMachine, w.evtConfig.ChannelPath, w.evtConfig.Query, w.evtConfig.Flags) + + if err != nil { + return fmt.Errorf("EvtQuery failed: %v", err) + } + + defer winlog.Close(handle) + + publisherCache := make(map[string]windows.Handle) + defer func() { + for _, h := range publisherCache { + winlog.Close(h) + } + }() + +OUTER_LOOP: + for { + select { + case <-t.Dying(): + w.logger.Infof("wineventlog is dying") + return nil + default: + evts, err := w.getXMLEvents(w.evtConfig, publisherCache, handle, 
500) + if err == windows.ERROR_NO_MORE_ITEMS { + log.Info("No more items") + break OUTER_LOOP + } else if err != nil { + return fmt.Errorf("getXMLEvents failed: %v", err) + } + w.logger.Debugf("Got %d events", len(evts)) + for _, evt := range evts { + w.logger.Tracef("Event: %s", evt) + if w.metricsLevel != configuration.METRICS_NONE { + linesRead.With(prometheus.Labels{"source": w.name}).Inc() + } + l := types.Line{} + l.Raw = evt + l.Module = w.GetName() + l.Labels = w.config.Labels + l.Time = time.Now() + l.Src = w.name + l.Process = true + out <- types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: types.TIMEMACHINE} + } + } + } return nil } diff --git a/pkg/acquisition/modules/wineventlog/wineventlog_test.go b/pkg/acquisition/modules/wineventlog/wineventlog_windows_test.go similarity index 73% rename from pkg/acquisition/modules/wineventlog/wineventlog_test.go rename to pkg/acquisition/modules/wineventlog/wineventlog_windows_test.go index ae6cb776909..9afef963669 100644 --- a/pkg/acquisition/modules/wineventlog/wineventlog_test.go +++ b/pkg/acquisition/modules/wineventlog/wineventlog_windows_test.go @@ -4,7 +4,6 @@ package wineventlogacquisition import ( "context" - "runtime" "testing" "time" @@ -19,9 +18,8 @@ import ( ) func TestBadConfiguration(t *testing.T) { - if runtime.GOOS != "windows" { - t.Skip("Skipping test on non-windows OS") - } + exprhelpers.Init(nil) + tests := []struct { config string expectedErr string @@ -64,9 +62,8 @@ xpath_query: test`, } func TestQueryBuilder(t *testing.T) { - if runtime.GOOS != "windows" { - t.Skip("Skipping test on non-windows OS") - } + exprhelpers.Init(nil) + tests := []struct { config string expectedQuery string @@ -130,10 +127,8 @@ event_level: bla`, } func TestLiveAcquisition(t *testing.T) { + exprhelpers.Init(nil) ctx := context.Background() - if runtime.GOOS != "windows" { - t.Skip("Skipping test on non-windows OS") - } tests := []struct { config string @@ -227,3 +222,82 @@ event_ids: to.Wait() } } + 
+func TestOneShotAcquisition(t *testing.T) { + tests := []struct { + name string + dsn string + expectedCount int + expectedErr string + expectedConfigureErr string + }{ + { + name: "non-existing file", + dsn: `wineventlog://foo.evtx`, + expectedCount: 0, + expectedErr: "The system cannot find the file specified.", + }, + { + name: "empty DSN", + dsn: `wineventlog://`, + expectedCount: 0, + expectedConfigureErr: "empty wineventlog:// DSN", + }, + { + name: "existing file", + dsn: `wineventlog://test_files/Setup.evtx`, + expectedCount: 24, + expectedErr: "", + }, + { + name: "filter on event_id", + dsn: `wineventlog://test_files/Setup.evtx?event_id=2`, + expectedCount: 1, + }, + { + name: "filter on event_id", + dsn: `wineventlog://test_files/Setup.evtx?event_id=2&event_id=3`, + expectedCount: 24, + }, + } + + exprhelpers.Init(nil) + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + lineCount := 0 + to := &tomb.Tomb{} + c := make(chan types.Event) + f := WinEventLogSource{} + err := f.ConfigureByDSN(test.dsn, map[string]string{"type": "wineventlog"}, log.WithField("type", "windowseventlog"), "") + + if test.expectedConfigureErr != "" { + assert.Contains(t, err.Error(), test.expectedConfigureErr) + return + } + + require.NoError(t, err) + + go func() { + for { + select { + case <-c: + lineCount++ + case <-to.Dying(): + return + } + } + }() + + err = f.OneShotAcquisition(c, to) + if test.expectedErr != "" { + assert.Contains(t, err.Error(), test.expectedErr) + } else { + require.NoError(t, err) + + time.Sleep(2 * time.Second) + assert.Equal(t, test.expectedCount, lineCount) + } + }) + } +} diff --git a/pkg/exprhelpers/helpers.go b/pkg/exprhelpers/helpers.go index 6b7eb0840e9..9bc991a8f2d 100644 --- a/pkg/exprhelpers/helpers.go +++ b/pkg/exprhelpers/helpers.go @@ -129,7 +129,7 @@ func Init(databaseClient *database.Client) error { dataFileRegex = make(map[string][]*regexp.Regexp) dataFileRe2 = make(map[string][]*re2.Regexp) dbClient = 
databaseClient - + XMLCacheInit() return nil } diff --git a/pkg/exprhelpers/xml.go b/pkg/exprhelpers/xml.go index 75758e18316..0b550bdb641 100644 --- a/pkg/exprhelpers/xml.go +++ b/pkg/exprhelpers/xml.go @@ -1,43 +1,103 @@ package exprhelpers import ( + "errors" + "sync" + "time" + "github.com/beevik/etree" + "github.com/bluele/gcache" + "github.com/cespare/xxhash/v2" log "github.com/sirupsen/logrus" ) -var pathCache = make(map[string]etree.Path) +var ( + pathCache = make(map[string]etree.Path) + rwMutex = sync.RWMutex{} + xmlDocumentCache gcache.Cache +) + +func compileOrGetPath(path string) (etree.Path, error) { + rwMutex.RLock() + compiledPath, ok := pathCache[path] + rwMutex.RUnlock() + + if !ok { + var err error + compiledPath, err = etree.CompilePath(path) + if err != nil { + return etree.Path{}, err + } + + rwMutex.Lock() + pathCache[path] = compiledPath + rwMutex.Unlock() + } + + return compiledPath, nil +} + +func getXMLDocumentFromCache(xmlString string) (*etree.Document, error) { + cacheKey := xxhash.Sum64String(xmlString) + cacheObj, err := xmlDocumentCache.Get(cacheKey) + + if err != nil && !errors.Is(err, gcache.KeyNotFoundError) { + return nil, err + } + + doc, ok := cacheObj.(*etree.Document) + if !ok || cacheObj == nil { + doc = etree.NewDocument() + if err := doc.ReadFromString(xmlString); err != nil { + return nil, err + } + if err := xmlDocumentCache.Set(cacheKey, doc); err != nil { + log.Warnf("Could not set XML document in cache: %s", err) + } + } + + return doc, nil +} + +func XMLCacheInit() { + gc := gcache.New(50) + // Short cache expiration because we each line we read is different, but we can call multiple times XML helpers on each of them + gc.Expiration(5 * time.Second) + gc = gc.LRU() + + xmlDocumentCache = gc.Build() +} // func XMLGetAttributeValue(xmlString string, path string, attributeName string) string { func XMLGetAttributeValue(params ...any) (any, error) { xmlString := params[0].(string) path := params[1].(string) 
attributeName := params[2].(string) - if _, ok := pathCache[path]; !ok { - compiledPath, err := etree.CompilePath(path) - if err != nil { - log.Errorf("Could not compile path %s: %s", path, err) - return "", nil - } - pathCache[path] = compiledPath + + compiledPath, err := compileOrGetPath(path) + if err != nil { + log.Errorf("Could not compile path %s: %s", path, err) + return "", nil } - compiledPath := pathCache[path] - doc := etree.NewDocument() - err := doc.ReadFromString(xmlString) + doc, err := getXMLDocumentFromCache(xmlString) if err != nil { log.Tracef("Could not parse XML: %s", err) return "", nil } + elem := doc.FindElementPath(compiledPath) if elem == nil { log.Debugf("Could not find element %s", path) return "", nil } + attr := elem.SelectAttr(attributeName) if attr == nil { log.Debugf("Could not find attribute %s", attributeName) return "", nil } + return attr.Value, nil } @@ -45,26 +105,24 @@ func XMLGetAttributeValue(params ...any) (any, error) { func XMLGetNodeValue(params ...any) (any, error) { xmlString := params[0].(string) path := params[1].(string) - if _, ok := pathCache[path]; !ok { - compiledPath, err := etree.CompilePath(path) - if err != nil { - log.Errorf("Could not compile path %s: %s", path, err) - return "", nil - } - pathCache[path] = compiledPath + + compiledPath, err := compileOrGetPath(path) + if err != nil { + log.Errorf("Could not compile path %s: %s", path, err) + return "", nil } - compiledPath := pathCache[path] - doc := etree.NewDocument() - err := doc.ReadFromString(xmlString) + doc, err := getXMLDocumentFromCache(xmlString) if err != nil { log.Tracef("Could not parse XML: %s", err) return "", nil } + elem := doc.FindElementPath(compiledPath) if elem == nil { log.Debugf("Could not find element %s", path) return "", nil } + return elem.Text(), nil }